Diffstat (limited to 'test/functional/test_runner.py')
-rwxr-xr-x | test/functional/test_runner.py | 215
1 file changed, 157 insertions(+), 58 deletions(-)
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 7ec9d552a1..467e1668d1 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -23,6 +23,31 @@ import sys
 import subprocess
 import tempfile
 import re
+import logging
+
+# Formatting. Default colors to empty strings.
+BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
+try:
+    # Make sure python thinks it can write unicode to its stdout
+    "\u2713".encode("utf_8").decode(sys.stdout.encoding)
+    TICK = "✓ "
+    CROSS = "✖ "
+    CIRCLE = "○ "
+except UnicodeDecodeError:
+    TICK = "P "
+    CROSS = "x "
+    CIRCLE = "o "
+
+if os.name == 'posix':
+    # primitive formatting on supported
+    # terminal via ANSI escape sequences:
+    BOLD = ('\033[0m', '\033[1m')
+    BLUE = ('\033[0m', '\033[0;34m')
+    RED = ('\033[0m', '\033[0;31m')
+    GREY = ('\033[0m', '\033[1;30m')
+
+TEST_EXIT_PASSED = 0
+TEST_EXIT_SKIPPED = 77
 
 BASE_SCRIPTS= [
     # Scripts that are run by the travis build process.
@@ -52,6 +77,7 @@ BASE_SCRIPTS= [
     'rawtransactions.py',
     'reindex.py',
     # vv Tests less than 30s vv
+    "zmq_test.py",
     'mempool_resurrect_test.py',
     'txn_doublespend.py --mineblock',
     'txn_clone.py',
@@ -64,10 +90,11 @@ BASE_SCRIPTS= [
     'multi_rpc.py',
     'proxy_test.py',
     'signrawtransactions.py',
-    'nodehandling.py',
+    'disconnect_ban.py',
     'decodescript.py',
     'blockchain.py',
     'disablewallet.py',
+    'net.py',
     'keypool.py',
     'p2p-mempool.py',
     'prioritise_transaction.py',
@@ -83,13 +110,9 @@ BASE_SCRIPTS= [
     'rpcnamedargs.py',
     'listsinceblock.py',
     'p2p-leaktests.py',
+    'import-abort-rescan.py',
 ]
 
-ZMQ_SCRIPTS = [
-    # ZMQ test can only be run if bitcoin was built with zmq-enabled.
-    # call test_runner.py with -nozmq to explicitly exclude these tests.
-    "zmq_test.py"]
-
 EXTENDED_SCRIPTS = [
     # These tests are not run by the travis build process.
     # Longest test should go first, to favor running tests in parallel
@@ -108,6 +131,7 @@ EXTENDED_SCRIPTS = [
     'p2p-feefilter.py',
     'rpcbind_test.py',
     # vv Tests less than 30s vv
+    'assumevalid.py',
     'bip65-cltv.py',
     'bip65-cltv-p2p.py',
     'bipdersig-p2p.py',
@@ -117,12 +141,19 @@ EXTENDED_SCRIPTS = [
     'txn_clone.py --mineblock',
     'forknotify.py',
     'invalidateblock.py',
-    'maxblocksinflight.py',
     'p2p-acceptblock.py',
     'replace-by-fee.py',
 ]
 
-ALL_SCRIPTS = BASE_SCRIPTS + ZMQ_SCRIPTS + EXTENDED_SCRIPTS
+# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
+ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
+
+NON_SCRIPTS = [
+    # These are python files that live in the functional tests directory, but are not test scripts.
+    "combine_logs.py",
+    "create_cache.py",
+    "test_runner.py",
+]
 
 def main():
     # Parse arguments and pass through unrecognised args
@@ -138,7 +169,8 @@ def main():
     parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
     parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
     parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
-    parser.add_argument('--nozmq', action='store_true', help='do not run the zmq tests')
+    parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
+    parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
     args, unknown_args = parser.parse_known_args()
 
     # Create a set to store arguments and create the passon string
@@ -147,12 +179,18 @@ def main():
 
     # Read config generated by configure.
     config = configparser.ConfigParser()
-    config.read_file(open(os.path.dirname(__file__) + "/config.ini"))
+    configfile = os.path.abspath(os.path.dirname(__file__)) + "/config.ini"
+    config.read_file(open(configfile))
+
+    passon_args.append("--configfile=%s" % configfile)
+
+    # Set up logging
+    logging_level = logging.INFO if args.quiet else logging.DEBUG
+    logging.basicConfig(format='%(message)s', level=logging_level)
 
     enable_wallet = config["components"].getboolean("ENABLE_WALLET")
     enable_utils = config["components"].getboolean("ENABLE_UTILS")
     enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
-    enable_zmq = config["components"].getboolean("ENABLE_ZMQ") and not args.nozmq
 
     if config["environment"]["EXEEXT"] == ".exe" and not args.force:
         # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
@@ -165,15 +203,6 @@ def main():
         print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
         sys.exit(0)
 
-    # python3-zmq may not be installed. Handle this gracefully and with some helpful info
-    if enable_zmq:
-        try:
-            import zmq
-        except ImportError:
-            print("ERROR: \"import zmq\" failed. Use -nozmq to run without the ZMQ tests."
-                  "To run zmq tests, see dependency info in /test/README.md.")
-            raise
-
     # Build list of tests
     if tests:
         # Individual tests have been specified. Run specified tests that exist
@@ -181,16 +210,13 @@ def main():
         test_list = [t for t in ALL_SCRIPTS if
                      (t in tests or re.sub(".py$", "", t) in tests)]
     else:
-        # No individual tests have been specified. Run base tests, and
-        # optionally ZMQ tests and extended tests.
+        # No individual tests have been specified.
+        # Run all base tests, and optionally run extended tests.
         test_list = BASE_SCRIPTS
-        if enable_zmq:
-            test_list += ZMQ_SCRIPTS
         if args.extended:
-            test_list += EXTENDED_SCRIPTS
-            # TODO: BASE_SCRIPTS and EXTENDED_SCRIPTS are sorted by runtime
-            # (for parallel running efficiency). This combined list will is no
-            # longer sorted.
+            # place the EXTENDED_SCRIPTS first since the three longest ones
+            # are there and the list is shorter
+            test_list = EXTENDED_SCRIPTS + test_list
 
     # Remove the test cases that the user has explicitly asked to exclude.
     if args.exclude:
@@ -204,19 +230,30 @@ def main():
         sys.exit(0)
 
     if args.help:
-        # Print help for test_runner.py, then print help of the first script and exit.
+        # Print help for test_runner.py, then print help of the first script (with args removed) and exit.
         parser.print_help()
-        subprocess.check_call((config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0]).split() + ['-h'])
+        subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
         sys.exit(0)
 
+    check_script_list(config["environment"]["SRCDIR"])
+
+    if not args.keepcache:
+        shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
+
     run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], args.jobs, args.coverage, passon_args)
 
 def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=False, args=[]):
-    BOLD = ("","")
-    if os.name == 'posix':
-        # primitive formatting on supported
-        # terminal via ANSI escape sequences:
-        BOLD = ('\033[0m', '\033[1m')
+    # Warn if bitcoind is already running (unix only)
+    try:
+        if subprocess.check_output(["pidof", "bitcoind"]) is not None:
+            print("%sWARNING!%s There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
+    except (OSError, subprocess.SubprocessError):
+        pass
+
+    # Warn if there is a cache directory
+    cache_dir = "%s/test/cache" % build_dir
+    if os.path.isdir(cache_dir):
+        print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
 
     #Set env vars
     if "BITCOIND" not in os.environ:
@@ -225,12 +262,12 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
     tests_dir = src_dir + '/test/functional/'
 
     flags = ["--srcdir={}/src".format(build_dir)] + args
-    flags.append("--cachedir=%s/test/cache" % build_dir)
+    flags.append("--cachedir=%s" % cache_dir)
 
     if enable_coverage:
        coverage = RPCCoverage()
        flags.append(coverage.flag)
-       print("Initializing coverage directory at %s\n" % coverage.dir)
+       logging.debug("Initializing coverage directory at %s" % coverage.dir)
     else:
         coverage = None
 
@@ -239,38 +276,55 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
         subprocess.check_output([tests_dir + 'create_cache.py'] + flags)
 
     #Run Tests
-    all_passed = True
-    time_sum = 0
-    time0 = time.time()
-
     job_queue = TestHandler(jobs, tests_dir, test_list, flags)
+    time0 = time.time()
+    test_results = []
 
     max_len_name = len(max(test_list, key=len))
-    results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
-    for _ in range(len(test_list)):
-        (name, stdout, stderr, passed, duration) = job_queue.get_next()
-        all_passed = all_passed and passed
-        time_sum += duration
-        print('\n' + BOLD[1] + name + BOLD[0] + ":")
-        print('' if passed else stdout + '\n', end='')
-        print('' if stderr == '' else 'stderr:\n' + stderr + '\n', end='')
-        print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
+
+    for _ in range(len(test_list)):
+        test_result, stdout, stderr = job_queue.get_next()
+        test_results.append(test_result)
 
-        results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
+        if test_result.status == "Passed":
+            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
+        elif test_result.status == "Skipped":
+            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
+        else:
+            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
+            print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
+            print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
 
-    results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
-    print(results)
-    print("\nRuntime: %s s" % (int(time.time() - time0)))
+    print_results(test_results, max_len_name, (int(time.time() - time0)))
 
     if coverage:
         coverage.report_rpc_coverage()
-        print("Cleaning up coverage data")
+        logging.debug("Cleaning up coverage data")
        coverage.cleanup()
 
+    all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
+
     sys.exit(not all_passed)
 
+def print_results(test_results, max_len_name, runtime):
+    results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
+
+    test_results.sort(key=lambda result: result.name.lower())
+    all_passed = True
+    time_sum = 0
+
+    for test_result in test_results:
+        all_passed = all_passed and test_result.was_successful
+        time_sum += test_result.time
+        test_result.padding = max_len_name
+        results += str(test_result)
+
+    status = TICK + "Passed" if all_passed else CROSS + "Failed"
+    results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
+    results += "Runtime: %s s\n" % (runtime)
+    print(results)
+
 class TestHandler:
     """
     Trigger the testscrips passed in via the list.
@@ -297,9 +351,10 @@ class TestHandler:
             port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
             log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
             log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
+            test_argv = t.split()
             self.jobs.append((t,
                               time.time(),
-                              subprocess.Popen((self.tests_dir + t).split() + self.flags + port_seed,
+                              subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + port_seed,
                                                universal_newlines=True,
                                                stdout=log_stdout,
                                                stderr=log_stderr),
@@ -316,12 +371,56 @@ class TestHandler:
                 log_out.seek(0), log_err.seek(0)
                 [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
                 log_out.close(), log_err.close()
-                passed = stderr == "" and proc.returncode == 0
+                if proc.returncode == TEST_EXIT_PASSED and stderr == "":
+                    status = "Passed"
+                elif proc.returncode == TEST_EXIT_SKIPPED:
+                    status = "Skipped"
+                else:
+                    status = "Failed"
                 self.num_running -= 1
                 self.jobs.remove(j)
-                return name, stdout, stderr, passed, int(time.time() - time0)
+
+                return TestResult(name, status, int(time.time() - time0)), stdout, stderr
             print('.', end='', flush=True)
 
+class TestResult():
+    def __init__(self, name, status, time):
+        self.name = name
+        self.status = status
+        self.time = time
+        self.padding = 0
+
+    def __repr__(self):
+        if self.status == "Passed":
+            color = BLUE
+            glyph = TICK
+        elif self.status == "Failed":
+            color = RED
+            glyph = CROSS
+        elif self.status == "Skipped":
+            color = GREY
+            glyph = CIRCLE
+
+        return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
+
+    @property
+    def was_successful(self):
+        return self.status != "Failed"
+
+
+def check_script_list(src_dir):
+    """Check scripts directory.
+
+    Check that there are no scripts in the functional tests directory which are
+    not being run by pull-tester.py."""
+    script_dir = src_dir + '/test/functional/'
+    python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
+    missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
+    if len(missed_tests) != 0:
+        print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
+        if os.getenv('TRAVIS') == 'true':
+            # On travis this warning is an error to prevent merging incomplete commits into master
+            sys.exit(1)
 
 class RPCCoverage(object):
     """
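A note on the formatting globals this change hoists to module level: each of BOLD, BLUE, RED and GREY is a two-tuple holding the ANSI reset code in slot 0 and the style code in slot 1, so text is wrapped as TUPLE[1] + text + TUPLE[0], and because the tuples default to empty strings the wrapping is a no-op on non-POSIX platforms. A minimal sketch of the pattern, with a bold() helper added here purely for illustration:

import os

# Default to empty codes so concatenation is a no-op off POSIX.
BOLD = ("", "")
if os.name == 'posix':
    # (reset, start): slot 1 turns bold on, slot 0 restores the terminal.
    BOLD = ('\033[0m', '\033[1m')

def bold(text):
    # Wrap text in the style code, then reset.
    return BOLD[1] + text + BOLD[0]

print(bold("WARNING!") + " plain text follows")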
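The new TEST_EXIT_SKIPPED = 77 follows the automake convention for skipped tests, and it is what lets zmq_test.py move from its own ZMQ_SCRIPTS list into BASE_SCRIPTS: instead of the runner special-casing ZMQ, a script whose prerequisites are missing can exit 77 and be recorded as "Skipped" rather than "Failed". A hypothetical self-skipping script under that contract (the zmq import stands in for any optional prerequisite):

import sys

TEST_EXIT_PASSED = 0    # with empty stderr, the runner reports "Passed"
TEST_EXIT_SKIPPED = 77  # the runner reports "Skipped", not "Failed"

try:
    import zmq  # optional dependency; its absence should not fail the run
except ImportError:
    print("python3-zmq not installed, skipping")
    sys.exit(TEST_EXIT_SKIPPED)

# ... the actual test logic would run here ...
sys.exit(TEST_EXIT_PASSED)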
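The --quiet flag works by routing per-test chatter through logging.debug() while the summary table and failure output still go through print(), so raising the level to INFO silences only the chatter. A runnable sketch of that split (args_quiet stands in for the parsed args.quiet):

import logging

args_quiet = True  # stand-in for args.quiet from argparse
logging_level = logging.INFO if args_quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)

logging.debug("per-test detail")  # suppressed under --quiet
print("results summary")          # always printed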
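Several of the split() calls in the diff exist for the same reason: entries in the test lists may carry arguments (for example 'txn_doublespend.py --mineblock'), so only the first token names a file. That is why TestHandler now builds test_argv = t.split(), the --help path uses test_list[0].split()[0], and check_script_list compares x.split()[0]. In miniature:

# A list entry may be "script.py --flag"; only token 0 names a file.
t = 'txn_doublespend.py --mineblock'
test_argv = t.split()
script = test_argv[0]       # 'txn_doublespend.py', joined with tests_dir
extra_args = test_argv[1:]  # ['--mineblock'], passed through to Popen
print(script, extra_args)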
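Finally, check_script_list is a completeness guard: every *.py file in the functional test directory must appear in ALL_SCRIPTS or NON_SCRIPTS, and on Travis an omission aborts the run so incomplete test lists cannot be merged. A sketch of the same set arithmetic, with toy lists standing in for the real ones and an assumed working directory:

import os

ALL_SCRIPTS = ['blockchain.py', 'txn_doublespend.py --mineblock']  # toy data
NON_SCRIPTS = ['combine_logs.py', 'create_cache.py', 'test_runner.py']

script_dir = 'test/functional/'  # assumed to be run from the source root
python_files = {f for f in os.listdir(script_dir) if f.endswith('.py')}
listed = {entry.split()[0] for entry in ALL_SCRIPTS + NON_SCRIPTS}
missed = sorted(python_files - listed)
if missed:
    print("WARNING! The following scripts are not being run:", missed)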