Diffstat (limited to 'test/functional')
-rwxr-xr-x  test/functional/feature_pruning.py        | 21
-rwxr-xr-x  test/functional/feature_reindex.py        |  7
-rwxr-xr-x  test/functional/rpc_net.py                | 10
-rw-r--r--  test/functional/test_framework/util.py    | 11
-rwxr-xr-x  test/functional/test_runner.py            | 68
-rwxr-xr-x  test/functional/wallet_basic.py           |  4
6 files changed, 50 insertions, 71 deletions
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index 49ad7f838c..8dfa1be2fa 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -11,7 +11,6 @@ This test takes 30 mins or more (up to 2 hours)
 
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import *
-import time
 import os
 
 MIN_BLOCKS_TO_KEEP = 288
@@ -23,7 +22,7 @@ TIMESTAMP_WINDOW = 2 * 60 * 60
 
 
 def calc_usage(blockdir):
-    return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(blockdir+f)) / (1024. * 1024.)
+    return sum(os.path.getsize(blockdir+f) for f in os.listdir(blockdir) if os.path.isfile(os.path.join(blockdir, f))) / (1024. * 1024.)
 
 class PruneTest(BitcoinTestFramework):
     def set_test_params(self):
@@ -70,7 +69,7 @@ class PruneTest(BitcoinTestFramework):
         sync_blocks(self.nodes[0:5])
 
     def test_height_min(self):
-        if not os.path.isfile(self.prunedir+"blk00000.dat"):
+        if not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")):
             raise AssertionError("blk00000.dat is missing, pruning too early")
         self.log.info("Success")
         self.log.info("Though we're already using more than 550MiB, current usage: %d" % calc_usage(self.prunedir))
@@ -79,11 +78,8 @@ class PruneTest(BitcoinTestFramework):
         for i in range(25):
             mine_large_block(self.nodes[0], self.utxo_cache_0)
 
-        waitstart = time.time()
-        while os.path.isfile(self.prunedir+"blk00000.dat"):
-            time.sleep(0.1)
-            if time.time() - waitstart > 30:
-                raise AssertionError("blk00000.dat not pruned when it should be")
+        # Wait for blk00000.dat to be pruned
+        wait_until(lambda: not os.path.isfile(os.path.join(self.prunedir, "blk00000.dat")), timeout=30)
 
         self.log.info("Success")
         usage = calc_usage(self.prunedir)
@@ -218,11 +214,8 @@ class PruneTest(BitcoinTestFramework):
             goalbestheight = first_reorg_height + 1
 
         self.log.info("Verify node 2 reorged back to the main chain, some blocks of which it had to redownload")
-        waitstart = time.time()
-        while self.nodes[2].getblockcount() < goalbestheight:
-            time.sleep(0.1)
-            if time.time() - waitstart > 900:
-                raise AssertionError("Node 2 didn't reorg to proper height")
+        # Wait for Node 2 to reorg to proper height
+        wait_until(lambda: self.nodes[2].getblockcount() >= goalbestheight, timeout=900)
         assert(self.nodes[2].getbestblockhash() == goalbesthash)
         # Verify we can now have the data for a block previously pruned
         assert(self.nodes[2].getblock(self.forkhash)["height"] == self.forkheight)
@@ -262,7 +255,7 @@ class PruneTest(BitcoinTestFramework):
             assert_equal(ret, expected_ret)
 
         def has_block(index):
-            return os.path.isfile(self.options.tmpdir + "/node{}/regtest/blocks/blk{:05}.dat".format(node_number, index))
+            return os.path.isfile(os.path.join(self.nodes[node_number].datadir, "regtest", "blocks", "blk{:05}.dat".format(index)))
 
         # should not prune because chain tip of node 3 (995) < PruneAfterHeight (1000)
         assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
diff --git a/test/functional/feature_reindex.py b/test/functional/feature_reindex.py
index ac67e6e9ba..d1d3f1d7f1 100755
--- a/test/functional/feature_reindex.py
+++ b/test/functional/feature_reindex.py
@@ -10,8 +10,7 @@
 """
 
 from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import assert_equal
-import time
+from test_framework.util import wait_until
 
 class ReindexTest(BitcoinTestFramework):
 
@@ -25,9 +24,7 @@ class ReindexTest(BitcoinTestFramework):
         self.stop_nodes()
         extra_args = [["-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
         self.start_nodes(extra_args)
-        while self.nodes[0].getblockcount() < blockcount:
-            time.sleep(0.1)
-        assert_equal(self.nodes[0].getblockcount(), blockcount)
+        wait_until(lambda: self.nodes[0].getblockcount() == blockcount)
         self.log.info("Success")
 
     def run_test(self):
diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py
index 5f34b35bfb..d8348432aa 100755
--- a/test/functional/rpc_net.py
+++ b/test/functional/rpc_net.py
@@ -7,8 +7,6 @@
 Tests correspond to code in rpc/net.cpp.
 """
 
-import time
-
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
     assert_equal,
@@ -62,12 +60,8 @@ class NetTest(BitcoinTestFramework):
         self.nodes[0].setnetworkactive(False)
         assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
 
-        timeout = 3
-        while self.nodes[0].getnetworkinfo()['connections'] != 0:
-            # Wait a bit for all sockets to close
-            assert timeout > 0, 'not all connections closed in time'
-            timeout -= 0.1
-            time.sleep(0.1)
+        # Wait a bit for all sockets to close
+        wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
 
         self.nodes[0].setnetworkactive(True)
         connect_nodes_bi(self.nodes, 0, 1)
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 7be695550b..68ac97d755 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -340,20 +340,15 @@ def disconnect_nodes(from_connection, node_num):
     for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
         from_connection.disconnectnode(nodeid=peer_id)
 
-    for _ in range(50):
-        if [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == []:
-            break
-        time.sleep(0.1)
-    else:
-        raise AssertionError("timed out waiting for disconnect")
+    # wait to disconnect
+    wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
 
 def connect_nodes(from_connection, node_num):
     ip_port = "127.0.0.1:" + str(p2p_port(node_num))
     from_connection.addnode(ip_port, "onetry")
     # poll until version handshake complete to avoid race conditions
     # with transaction relaying
-    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
-        time.sleep(0.1)
+    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
 
 def connect_nodes_bi(nodes, a, b):
     connect_nodes(nodes[a], b)
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 082191098e..09f7f50de0 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -52,6 +52,9 @@ if os.name == 'posix':
 TEST_EXIT_PASSED = 0
 TEST_EXIT_SKIPPED = 77
 
+# 20 minutes represented in seconds
+TRAVIS_TIMEOUT_DURATION = 20 * 60
+
 BASE_SCRIPTS= [
     # Scripts that are run by the travis build process.
     # Longest test should go first, to favor running tests in parallel
@@ -233,29 +236,27 @@ def main():
         sys.exit(0)
 
     # Build list of tests
+    test_list = []
     if tests:
         # Individual tests have been specified. Run specified tests that exist
         # in the ALL_SCRIPTS list. Accept the name with or without .py extension.
-        tests = [re.sub("\.py$", "", t) + ".py" for t in tests]
-        test_list = []
-        for t in tests:
-            if t in ALL_SCRIPTS:
-                test_list.append(t)
+        tests = [re.sub("\.py$", "", test) + ".py" for test in tests]
+        for test in tests:
+            if test in ALL_SCRIPTS:
+                test_list.append(test)
             else:
-                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
+                print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], test))
+    elif args.extended:
+        # Include extended tests
+        test_list += ALL_SCRIPTS
     else:
-        # No individual tests have been specified.
-        # Run all base tests, and optionally run extended tests.
-        test_list = BASE_SCRIPTS
-        if args.extended:
-            # place the EXTENDED_SCRIPTS first since the three longest ones
-            # are there and the list is shorter
-            test_list = EXTENDED_SCRIPTS + test_list
+        # Run base tests only
+        test_list += BASE_SCRIPTS
 
     # Remove the test cases that the user has explicitly asked to exclude.
     if args.exclude:
-        tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')]
-        for exclude_test in tests_excl:
+        exclude_tests = [re.sub("\.py$", "", test) + ".py" for test in args.exclude.split(',')]
+        for exclude_test in exclude_tests:
             if exclude_test in test_list:
                 test_list.remove(exclude_test)
             else:
@@ -320,7 +321,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_cove
 
     #Run Tests
     job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
-    time0 = time.time()
+    start_time = time.time()
     test_results = []
 
     max_len_name = len(max(test_list, key=len))
@@ -346,7 +347,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_cove
             combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
             print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
 
-    print_results(test_results, max_len_name, (int(time.time() - time0)))
+    print_results(test_results, max_len_name, (int(time.time() - start_time)))
 
     if coverage:
         coverage.report_rpc_coverage()
@@ -403,15 +404,15 @@ class TestHandler:
         while self.num_running < self.num_jobs and self.test_list:
             # Add tests
             self.num_running += 1
-            t = self.test_list.pop(0)
+            test = self.test_list.pop(0)
             portseed = len(self.test_list) + self.portseed_offset
             portseed_arg = ["--portseed={}".format(portseed)]
             log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
             log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
-            test_argv = t.split()
+            test_argv = test.split()
             testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
             tmpdir_arg = ["--tmpdir={}".format(testdir)]
-            self.jobs.append((t,
+            self.jobs.append((test,
                               time.time(),
                               subprocess.Popen([sys.executable, self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
                                                universal_newlines=True,
@@ -425,15 +426,14 @@ class TestHandler:
         while True:
             # Return first proc that finishes
             time.sleep(.5)
-            for j in self.jobs:
-                (name, time0, proc, testdir, log_out, log_err) = j
-                if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
-                    # In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
-                    # providing useful output.
+            for job in self.jobs:
+                (name, start_time, proc, testdir, log_out, log_err) = job
+                if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > TRAVIS_TIMEOUT_DURATION:
+                    # In travis, timeout individual tests (to stop tests hanging and not providing useful output).
                     proc.send_signal(signal.SIGINT)
                 if proc.poll() is not None:
                     log_out.seek(0), log_err.seek(0)
-                    [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
+                    [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
                     log_out.close(), log_err.close()
                     if proc.returncode == TEST_EXIT_PASSED and stderr == "":
                         status = "Passed"
@@ -442,9 +442,9 @@ class TestHandler:
                     else:
                         status = "Failed"
                     self.num_running -= 1
-                    self.jobs.remove(j)
+                    self.jobs.remove(job)
 
-                    return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
+                    return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
             print('.', end='', flush=True)
 
 class TestResult():
@@ -490,7 +490,7 @@ def check_script_list(src_dir):
     Check that there are no scripts in the functional tests directory which are
     not being run by pull-tester.py."""
     script_dir = src_dir + '/test/functional/'
-    python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
+    python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
     missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
     if len(missed_tests) != 0:
         print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
@@ -526,7 +526,7 @@ class RPCCoverage():
 
         if uncovered:
             print("Uncovered RPC commands:")
-            print("".join((" - %s\n" % i) for i in sorted(uncovered)))
+            print("".join((" - %s\n" % command) for command in sorted(uncovered)))
         else:
             print("All RPC commands covered.")
 
@@ -550,8 +550,8 @@ class RPCCoverage():
         if not os.path.isfile(coverage_ref_filename):
            raise RuntimeError("No coverage reference found")
 
-        with open(coverage_ref_filename, 'r') as f:
-            all_cmds.update([i.strip() for i in f.readlines()])
+        with open(coverage_ref_filename, 'r') as coverage_ref_file:
+            all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
 
         for root, dirs, files in os.walk(self.dir):
             for filename in files:
@@ -559,8 +559,8 @@ class RPCCoverage():
                 coverage_filenames.add(os.path.join(root, filename))
 
         for filename in coverage_filenames:
-            with open(filename, 'r') as f:
-                covered_cmds.update([i.strip() for i in f.readlines()])
+            with open(filename, 'r') as coverage_file:
+                covered_cmds.update([line.strip() for line in coverage_file.readlines()])
 
         return all_cmds - covered_cmds
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index f686cb6ea5..dcd6c54d97 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -379,9 +379,9 @@ class WalletTest(BitcoinTestFramework):
             self.start_node(0, [m, "-limitancestorcount="+str(chainlimit)])
             self.start_node(1, [m, "-limitancestorcount="+str(chainlimit)])
             self.start_node(2, [m, "-limitancestorcount="+str(chainlimit)])
-            while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
+            if m == '-reindex':
                 # reindex will leave rpc warm up "early"; Wait for it to finish
-                time.sleep(0.1)
+                wait_until(lambda: [block_count] * 3 == [self.nodes[i].getblockcount() for i in range(3)])
             assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
 
         # Exercise listsinceblock with the last two blocks
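
All of the changes above converge on one pattern: hand-rolled `while ... time.sleep(0.1)` polling loops are replaced with the `wait_until` helper from `test_framework.util`. As a point of reference, below is a minimal standalone sketch of such a poll-with-timeout helper; it is illustrative only, and the parameter names and defaults (`timeout`, `poll_interval`) are assumptions, not the framework's exact signature.

    import time

    def wait_until(predicate, *, timeout=60, poll_interval=0.5):
        """Poll predicate() until it returns a truthy value, or fail after timeout seconds."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if predicate():
                return
            time.sleep(poll_interval)
        raise AssertionError("wait_until() timed out after %s seconds" % timeout)

With a helper along these lines, a loop such as `while self.nodes[0].getblockcount() < blockcount: time.sleep(0.1)` collapses to `wait_until(lambda: self.nodes[0].getblockcount() == blockcount)`, and every call site gets the same timeout handling and failure message instead of a slightly different ad-hoc loop.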