Diffstat (limited to 'test/functional/test_runner.py')
-rwxr-xr-x  test/functional/test_runner.py | 29 +++++++++++++++++++++++--------
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 673cdb5066..39f1180a45 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -67,7 +67,7 @@ BASE_SCRIPTS= [
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
- 'wallet_accounts.py',
+ 'wallet_labels.py',
'p2p_segwit.py',
'wallet_dump.py',
'rpc_listtransactions.py',
@@ -136,6 +136,7 @@ BASE_SCRIPTS= [
'p2p_unrequested_blocks.py',
'feature_logging.py',
'p2p_node_network_limited.py',
+ 'feature_blocksdir.py',
'feature_config_args.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
@@ -366,7 +367,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_cove
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
- test_results.sort(key=lambda result: result.name.lower())
+ test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
@@ -377,7 +378,11 @@ def print_results(test_results, max_len_name, runtime):
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
+ if not all_passed:
+ results += RED[1]
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
+ if not all_passed:
+ results += RED[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
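
The two new RED guards above only affect the summary row: when any test failed, the "ALL" line is wrapped in the red escape code in addition to bold. A minimal sketch of that wrapping, assuming the same (reset, enable) ANSI colour-tuple convention test_runner.py defines for POSIX terminals:

# Minimal sketch, not the runner itself: assumes the (reset, enable)
# ANSI tuple convention used for BOLD/RED on POSIX terminals.
BOLD = ('\033[0m', '\033[1m')
RED = ('\033[0m', '\033[31m')

def summary_line(all_passed, name_width=20):
    status = "Passed" if all_passed else "Failed"
    line = ""
    if not all_passed:
        line += RED[1]      # turn the summary row red
    line += BOLD[1] + "%s | %s" % ("ALL".ljust(name_width), status) + BOLD[0]
    if not all_passed:
        line += RED[0]      # reset the colour afterwards
    return line

print(summary_line(False))  # a red, bold "ALL | Failed" row
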
@@ -433,7 +438,7 @@ class TestHandler:
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
- [stdout, stderr] = [file.read().decode('utf-8') for file in (log_out, log_err)]
+ [stdout, stderr] = [log_file.read().decode('utf-8') for log_file in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
@@ -454,6 +459,14 @@ class TestResult():
self.time = time
self.padding = 0
+ def sort_key(self):
+ if self.status == "Passed":
+ return 0, self.name.lower()
+ elif self.status == "Failed":
+ return 2, self.name.lower()
+ elif self.status == "Skipped":
+ return 1, self.name.lower()
+
def __repr__(self):
if self.status == "Passed":
color = BLUE
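
The new sort_key() returns a (rank, name) tuple, so sorting with it groups passed tests first, skipped tests second, and failures last, alphabetically within each group, which is what the test_results.sort(key=TestResult.sort_key) call in print_results() relies on. A small illustration with made-up results:

# Illustration only: three made-up results sorted the same way
# print_results() sorts them.
class TestResult():
    def __init__(self, name, status):
        self.name = name
        self.status = status

    def sort_key(self):
        if self.status == "Passed":
            return 0, self.name.lower()
        elif self.status == "Failed":
            return 2, self.name.lower()
        elif self.status == "Skipped":
            return 1, self.name.lower()

results = [TestResult("wallet_basic.py", "Failed"),
           TestResult("feature_segwit.py", "Passed"),
           TestResult("feature_pruning.py", "Skipped")]
results.sort(key=TestResult.sort_key)
print([r.name for r in results])
# ['feature_segwit.py', 'feature_pruning.py', 'wallet_basic.py']
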
@@ -490,7 +503,7 @@ def check_script_list(src_dir):
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
- python_files = set([file for file in os.listdir(script_dir) if file.endswith(".py")])
+ python_files = set([test_file for test_file in os.listdir(script_dir) if test_file.endswith(".py")])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
@@ -550,8 +563,8 @@ class RPCCoverage():
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
- with open(coverage_ref_filename, 'r') as file:
- all_cmds.update([line.strip() for line in file.readlines()])
+ with open(coverage_ref_filename, 'r') as coverage_ref_file:
+ all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
@@ -559,8 +572,8 @@ class RPCCoverage():
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
- with open(filename, 'r') as file:
- covered_cmds.update([line.strip() for line in file.readlines()])
+ with open(filename, 'r') as coverage_file:
+ covered_cmds.update([line.strip() for line in coverage_file.readlines()])
return all_cmds - covered_cmds
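
The renames in RPCCoverage leave its logic untouched: the reference file lists every RPC command, each per-node coverage file lists the commands a test actually called, and the uncalled set is the plain set difference returned above. A condensed sketch of that flow, assuming the coverage file paths are already known (the real class discovers them by walking self.dir):

# Condensed sketch of the set difference; the real RPCCoverage walks its
# temporary directory to find the per-node coverage files.
def uncovered_commands(coverage_ref_filename, coverage_filenames):
    all_cmds = set()
    covered_cmds = set()

    with open(coverage_ref_filename, 'r') as coverage_ref_file:
        all_cmds.update([line.strip() for line in coverage_ref_file.readlines()])

    for filename in coverage_filenames:
        with open(filename, 'r') as coverage_file:
            covered_cmds.update([line.strip() for line in coverage_file.readlines()])

    return all_cmds - covered_cmds  # RPC commands no functional test exercised
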