author     laanwj <126646+laanwj@users.noreply.github.com>  2022-01-05 17:19:47 +0100
committer  laanwj <126646+laanwj@users.noreply.github.com>  2022-01-05 17:19:54 +0100
commit     121d47afe3e67ff7f94d26e08a39573dccf652aa (patch)
tree       4a0a01c5f1f61fba45201be7f2c51748d8448ad6 /test/functional/test_runner.py
parent     af7fba3af788e91a460582351d40f8f5e2118760 (diff)
parent     975097f424541a149c5b4e03816208aa76aad6b9 (diff)
download   bitcoin-121d47afe3e67ff7f94d26e08a39573dccf652aa.tar.xz
Merge bitcoin/bitcoin#23799: test: Let test_runner.py start multiple jobs per timeslot

975097f424541a149c5b4e03816208aa76aad6b9 Let test_runner.py start multiple jobs per timeslot (Pieter Wuille)

Pull request description:

  test_runner.py currently only checks every 0.5s whether any job has finished, and if so, starts at most one new job. At higher parallelism it becomes increasingly likely that multiple jobs have finished at the same time. Fix this by always noticing *all* finished jobs every timeslot, and starting as many new ones.

ACKs for top commit:
  laanwj:
    Code review and lightly tested ACK 975097f424541a149c5b4e03816208aa76aad6b9
  prayank23:
    ACK https://github.com/bitcoin/bitcoin/pull/23799/commits/975097f424541a149c5b4e03816208aa76aad6b9

Tree-SHA512: b70c51f05efcde9bc25475c192b86e86b4c399495b42dee20576af3e6b99e8298be8b9e82146abdabbaedb24a13ee158a7c8947518b16fc4f33a3b434935b550
Diffstat (limited to 'test/functional/test_runner.py')
-rwxr-xr-x  test/functional/test_runner.py  63
1 file changed, 34 insertions(+), 29 deletions(-)
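In essence, TestHandler.get_next() used to block until a single job had finished and return that one result, so the caller could start at most one replacement job per 0.5 s timeslot. After this change it returns a list of every job that finished during the timeslot, and the caller loops over that list. A minimal standalone sketch of the pattern follows; the JobQueue class, its attributes, and the shell commands are simplified stand-ins for illustration, not the actual TestHandler API:

    import subprocess
    import time

    class JobQueue:
        """Simplified stand-in for the runner's job handler (illustrative only)."""
        def __init__(self, commands, parallel):
            self.pending = list(commands)   # shell commands not yet started
            self.parallel = parallel        # maximum number of concurrent jobs
            self.running = []               # list of (command, Popen) pairs

        def get_next(self):
            """Return a list of *all* jobs that finished this timeslot."""
            while True:
                # Top up the running set before polling.
                while self.pending and len(self.running) < self.parallel:
                    cmd = self.pending.pop(0)
                    self.running.append((cmd, subprocess.Popen(cmd, shell=True)))
                time.sleep(0.5)
                finished = []
                for cmd, proc in list(self.running):
                    if proc.poll() is not None:          # process has exited
                        self.running.remove((cmd, proc))
                        finished.append((cmd, proc.returncode))
                if finished:
                    return finished  # may hold several results per timeslot

    # Caller: count every result returned for a timeslot, not just one.
    queue = JobQueue(["true", "sleep 1", "false"], parallel=2)
    done, total = 0, 3
    while done < total:
        for cmd, code in queue.get_next():
            done += 1
            print("{}/{} '{}' exited with {}".format(done, total, cmd, code))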
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index df9aec6281..eb2d030f4a 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -530,33 +530,35 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
- for i in range(test_count):
- test_result, testdir, stdout, stderr = job_queue.get_next()
- test_results.append(test_result)
- done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
- if test_result.status == "Passed":
- logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
- elif test_result.status == "Skipped":
- logging.debug("%s skipped" % (done_str))
- else:
- print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
- print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
- print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
- if combined_logs_len and os.path.isdir(testdir):
- # Print the final `combinedlogslen` lines of the combined logs
- print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
- print('\n============')
- print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
- print('============\n')
- combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
- if BOLD[0]:
- combined_logs_args += ['--color']
- combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
- print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
-
- if failfast:
- logging.debug("Early exiting after test failure")
- break
+ i = 0
+ while i < test_count:
+ for test_result, testdir, stdout, stderr in job_queue.get_next():
+ test_results.append(test_result)
+ i += 1
+ done_str = "{}/{} - {}{}{}".format(i, test_count, BOLD[1], test_result.name, BOLD[0])
+ if test_result.status == "Passed":
+ logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
+ elif test_result.status == "Skipped":
+ logging.debug("%s skipped" % (done_str))
+ else:
+ print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
+ print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
+ print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
+ if combined_logs_len and os.path.isdir(testdir):
+ # Print the final `combinedlogslen` lines of the combined logs
+ print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
+ print('\n============')
+ print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
+ print('============\n')
+ combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
+ if BOLD[0]:
+ combined_logs_args += ['--color']
+ combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
+ print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
+
+ if failfast:
+ logging.debug("Early exiting after test failure")
+ break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
@@ -650,8 +652,9 @@ class TestHandler:
dot_count = 0
while True:
- # Return first proc that finishes
+ # Return all procs that have finished, if any. Otherwise sleep until there is one.
time.sleep(.5)
+ ret = []
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if proc.poll() is not None:
@@ -670,7 +673,9 @@ class TestHandler:
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
- return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
+ ret.append((TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr))
+ if ret:
+ return ret
if self.use_term_control:
print('.', end='', flush=True)
dot_count += 1
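Unrelated to the behavior change, the combined-log truncation carried over unchanged in the hunks above relies on collections.deque taking a maxlen argument, which silently discards older items so only the last N lines are kept. A small self-contained example of that idiom:

    from collections import deque

    combined_logs = "\n".join("log line {}".format(i) for i in range(1, 11))
    # deque(iterable, maxlen) keeps only the final 3 items, so only the
    # last three log lines are printed.
    print("\n".join(deque(combined_logs.splitlines(), 3)))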