| author | MarcoFalke <falke.marco@gmail.com> | 2018-11-01 10:52:17 -0400 |
|---|---|---|
| committer | MarcoFalke <falke.marco@gmail.com> | 2018-11-01 18:03:26 -0400 |
| commit | fa43626611f4a76cbc361ea89e4989f80d2fa7d7 (patch) | |
| tree | 229f90bd1f42041c592c3757f79cd7ec4251d91e /test/functional | |
| parent | 6a095bc5f239e9345a245c3c877b650a1d054354 (diff) | |
test_runner: Remove travis specific code
Diffstat (limited to 'test/functional')
-rwxr-xr-x | test/functional/test_runner.py | 40 |
1 file changed, 24 insertions, 16 deletions
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 85a5de6976..8cbc9655c6 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -68,9 +68,6 @@ if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
 TEST_EXIT_PASSED = 0
 TEST_EXIT_SKIPPED = 77
 
-# 20 minutes represented in seconds
-TRAVIS_TIMEOUT_DURATION = 20 * 60
-
 BASE_SCRIPTS = [
     # Scripts that are run by the travis build process.
     # Longest test should go first, to favor running tests in parallel
@@ -216,6 +213,7 @@ def main():
                                      formatter_class=argparse.RawTextHelpFormatter)
     parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
     parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
+    parser.add_argument('--ci', action='store_true', help='Run checks and code that are usually only enabled in a continuous integration environment')
     parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
     parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
     parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
@@ -306,25 +304,26 @@ def main():
         subprocess.check_call([sys.executable, os.path.join(config["environment"]["SRCDIR"], 'test', 'functional', test_list[0].split()[0]), '-h'])
         sys.exit(0)
 
-    check_script_list(config["environment"]["SRCDIR"])
+    check_script_list(src_dir=config["environment"]["SRCDIR"], fail_on_warn=args.ci)
     check_script_prefixes()
 
     if not args.keepcache:
         shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
 
     run_tests(
-        test_list,
-        config["environment"]["SRCDIR"],
-        config["environment"]["BUILDDIR"],
-        tmpdir,
+        test_list=test_list,
+        src_dir=config["environment"]["SRCDIR"],
+        build_dir=config["environment"]["BUILDDIR"],
+        tmpdir=tmpdir,
         jobs=args.jobs,
         enable_coverage=args.coverage,
         args=passon_args,
         combined_logs_len=args.combinedlogslen,
-        failfast=args.failfast
+        failfast=args.failfast,
+        runs_ci=args.ci,
     )
 
-def run_tests(test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False):
+def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, runs_ci):
     args = args or []
 
     # Warn if bitcoind is already running (unix only)
@@ -359,7 +358,14 @@ def run_tests(test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=Fal
             raise
 
     #Run Tests
-    job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
+    job_queue = TestHandler(
+        num_tests_parallel=jobs,
+        tests_dir=tests_dir,
+        tmpdir=tmpdir,
+        test_list=test_list,
+        flags=flags,
+        timeout_duration=20 * 60 if runs_ci else float('inf'),  # in seconds
+    )
     start_time = time.time()
     test_results = []
 
@@ -440,11 +446,12 @@ class TestHandler:
     Trigger the test scripts passed in via the list.
     """
 
-    def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
-        assert(num_tests_parallel >= 1)
+    def __init__(self, *, num_tests_parallel, tests_dir, tmpdir, test_list, flags, timeout_duration):
+        assert num_tests_parallel >= 1
         self.num_jobs = num_tests_parallel
         self.tests_dir = tests_dir
         self.tmpdir = tmpdir
+        self.timeout_duration = timeout_duration
         self.test_list = test_list
         self.flags = flags
         self.num_running = 0
@@ -479,7 +486,7 @@ class TestHandler:
             time.sleep(.5)
             for job in self.jobs:
                 (name, start_time, proc, testdir, log_out, log_err) = job
-                if os.getenv('TRAVIS') == 'true' and int(time.time() - start_time) > TRAVIS_TIMEOUT_DURATION:
+                if int(time.time() - start_time) > self.timeout_duration:
                     # In travis, timeout individual tests (to stop tests hanging and not providing useful output).
                     proc.send_signal(signal.SIGINT)
                 if proc.poll() is not None:
@@ -557,7 +564,7 @@ def check_script_prefixes():
         raise AssertionError("Some tests are not following naming convention!")
 
 
-def check_script_list(src_dir):
+def check_script_list(*, src_dir, fail_on_warn):
     """Check scripts directory.
 
     Check that there are no scripts in the functional tests directory which are
@@ -567,10 +574,11 @@ def check_script_list(src_dir):
     missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
     if len(missed_tests) != 0:
         print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
-        if os.getenv('TRAVIS') == 'true':
+        if fail_on_warn:
             # On travis this warning is an error to prevent merging incomplete commits into master
             sys.exit(1)
 
+
 class RPCCoverage():
     """
     Coverage reporting utilities for test_runner.