Diffstat:
 testsuite/driver/runtests.py    | 2 ++
 testsuite/driver/testglobals.py | 3 +++
 testsuite/driver/testlib.py     | 2 +-
 3 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/testsuite/driver/runtests.py b/testsuite/driver/runtests.py
index c0482d0f9d..577851cf46 100644
--- a/testsuite/driver/runtests.py
+++ b/testsuite/driver/runtests.py
@@ -76,6 +76,7 @@ parser.add_argument("--perf-baseline", type=GitRef, metavar='COMMIT', help="Base
 parser.add_argument("--test-package-db", dest="test_package_db", action="append", help="Package db providing optional packages used by the testsuite.")
 perf_group.add_argument("--skip-perf-tests", action="store_true", help="skip performance tests")
 perf_group.add_argument("--only-perf-tests", action="store_true", help="Only do performance tests")
+perf_group.add_argument("--ignore-perf-failures", action="store_true", help="Don't fail due to out-of-tolerance perf tests")
 
 args = parser.parse_args()
 
@@ -151,6 +152,7 @@ if args.verbose is not None:
 forceSkipPerfTests = not hasMetricsFile and not inside_git_repo()
 config.skip_perf_tests = args.skip_perf_tests or forceSkipPerfTests
 config.only_perf_tests = args.only_perf_tests
+config.ignore_perf_failures = args.ignore_perf_failures
 
 if args.test_env:
     config.test_env = args.test_env
diff --git a/testsuite/driver/testglobals.py b/testsuite/driver/testglobals.py
index 2b3dd48b68..117df41eb6 100644
--- a/testsuite/driver/testglobals.py
+++ b/testsuite/driver/testglobals.py
@@ -31,6 +31,9 @@ class TestConfig:
         self.run_only_some_tests = False
         self.only = set()
 
+        # Don't fail on out-of-tolerance stat failures
+        self.ignore_perf_failures = False
+
         # Accept new output which differs from the sample?
         self.accept = False
         self.accept_platform = False
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index c2838ae5bf..2375ea8c20 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -1539,7 +1539,7 @@ def check_stats(name: TestName,
             # If any metric fails then the test fails.
             # Note, the remaining metrics are still run so that
             # a complete list of changes can be presented to the user.
-            if metric_result.passFail == 'fail':
+            if metric_result.passFail == 'fail' and not config.ignore_perf_failures:
                 result = metric_result
 
     return result
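
The net effect of the patch: metrics are still measured and compared against their baselines, but an out-of-tolerance result only fails the test when the new flag is absent. The following is a minimal standalone sketch of that decision logic, not the GHC driver itself; the MetricResult stand-in, the simplified check_stats signature, and the SimpleNamespace config object are assumptions for illustration only.

from types import SimpleNamespace

config = SimpleNamespace(ignore_perf_failures=False)

def check_stats(metric_results):
    result = SimpleNamespace(passFail='pass')
    for metric_result in metric_results:
        # Keep iterating even after a failure, so that a complete list of
        # metric changes can be presented to the user (as in the real driver).
        if metric_result.passFail == 'fail' and not config.ignore_perf_failures:
            result = metric_result
    return result

# Without the flag, one out-of-tolerance metric fails the whole test:
assert check_stats([SimpleNamespace(passFail='fail')]).passFail == 'fail'

# With --ignore-perf-failures (config.ignore_perf_failures = True),
# the deviation is reported but the overall result stays a pass:
config.ignore_perf_failures = True
assert check_stats([SimpleNamespace(passFail='fail')]).passFail == 'pass'

Note the design choice visible in the diff: the guard sits at the result-aggregation point in check_stats rather than skipping the perf checks entirely (as --skip-perf-tests does), so the measurements still run and deviations are still listed. Because the option is declared with argparse's store_true action, config.ignore_perf_failures defaults to False and behaviour is unchanged unless runtests.py is passed --ignore-perf-failures.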