Diffstat (limited to 'testsuite/driver/runtests.py')
-rw-r--r--  testsuite/driver/runtests.py  36
1 file changed, 8 insertions(+), 28 deletions(-)
diff --git a/testsuite/driver/runtests.py b/testsuite/driver/runtests.py
index 73297dae46..247a5cc330 100644
--- a/testsuite/driver/runtests.py
+++ b/testsuite/driver/runtests.py
@@ -379,37 +379,18 @@ else:
new_metrics = [metric for (change, metric) in t.metrics if change == MetricChange.NewMetric]
if any(new_metrics):
if canGitStatus:
- reason = 'a baseline (expected value) cannot be recovered from' + \
- ' previous git commits. This may be due to HEAD having' + \
- ' new tests or having expected changes, the presence of' + \
- ' expected changes since the last run of the tests, and/or' + \
- ' the latest test run being too old.'
- fix = 'If the tests exist on the previous' + \
- ' commit (And are configured to run with the same ways),' + \
- ' then check out that commit and run the tests to generate' + \
- ' the missing metrics. Alternatively, a baseline may be' + \
- ' recovered from ci results once fetched (where origin' + \
- ' is the official ghc git repo):\n\n' + \
- spacing + 'git fetch ' + \
- 'https://gitlab.haskell.org/ghc/ghc-performance-notes.git' + \
- ' refs/notes/perf:refs/notes/' + Perf.CiNamespace
+ reason = 'the previous git commit doesn\'t have recorded metrics for the following tests.' + \
+ ' If the tests exist on the previous commit, then check it out and run the tests to generate the missing metrics.'
else:
- reason = "this is not a git repo so the previous git commit's" + \
- " metrics cannot be loaded from git notes:"
- fix = ""
+ reason = 'this is not a git repo so the previous git commit\'s metrics cannot be loaded from git notes:'
print()
- print(str_warn('Missing Baseline Metrics') + \
- ' these metrics trivially pass because ' + reason)
- print(spacing + (' ').join(set([metric.test for metric in new_metrics])))
- if fix != "":
- print()
- print(fix)
+ print(str_warn('New Metrics') + ' these metrics trivially pass because ' + reason)
+ print(spacing + ('\n' + spacing).join(set([metric.test for metric in new_metrics])))
# Inform of how to accept metric changes.
if (len(t.unexpected_stat_failures) > 0):
print()
- print(str_info("Some stats have changed") + " If this is expected, " + \
- "allow changes by appending the git commit message with this:")
+ print(str_info("Some stats have changed") + " If this is expected, allow changes by appending the git commit message with this:")
print('-' * 25)
print(Perf.allow_changes_string(t.metrics))
print('-' * 25)
@@ -425,9 +406,8 @@ else:
elif canGitStatus and any(stats):
if is_worktree_dirty():
print()
- print(str_warn('Performance Metrics NOT Saved') + \
- ' working tree is dirty. Commit changes or use ' + \
- '--metrics-file to save metrics to a file.')
+ print(str_warn('Working Tree is Dirty') + ' performance metrics will not be saved.' + \
+ ' Commit changes or use --metrics-file to save metrics to a file.')
else:
Perf.append_perf_stat(stats)
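
A minimal, self-contained sketch (not part of the patch) of the formatting change in the 'New Metrics' warning: the old code printed the deduplicated test names as one space-separated line, while the new code joins them one per line, each prefixed with the driver's spacing indent. The PerfStat shape and the spacing value below are illustrative assumptions only.

# Sketch only: assumes a metric record with a .test field and a four-space indent.
from collections import namedtuple

PerfStat = namedtuple('PerfStat', ['test', 'way', 'metric', 'value'])

spacing = '    '
new_metrics = [
    PerfStat('T1234', 'normal', 'bytes allocated', 100),
    PerfStat('T1234', 'optasm', 'bytes allocated', 90),
    PerfStat('T5678', 'normal', 'max_bytes_used', 50),
]

# Old output: all test names on one space-separated line.
print(spacing + ' '.join(set([m.test for m in new_metrics])))

# New output: one test name per line, each indented by `spacing`.
print(spacing + ('\n' + spacing).join(set([m.test for m in new_metrics])))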