Diffstat (limited to 'testsuite/driver/perf_notes.py')
-rw-r--r--  testsuite/driver/perf_notes.py  |  21
1 file changed, 14 insertions(+), 7 deletions(-)
diff --git a/testsuite/driver/perf_notes.py b/testsuite/driver/perf_notes.py
index b94d4568a5..1e5f19a719 100644
--- a/testsuite/driver/perf_notes.py
+++ b/testsuite/driver/perf_notes.py
@@ -85,7 +85,7 @@ def test_cmp(full_name, field, val, expected, dev=20):
# Corresponds to 'all' setting for metric parameter in collect_stats function.
testing_metrics = ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_used']
-# Defaults to "test everything, only break on extreme cases, not a compiler stats test"
+# Defaults to "test everything, and only break on extreme cases"
#
# The inputs to this function are slightly interesting:
# metric can be either:
@@ -93,19 +93,26 @@ testing_metrics = ['bytes allocated', 'peak_megabytes_allocated', 'max_bytes_use
# - The specific metric one wants to use in the test.
# - A list of the metrics one wants to use in the test.
#
-# deviation defaults to 20% because the goal is correctness over performance.
+# Deviation defaults to 20% because the goal is correctness over performance.
# The testsuite should avoid breaking when there is not an actual error.
# Instead, the testsuite should notify of regressions in a non-breaking manner.
#
-# 'compiler' is somewhat of an unfortunate name.
+# collect_compiler_stats is used when the metrics collected are about the compiler itself.
+# collect_stats is used in the more common case, when the metrics to be
+# collected concern the runtime performance of the code the compiler generates.
+def collect_compiler_stats(metric='all', deviation=20):
+    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d, True)
+
+def collect_stats(metric='all', deviation=20):
+    return lambda name, opts, m=metric, d=deviation: _collect_stats(name, opts, m, d)
+
+# 'is_compiler_stats_test' is a somewhat unfortunate name.
# If the boolean is set to true, it indicates that this test is one that
# measures the performance numbers of the compiler.
# As this is a fairly rare case in the testsuite, it defaults to false to
# indicate that it is a 'normal' performance test.
-def collect_stats(metric='all', deviation=20, compiler=False):
- return lambda name, opts, m=metric, d=deviation, c=compiler: _collect_stats(name, opts, m, d, c)
-
-def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test):
+# This is an internal helper; tests should go through collect_stats or collect_compiler_stats.
+def _collect_stats(name, opts, metric, deviation, is_compiler_stats_test=False):
if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
    # my_framework_fail(name, 'bad_name', 'This test has an invalid name')
    my_failBecause('This test has an invalid name.')
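
For context, here is a sketch of how the two entry points introduced by this patch are typically invoked from a testsuite .T file. The test names, metrics, and deviation thresholds below are invented for illustration; test, compile, and compile_and_run follow the usual GHC testsuite driver conventions.

# Hypothetical all.T entries (names and thresholds invented for illustration):

# A 'normal' performance test: measures the code the compiler generates.
test('T1234',
     [collect_stats('bytes allocated', 5)],
     compile_and_run,
     ['-O'])

# A compiler stats test: measures the performance of the compiler itself.
test('T5678',
     [collect_compiler_stats('bytes allocated', 10)],
     compile,
     [''])

Note that the lambda returned by each wrapper binds metric and deviation as default arguments (m=metric, d=deviation), so the values are captured when the setup function is created rather than when the driver eventually calls it.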