summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author: Ben Gamari <ben@smart-cactus.org> 2019-08-02 15:36:06 -0400
committer: Ben Gamari <ben@smart-cactus.org> 2019-08-02 15:39:54 -0400
commit: 123c9981a79b35387efde5f445c8f56e27a2dcfb (patch)
tree: 5052974f7e7ea040068070579d4e36f30c1f1f14
parent: 5e04841c4641e2249066614065053166657e3eb4 (diff)
download: haskell-wip/rework-fragile-tests.tar.gz
testsuite: Rework tracking of fragile tests (branch: wip/rework-fragile-tests)
Breaks fragile test results into two groups (passes and failures), allowing us to easily preserve the stdout/stderr of failing fragile tests.
-rw-r--r--testsuite/driver/junit.py3
-rw-r--r--testsuite/driver/testglobals.py3
-rw-r--r--testsuite/driver/testlib.py19
3 files changed, 18 insertions(+), 7 deletions(-)
diff --git a/testsuite/driver/junit.py b/testsuite/driver/junit.py
index 180a81ab15..638577d279 100644
--- a/testsuite/driver/junit.py
+++ b/testsuite/driver/junit.py
@@ -19,7 +19,8 @@ def junit(t: TestRun) -> ET.ElementTree:
for res_type, group in [('stat failure', t.unexpected_stat_failures),
('unexpected failure', t.unexpected_failures),
('unexpected pass', t.unexpected_passes),
- ('fragile', t.fragile_results)]:
+ ('fragile failure', t.fragile_failures),
+ ('fragile pass', t.fragile_passes)]:
for tr in group:
testcase = ET.SubElement(testsuite, 'testcase',
classname = tr.way,
diff --git a/testsuite/driver/testglobals.py b/testsuite/driver/testglobals.py
index b7d668ec66..9895cd3a61 100644
--- a/testsuite/driver/testglobals.py
+++ b/testsuite/driver/testglobals.py
@@ -236,7 +236,8 @@ class TestRun:
self.unexpected_stat_failures = [] # type: List[TestResult]
# Results from tests that have been marked as fragile
- self.fragile_results = [] # type: List[TestResult]
+ self.fragile_passes = [] # type: List[TestResult]
+ self.fragile_failures = [] # type: List[TestResult]
# List of all metrics measured in this test run.
# [(change, PerfStat)] where change is one of the MetricChange
diff --git a/testsuite/driver/testlib.py b/testsuite/driver/testlib.py
index 116d4d9e22..e03a2c54aa 100644
--- a/testsuite/driver/testlib.py
+++ b/testsuite/driver/testlib.py
@@ -1008,7 +1008,12 @@ def do_test(name: TestName,
if way in opts.fragile_ways:
if_verbose(1, '*** fragile test %s resulted in %s' % (full_name, passFail))
- t.fragile_results.append(TestResult(directory, name, 'fragile %s' % passFail, way))
+ if passFail == 'pass':
+ t.fragile_passes.append(TestResult(directory, name, 'fragile', way))
+ else:
+ t.fragile_failures.append(TestResult(directory, name, 'fragile', way,
+ stdout=result.stdout,
+ stderr=result.stderr))
elif passFail == 'pass':
if _expect_pass(way):
t.expected_passes.append(TestResult(directory, name, "", way))
@@ -2363,7 +2368,7 @@ def summary(t: TestRun, file: TextIO, short=False, color=False) -> None:
+ ' unexpected failures\n'
+ repr(len(t.unexpected_stat_failures)).rjust(8)
+ ' unexpected stat failures\n'
- + repr(len(t.fragile_results)).rjust(8)
+ + repr(len(t.fragile_failures) + len(t.fragile_passes)).rjust(8)
+ ' fragile tests\n'
+ '\n')
@@ -2387,9 +2392,13 @@ def summary(t: TestRun, file: TextIO, short=False, color=False) -> None:
file.write('Framework warnings:\n')
printTestInfosSummary(file, t.framework_warnings)
- if t.fragile_results:
- file.write('Fragile tests:\n')
- printTestInfosSummary(file, t.fragile_results)
+ if t.fragile_passes:
+ file.write('Fragile test passes:\n')
+ printTestInfosSummary(file, t.fragile_passes)
+
+ if t.fragile_failures:
+ file.write('Fragile test failures:\n')
+ printTestInfosSummary(file, t.fragile_failures)
if stopping():
file.write('WARNING: Testsuite run was terminated early\n')