diff options
author | Chris Dent <chdent@redhat.com> | 2015-02-11 14:39:10 +0000 |
---|---|---|
committer | Chris Dent <chdent@redhat.com> | 2015-02-16 15:59:18 +0000 |
commit | e29ec710eff862c0be03755e87b25ddb048d6d2a (patch) | |
tree | b6c00ccac35de663e52d3ee349acde4d32803440 | |
parent | c70c26d05c95778420bc0032d5653e06e941d79c (diff) | |
download | tempest-lib-e29ec710eff862c0be03755e87b25ddb048d6d2a.tar.gz |
Summarize expected failures
The incoming stream has the xfail data but count_tests was not
matching on the xfail status. Instead it was matching on the
fragment 'fail' meaning that expected failures were counted as
failures. By bounding the regular expressions it is possible to get more
specific results while still leaving the count_tests method flexible
for other users where fragments would be useful.
Counts of 'uxsuccess' are also summarized as "Unexpected Success".
It's unclear how to effectively automate testing of this. Manual
testing returns the expected results.
Change-Id: I5b1458f9a98712ea3e424d2c9610b915055138af
-rwxr-xr-x | tempest_lib/cmd/subunit_trace.py | 9 |
1 files changed, 6 insertions, 3 deletions
diff --git a/tempest_lib/cmd/subunit_trace.py b/tempest_lib/cmd/subunit_trace.py index 5ffaa7c..49e2a04 100755 --- a/tempest_lib/cmd/subunit_trace.py +++ b/tempest_lib/cmd/subunit_trace.py @@ -205,9 +205,12 @@ def print_summary(stream, elapsed_time): stream.write("\n======\nTotals\n======\n") stream.write("Ran: %s tests in %.4f sec.\n" % ( count_tests('status', '.*'), total_seconds(elapsed_time))) - stream.write(" - Passed: %s\n" % count_tests('status', 'success')) - stream.write(" - Skipped: %s\n" % count_tests('status', 'skip')) - stream.write(" - Failed: %s\n" % count_tests('status', 'fail')) + stream.write(" - Passed: %s\n" % count_tests('status', '^success$')) + stream.write(" - Skipped: %s\n" % count_tests('status', '^skip$')) + stream.write(" - Expected Fail: %s\n" % count_tests('status', '^xfail$')) + stream.write(" - Unexpected Success: %s\n" % count_tests('status', + '^uxsuccess$')) + stream.write(" - Failed: %s\n" % count_tests('status', '^fail$')) stream.write("Sum of execute time for each test: %.4f sec.\n" % run_time()) # we could have no results, especially as we filter out the process-codes |