author     Robert Collins <robertc@robertcollins.net>    2013-04-11 23:20:12 +1200
committer  Robert Collins <robertc@robertcollins.net>    2013-04-11 23:20:12 +1200
commit     b1170816e23a7525bbc41729096d6121f4ba52f5 (patch)
tree       861b4144557d78b4c8a4babfed7bc37f2358940f
parent     5b1485becf12450e954902ed0fc94762eb97443d (diff)
download   testrepository-b1170816e23a7525bbc41729096d6121f4ba52f5.tar.gz
Consolidate on the StreamResult API for make_results' return values.
-rw-r--r--  testrepository/tests/commands/test_failing.py    4
-rw-r--r--  testrepository/tests/commands/test_last.py       8
-rw-r--r--  testrepository/tests/commands/test_load.py       4
-rw-r--r--  testrepository/tests/ui/test_cli.py             21
-rw-r--r--  testrepository/ui/__init__.py                   34
-rw-r--r--  testrepository/ui/cli.py                        57
-rw-r--r--  testrepository/ui/model.py                      24
7 files changed, 52 insertions, 100 deletions
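
The tests below drive suites through testtools.StreamSummary instead of testtools.TestResult. A minimal sketch of that pattern, assuming a testtools release that provides the StreamResult API; the test ids and traceback text here are invented for illustration:

    from testtools import StreamSummary

    summary = StreamSummary()
    summary.startTestRun()
    try:
        # Outcomes arrive as status() events rather than addSuccess/addFailure calls.
        summary.status(test_id='demo.test_ok', test_status='success')
        summary.status(test_id='demo.test_bad', test_status='fail', eof=True,
                       file_name='traceback',
                       mime_type='text/plain;charset=utf8',
                       file_bytes=b'Traceback (most recent call last): ...')
    finally:
        summary.stopTestRun()

    # StreamSummary reports failed tests via .errors; there is no separate
    # .failures list as on the old TestResult, hence the assertion changes below.
    print(summary.testsRun, len(summary.errors))
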
diff --git a/testrepository/tests/commands/test_failing.py b/testrepository/tests/commands/test_failing.py
index 1e25e87..a37f326 100644
--- a/testrepository/tests/commands/test_failing.py
+++ b/testrepository/tests/commands/test_failing.py
@@ -57,14 +57,14 @@ class TestCommand(ResourcedTestCase):
('summary', False, 1, None, Wildcard, None, [('id', 0, None), ('failures', 1, None)])],
ui.outputs)
suite = ui.outputs[0][1]
- result = testtools.TestResult()
+ result = testtools.StreamSummary()
result.startTestRun()
try:
suite.run(result)
finally:
result.stopTestRun()
self.assertEqual(1, result.testsRun)
- self.assertEqual(1, len(result.failures))
+ self.assertEqual(1, len(result.errors))
def test_with_subunit_shows_subunit_stream(self):
ui, cmd = self.get_test_ui_and_cmd(options=[('subunit', True)])
diff --git a/testrepository/tests/commands/test_last.py b/testrepository/tests/commands/test_last.py
index 39c20dc..672051e 100644
--- a/testrepository/tests/commands/test_last.py
+++ b/testrepository/tests/commands/test_last.py
@@ -54,13 +54,13 @@ class TestCommand(ResourcedTestCase):
[('id', id, None), ('failures', 1, None)])],
ui.outputs)
suite = ui.outputs[0][1]
- result = testtools.TestResult()
+ result = testtools.StreamSummary()
result.startTestRun()
try:
suite.run(result)
finally:
result.stopTestRun()
- self.assertEqual(1, len(result.failures))
+ self.assertEqual(1, len(result.errors))
self.assertEqual(2, result.testsRun)
def _add_run(self, repo):
@@ -85,13 +85,13 @@ class TestCommand(ResourcedTestCase):
[('id', id, None), ('failures', 1, 0)])],
ui.outputs)
suite = ui.outputs[0][1]
- result = testtools.TestResult()
+ result = testtools.StreamSummary()
result.startTestRun()
try:
suite.run(result)
finally:
result.stopTestRun()
- self.assertEqual(1, len(result.failures))
+ self.assertEqual(1, len(result.errors))
self.assertEqual(2, result.testsRun)
def test_shows_subunit_stream(self):
diff --git a/testrepository/tests/commands/test_load.py b/testrepository/tests/commands/test_load.py
index 1360e41..ed564c8 100644
--- a/testrepository/tests/commands/test_load.py
+++ b/testrepository/tests/commands/test_load.py
@@ -173,14 +173,14 @@ class TestCommandLoad(ResourcedTestCase):
('summary', False, 1, None, Wildcard, None,
[('id', 0, None), ('failures', 1, None)])],
ui.outputs)
- result = testtools.TestResult()
+ result = testtools.StreamSummary()
result.startTestRun()
try:
suite.run(result)
finally:
result.stopTestRun()
self.assertEqual(1, result.testsRun)
- self.assertEqual(1, len(result.failures))
+ self.assertEqual(1, len(result.errors))
def test_load_new_shows_test_skips(self):
if v2_avail:
diff --git a/testrepository/tests/ui/test_cli.py b/testrepository/tests/ui/test_cli.py
index cdde5ec..06a3dc4 100644
--- a/testrepository/tests/ui/test_cli.py
+++ b/testrepository/tests/ui/test_cli.py
@@ -341,29 +341,19 @@ class TestCLITestResult(TestCase):
cli.CLITestResult(cli.UI(None, None, None, None), stream, lambda: None)
self.assertEqual('', stream.getvalue())
- def _unwrap(self, result):
- """Unwrap result to get to the CLI result object."""
- return result.decorated.decorated
-
def test_format_error(self):
# CLITestResult formats errors by giving them a big fat line, a title
# made up of their 'label' and the name of the test, another different
# big fat line, and then the actual error itself.
- result = self._unwrap(self.make_result()[0])
+ result = self.make_result()[0]
error = result._format_error('label', self, 'error text')
expected = '%s%s: %s\n%s%s' % (
result.sep1, 'label', self.id(), result.sep2, 'error text')
self.assertThat(error, DocTestMatches(expected))
def test_format_error_includes_tags(self):
- result1 = self.make_result()
- result = self._unwrap(result1[0])
- #result1.startTestRun()
- #result1.status(test_id=self.id(), test_status='fail', eof=True,
- # test_tags=set(['foo']), file_name='traceback',
- # mime_type='test/plain;charset=utf8', file_bytes=b'error text')
- result.tags(set(['foo']), set())
- error = result._format_error('label', self, 'error text')
+ result = self.make_result()[0]
+ error = result._format_error('label', self, 'error text', set(['foo']))
expected = '%s%s: %s\ntags: foo\n%s%s' % (
result.sep1, 'label', self.id(), result.sep2, 'error text')
self.assertThat(error, DocTestMatches(expected))
@@ -379,10 +369,9 @@ class TestCLITestResult(TestCase):
result.status(test_id=self.id(), test_status='fail', eof=True,
file_name='traceback', mime_type='text/plain;charset=utf8',
file_bytes=error_text.encode('utf8'))
- result1 = self._unwrap(result)
self.assertThat(
stream.getvalue(),
- DocTestMatches(result1._format_error('FAIL', self, error_text)))
+ DocTestMatches(result._format_error('FAIL', self, error_text)))
def test_addFailure_handles_string_encoding(self):
# CLITestResult.addFailure outputs the given error handling non-ascii
@@ -433,6 +422,6 @@ FAIL: fail
tags: worker-0
----------------------------------------------------------------------
Ran 1 tests
-FAILED (id=None, failures=1)
+FAILED (id=None, failures=1, skips=1)
""", stream.getvalue())
diff --git a/testrepository/ui/__init__.py b/testrepository/ui/__init__.py
index 2da1f98..3ce37c2 100644
--- a/testrepository/ui/__init__.py
+++ b/testrepository/ui/__init__.py
@@ -22,7 +22,7 @@ See AbstractUI for details on what UI classes should do and are responsible
for.
"""
-from testtools import TestResult
+from testtools import StreamResult
from testrepository.results import SummarizingResult
from testrepository.utils import timedelta_to_seconds
@@ -189,7 +189,7 @@ class AbstractUI(object):
raise NotImplementedError(self.subprocess_Popen)
-class BaseUITestResult(TestResult):
+class BaseUITestResult(StreamResult):
"""An abstract test result used with the UI.
AbstractUI.make_result probably wants to return an object like this.
@@ -205,7 +205,7 @@ class BaseUITestResult(TestResult):
self.ui = ui
self.get_id = get_id
self._previous_run = previous_run
- self._first_time = None
+ self._summary = SummarizingResult()
def _get_previous_summary(self):
if self._previous_run is None:
@@ -224,49 +224,39 @@ class BaseUITestResult(TestResult):
"""
if self.ui.options.quiet:
return
- time = self.get_time_taken()
+ time = self._summary.get_time_taken()
time_delta = None
num_tests_run_delta = None
num_failures_delta = None
values = [('id', run_id, None)]
- failures = self.get_num_failures()
+ failures = self._summary.get_num_failures()
previous_summary = self._get_previous_summary()
if failures:
if previous_summary:
num_failures_delta = failures - previous_summary.get_num_failures()
values.append(('failures', failures, num_failures_delta))
if previous_summary:
- num_tests_run_delta = self.testsRun - previous_summary.testsRun
+ num_tests_run_delta = self._summary.testsRun - previous_summary.testsRun
if time:
previous_time_taken = previous_summary.get_time_taken()
if previous_time_taken:
time_delta = time - previous_time_taken
- skips = sum(map(len, self.skip_reasons.values()))
+ skips = len(self._summary.skipped)
if skips:
values.append(('skips', skips, None))
self.ui.output_summary(
- not bool(failures), self.testsRun, num_tests_run_delta,
+ not bool(failures), self._summary.testsRun, num_tests_run_delta,
time, time_delta, values)
def startTestRun(self):
super(BaseUITestResult, self).startTestRun()
- self._first_time = None
+ self._summary.startTestRun()
def stopTestRun(self):
super(BaseUITestResult, self).stopTestRun()
run_id = self.get_id()
+ self._summary.stopTestRun()
self._output_summary(run_id)
- def get_num_failures(self):
- return len(self.failures) + len(self.errors)
-
- def time(self, a_time):
- if self._first_time is None:
- self._first_time = a_time
- super(BaseUITestResult, self).time(a_time)
-
- def get_time_taken(self):
- now = self._now()
- if None in (self._first_time, now):
- return None
- return timedelta_to_seconds(now - self._first_time)
+ def status(self, *args, **kwargs):
+ self._summary.status(*args, **kwargs)
diff --git a/testrepository/ui/cli.py b/testrepository/ui/cli.py
index 9a41962..e5cd9ff 100644
--- a/testrepository/ui/cli.py
+++ b/testrepository/ui/cli.py
@@ -46,9 +46,11 @@ class CLITestResult(ui.BaseUITestResult):
self.sep1 = _u('=' * 70 + '\n')
self.sep2 = _u('-' * 70 + '\n')
self.filter_tags = filter_tags or frozenset()
+ self.filterable_states = set(['success', 'uxsuccess', 'xfail', 'skip'])
- def _format_error(self, label, test, error_text):
- tags = _u(' ').join(self.current_tags)
+ def _format_error(self, label, test, error_text, test_tags=None):
+ test_tags = test_tags or ()
+ tags = _u(' ').join(test_tags)
if tags:
tags = _u('tags: %s\n') % tags
return _u('').join([
@@ -59,38 +61,21 @@ class CLITestResult(ui.BaseUITestResult):
error_text,
])
- def addError(self, test, err=None, details=None):
- super(CLITestResult, self).addError(test, err=err, details=details)
- self.stream.write(self._format_error(_u('ERROR'), *(self.errors[-1])))
-
- def addFailure(self, test, err=None, details=None):
- super(CLITestResult, self).addFailure(test, err=err, details=details)
- self.stream.write(self._format_error(_u('FAIL'), *(self.failures[-1])))
-
- def addSuccess(self, test, details=None):
- if self.current_tags.intersection(self.filter_tags):
- self.testsRun -= 1
- return
- super(CLITestResult, self).addSuccess(test, details=details)
-
- def addUnexpectedSuccess(self, test, details=None):
- if self.current_tags.intersection(self.filter_tags):
- self.testsRun -= 1
- return
- super(CLITestResult, self).addUnexpectedSuccess(test, details=details)
-
- def addExpectedFailure(self, test, err=None, details=None):
- if self.current_tags.intersection(self.filter_tags):
- self.testsRun -= 1
- return
- super(CLITestResult, self).addExpectedFailure(
- test, err=err, details=details)
-
- def addSkip(self, test, reason=None, details=None):
- if self.current_tags.intersection(self.filter_tags):
- self.testsRun -= 1
+ def status(self, test_id=None, test_status=None, test_tags=None,
+ runnable=True, file_name=None, file_bytes=None, eof=False,
+ mime_type=None, route_code=None, timestamp=None):
+ super(CLITestResult, self).status(test_id=test_id,
+ test_status=test_status, test_tags=test_tags, runnable=runnable,
+ file_name=file_name, file_bytes=file_bytes, eof=eof,
+ mime_type=mime_type, route_code=route_code, timestamp=timestamp)
+ if test_status == 'fail':
+ self.stream.write(
+ self._format_error(_u('FAIL'), *(self._summary.errors[-1]),
+ test_tags=test_tags))
+ if test_status not in self.filterable_states:
return
- super(CLITestResult, self).addSkip(test, reason=reason, details=details)
+ if test_tags and test_tags.intersection(self.filter_tags):
+ self._summary.testsRun -= 1
class UI(ui.AbstractUI):
@@ -127,12 +112,12 @@ class UI(ui.AbstractUI):
summary.stopTestRun()
return result, summary
else:
+ # Apply user defined transforms.
filter_tags = test_command.get_filter_tags()
output = CLITestResult(self, get_id, self._stdout, previous_run,
filter_tags=filter_tags)
- # Apply user defined transforms.
- result = StreamToExtendedDecorator(output)
- return result, output
+ summary = output._summary
+ return output, summary
def output_error(self, error_tuple):
if 'TESTR_PDB' in os.environ:
diff --git a/testrepository/ui/model.py b/testrepository/ui/model.py
index b49e65c..6a30af8 100644
--- a/testrepository/ui/model.py
+++ b/testrepository/ui/model.py
@@ -58,26 +58,15 @@ class TestResultModel(ui.BaseUITestResult):
super(TestResultModel, self).__init__(ui, get_id, previous_run)
self._suite = TestSuiteModel()
- def startTest(self, test):
- super(TestResultModel, self).startTest(test)
- self._suite.recordResult('startTest', test)
-
- def stopTest(self, test):
- self._suite.recordResult('stopTest', test)
-
def status(self, test_id=None, test_status=None, test_tags=None,
runnable=True, file_name=None, file_bytes=None, eof=False,
mime_type=None, route_code=None, timestamp=None):
+ super(TestResultModel, self).status(test_id=test_id,
+ test_status=test_status, test_tags=test_tags, runnable=runnable,
+ file_name=file_name, file_bytes=file_bytes, eof=eof,
+ mime_type=mime_type, route_code=route_code, timestamp=timestamp)
self._suite.recordResult('status', test_id, test_status)
- def addError(self, test, *args):
- super(TestResultModel, self).addError(test, *args)
- self._suite.recordResult('addError', test, *args)
-
- def addFailure(self, test, *args):
- super(TestResultModel, self).addFailure(test, *args)
- self._suite.recordResult('addFailure', test, *args)
-
def stopTestRun(self):
if self.ui.options.quiet:
return
@@ -156,9 +145,8 @@ class UI(ui.AbstractUI):
yield BytesIO(stream_value)
def make_result(self, get_id, test_command, previous_run=None):
- summary = TestResultModel(self, get_id, previous_run)
- result = testtools.StreamToExtendedDecorator(summary)
- return result, summary
+ result = TestResultModel(self, get_id, previous_run)
+ return result, result._summary
def output_error(self, error_tuple):
self.outputs.append(('error', error_tuple))
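
Taken together, both UI implementations now return a StreamResult-flavoured result plus its SummarizingResult, so callers no longer wrap anything in StreamToExtendedDecorator. A rough sketch of the consolidated contract, assuming a ui, get_id and test_command are already available:

    result, summary = ui.make_result(get_id, test_command)
    result.startTestRun()
    try:
        # Events go straight into the result; no decorator in between.
        result.status(test_id='example.test_something', test_status='fail',
                      eof=True, file_name='traceback',
                      mime_type='text/plain;charset=utf8',
                      file_bytes=b'Traceback ...')
    finally:
        result.stopTestRun()
    # Aggregate numbers come from the summary half of the returned pair.
    print(summary.testsRun, summary.get_num_failures())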