author      Jonathan Lange <jml@canonical.com>    2010-09-20 18:32:43 +0100
committer   Jonathan Lange <jml@canonical.com>    2010-09-20 18:32:43 +0100
commit      1db46a6f68fd08eade99ad84228a305a97da0db7 (patch)
tree        66fad0efcbcae082eb53b6c8d00b541da70b0fb2
parent      85ddf992483d7ce82c0cbe518bf2ed4c7b322621 (diff)
download    testrepository-1db46a6f68fd08eade99ad84228a305a97da0db7.tar.gz
Give the UI's TestResult object full responsibility for summing up the result
of the test,
-rw-r--r--  testrepository/commands/failing.py              21
-rw-r--r--  testrepository/commands/last.py                 15
-rw-r--r--  testrepository/commands/load.py                 19
-rw-r--r--  testrepository/results.py                       17
-rw-r--r--  testrepository/tests/__init__.py                 1
-rw-r--r--  testrepository/tests/commands/test_failing.py   21
-rw-r--r--  testrepository/tests/commands/test_load.py       9
-rw-r--r--  testrepository/tests/ui/test_cli.py              5
-rw-r--r--  testrepository/ui/__init__.py                   41
-rw-r--r--  testrepository/ui/cli.py                        11
-rw-r--r--  testrepository/ui/model.py                      14
11 files changed, 120 insertions, 54 deletions
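
In outline, the commit moves run summarisation out of the individual commands and into the result object returned by UI.make_result(get_id): the result counts tests itself and, in stopTestRun, calls get_id for the run id and emits the ('id', ...)/('tests', ...) values. A minimal sketch of that shape using only testtools; SummarisingResult and show are illustrative stand-ins for BaseUITestResult and ui.output_values, not code from this commit:

    from testtools import TestCase, TestResult


    class SummarisingResult(TestResult):
        # Illustrative stand-in for BaseUITestResult: the result object itself
        # reports the run summary when stopTestRun() is called.

        def __init__(self, output_values, get_id):
            super(SummarisingResult, self).__init__()
            self.output_values = output_values  # plays the role of ui.output_values
            self.get_id = get_id                # nullary callable returning the run id

        def stopTestRun(self):
            super(SummarisingResult, self).stopTestRun()
            values = [('id', self.get_id()), ('tests', self.testsRun)]
            failures = len(self.failures) + len(self.errors)
            if failures:
                values.append(('failures', failures))
            self.output_values(values)


    class Example(TestCase):
        def test_ok(self):
            pass


    def show(values):
        print(values)


    result = SummarisingResult(show, lambda: 0)
    result.startTestRun()
    Example('test_ok').run(result)
    result.stopTestRun()  # prints [('id', 0), ('tests', 1)]
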
diff --git a/testrepository/commands/failing.py b/testrepository/commands/failing.py
index ac9774f..0fa962a 100644
--- a/testrepository/commands/failing.py
+++ b/testrepository/commands/failing.py
@@ -14,13 +14,13 @@
"""Show the current failures in the repository."""
-from cStringIO import StringIO
import optparse
-import subunit.test_results
from testtools import MultiTestResult, TestResult
from testrepository.commands import Command
+from testrepository.results import TestResultFilter
+
class failing(Command):
"""Show the current failures known by the repository.
@@ -50,12 +50,11 @@ class failing(Command):
else:
return 0
- def _make_result(self, evaluator):
+ def _make_result(self, repo, evaluator):
if self.ui.options.list:
return evaluator
- output_result = self.ui.make_result(lambda: None)
- filtered = subunit.test_results.TestResultFilter(
- output_result, filter_skip=True)
+ output_result = self.ui.make_result(repo.latest_id)
+ filtered = TestResultFilter(output_result, filter_skip=True)
return MultiTestResult(evaluator, filtered)
def run(self):
@@ -66,7 +65,7 @@ class failing(Command):
case = run.get_test()
failed = False
evaluator = TestResult()
- result = self._make_result(evaluator)
+ result = self._make_result(repo, evaluator)
result.startTestRun()
try:
case.run(result)
@@ -81,12 +80,4 @@ class failing(Command):
failing_tests = [
test for test, _ in evaluator.errors + evaluator.failures]
self.ui.output_tests(failing_tests)
- return result
- if self.ui.options.quiet:
- return result
- values = []
- failures = len(evaluator.failures) + len(evaluator.errors)
- if failures:
- values.append(('failures', failures))
- self.ui.output_values(values)
return result
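
For context, testtools' MultiTestResult fans every event out to each result it wraps, which is how _make_result above can keep a plain TestResult as the evaluator (so the command can list the failing tests afterwards) while the filtered UI result handles display. A small sketch; the Broken case is purely illustrative:

    from testtools import MultiTestResult, TestCase, TestResult


    class Broken(TestCase):
        def test_fails(self):
            self.fail('boom')


    evaluator = TestResult()  # kept around to list the failures afterwards
    display = TestResult()    # stands in for the filtered UI result
    result = MultiTestResult(evaluator, display)

    result.startTestRun()
    Broken('test_fails').run(result)
    result.stopTestRun()

    # Both wrapped results saw the same failure event.
    print(len(evaluator.failures))  # 1
    print(len(display.failures))    # 1
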
diff --git a/testrepository/commands/last.py b/testrepository/commands/last.py
index 90d26c6..43228db 100644
--- a/testrepository/commands/last.py
+++ b/testrepository/commands/last.py
@@ -14,10 +14,9 @@
"""Show the last run loaded into a repository."""
-import subunit.test_results
-from testtools import MultiTestResult, TestResult
-
from testrepository.commands import Command
+from testrepository.results import TestResultFilter
+
class last(Command):
"""Show the last run loaded into a repository.
@@ -31,18 +30,14 @@ class last(Command):
run_id = repo.latest_id()
case = repo.get_test_run(run_id).get_test()
failed = False
- evaluator = TestResult()
- output_result = self.ui.make_result(lambda: None)
- filtered = subunit.test_results.TestResultFilter(
- output_result, filter_skip=True)
- result = MultiTestResult(evaluator, filtered)
+ output_result = self.ui.make_result(lambda: run_id)
+ result = TestResultFilter(output_result, filter_skip=True)
result.startTestRun()
try:
case.run(result)
finally:
result.stopTestRun()
- failed = not evaluator.wasSuccessful()
- self.output_run(run_id, evaluator)
+ failed = not result.wasSuccessful()
if failed:
return 1
else:
diff --git a/testrepository/commands/load.py b/testrepository/commands/load.py
index d646ca5..b46f32e 100644
--- a/testrepository/commands/load.py
+++ b/testrepository/commands/load.py
@@ -14,10 +14,12 @@
"""Load data into a repository."""
-import subunit.test_results
-from testtools import MultiTestResult, TestResult
+import subunit
+from testtools import MultiTestResult
from testrepository.commands import Command
+from testrepository.results import TestResultFilter
+
class load(Command):
"""Load a subunit stream into a repository.
@@ -32,22 +34,21 @@ class load(Command):
path = self.ui.here
repo = self.repository_factory.open(path)
failed = False
+ run_id = None
for stream in self.ui.iter_streams('subunit'):
inserter = repo.get_inserter()
- evaluator = TestResult()
- output_result = self.ui.make_result(lambda: None)
- filtered = subunit.test_results.TestResultFilter(
- output_result, filter_skip=True)
+ output_result = self.ui.make_result(lambda: run_id)
+ # XXX: We want to *count* skips, but not show them.
+ filtered = TestResultFilter(output_result, filter_skip=False)
case = subunit.ProtocolTestCase(stream)
filtered.startTestRun()
inserter.startTestRun()
try:
- case.run(MultiTestResult(inserter, evaluator, filtered))
+ case.run(MultiTestResult(inserter, filtered))
finally:
run_id = inserter.stopTestRun()
filtered.stopTestRun()
- failed = failed or not evaluator.wasSuccessful()
- self.output_run(run_id, evaluator)
+ failed = failed or not filtered.wasSuccessful()
if failed:
return 1
else:
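
subunit.ProtocolTestCase wraps a subunit stream as a test case that, when run, replays the recorded events into whatever result it is given, so one pass over the stream can feed both the repository inserter and the filtered UI result. A rough sketch using the v1 text protocol, with plain TestResult objects standing in for the inserter and the UI result:

    from io import BytesIO

    import subunit
    from testtools import MultiTestResult, TestResult

    # A tiny subunit v1 stream describing one passing test.
    stream = BytesIO(b'test: passes\nsuccess: passes\n')

    counter = TestResult()  # stands in for the repository inserter
    display = TestResult()  # stands in for the filtered UI result

    case = subunit.ProtocolTestCase(stream)
    result = MultiTestResult(counter, display)
    result.startTestRun()
    try:
        case.run(result)
    finally:
        result.stopTestRun()

    print(counter.testsRun)         # 1
    print(display.wasSuccessful())  # True
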
diff --git a/testrepository/results.py b/testrepository/results.py
new file mode 100644
index 0000000..2131460
--- /dev/null
+++ b/testrepository/results.py
@@ -0,0 +1,17 @@
+from subunit import test_results
+
+
+class TestResultFilter(test_results.TestResultFilter):
+ """Test result filter."""
+
+ def _filtered(self):
+ super(TestResultFilter, self)._filtered()
+ # XXX: This is really crappy. It assumes that the test result we
+ # actually care about is decorated twice. Probably the more correct
+ # thing to do is fix subunit so that incrementing 'testsRun' on a test
+ # result increments them on the decorated test result.
+ self.decorated.decorated.testsRun += 1
+
+ def addSkip(self, test, reason=None, details=None):
+ super(TestResultFilter, self).addSkip(test, reason=reason, details=details)
+ self.decorated.decorated.skip_reasons[]
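
For context on the addSkip forwarding above: testtools' TestResult keeps skip_reasons as a dict mapping each reason string to the list of tests skipped for that reason, which is what BaseUITestResult later sums when reporting the ('skips', n) value. A minimal sketch of that bookkeeping against a bare TestResult:

    import unittest

    from testtools import TestResult


    class Skips(unittest.TestCase):
        def test_skipped(self):
            self.skipTest('not today')


    result = TestResult()
    result.startTestRun()
    Skips('test_skipped').run(result)
    result.stopTestRun()

    # skip_reasons maps each reason to the tests skipped for it.
    print(sorted(result.skip_reasons))                  # ['not today']
    print(sum(map(len, result.skip_reasons.values())))  # 1, as summed in _output_run
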
diff --git a/testrepository/tests/__init__.py b/testrepository/tests/__init__.py
index 6819714..3aeb6a6 100644
--- a/testrepository/tests/__init__.py
+++ b/testrepository/tests/__init__.py
@@ -59,6 +59,7 @@ def test_suite():
'matchers',
'monkeypatch',
'repository',
+ 'results',
'setup',
'stubpackage',
'testr',
diff --git a/testrepository/tests/commands/test_failing.py b/testrepository/tests/commands/test_failing.py
index de0a9a6..fea9a9a 100644
--- a/testrepository/tests/commands/test_failing.py
+++ b/testrepository/tests/commands/test_failing.py
@@ -51,7 +51,7 @@ class TestCommand(ResourcedTestCase):
# We should have seen test outputs (of the failure) and summary data.
self.assertEqual([
('results', Wildcard),
- ('values', [('failures', 1)])],
+ ('values', [('id', 0), ('tests', 1), ('failures', 1)])],
ui.outputs)
suite = ui.outputs[0][1]
result = testtools.TestResult()
@@ -114,6 +114,16 @@ class TestCommand(ResourcedTestCase):
open = cmd.repository_factory.open
def decorate_open_with_get_failing(url):
repo = open(url)
+ inserter = repo.get_inserter()
+ inserter.startTestRun()
+ class Cases(ResourcedTestCase):
+ def failing(self):
+ self.fail('foo')
+ def ok(self):
+ pass
+ Cases('failing').run(inserter)
+ Cases('ok').run(inserter)
+ inserter.stopTestRun()
orig = repo.get_failing
def get_failing():
calls.append(True)
@@ -122,5 +132,12 @@ class TestCommand(ResourcedTestCase):
return repo
cmd.repository_factory.open = decorate_open_with_get_failing
cmd.repository_factory.initialise(ui.here)
- self.assertEqual(0, cmd.execute())
+ self.assertEqual(1, cmd.execute())
self.assertEqual([True], calls)
+
+ # XXX: Need a test to show what happens when "failing" is called and there
+ # is no previous test run.
+
+ # XXX: Probably should have a test that demonstrates what happens when
+ # "failing" is called and there is a previous test run with no failures.
+
diff --git a/testrepository/tests/commands/test_load.py b/testrepository/tests/commands/test_load.py
index cd76498..180a193 100644
--- a/testrepository/tests/commands/test_load.py
+++ b/testrepository/tests/commands/test_load.py
@@ -98,7 +98,8 @@ class TestCommandLoad(ResourcedTestCase):
cmd.repository_factory.initialise(ui.here)
self.assertEqual(0, cmd.execute())
self.assertEqual(
- [('values', [('id', 0), ('tests', 1), ('skips', 1)])],
+ [('results', Wildcard),
+ ('values', [('id', 0), ('tests', 1), ('skips', 1)])],
ui.outputs)
def test_load_new_shows_test_summary_no_tests(self):
@@ -108,7 +109,9 @@ class TestCommandLoad(ResourcedTestCase):
cmd.repository_factory = memory.RepositoryFactory()
cmd.repository_factory.initialise(ui.here)
self.assertEqual(0, cmd.execute())
- self.assertEqual([('values', [('id', 0), ('tests', 0)])], ui.outputs)
+ self.assertEqual(
+ [('results', Wildcard), ('values', [('id', 0), ('tests', 0)])],
+ ui.outputs)
def test_load_new_shows_test_summary_per_stream(self):
# This may not be the final layout, but for now per-stream stats are
@@ -120,7 +123,9 @@ class TestCommandLoad(ResourcedTestCase):
cmd.repository_factory.initialise(ui.here)
self.assertEqual(0, cmd.execute())
self.assertEqual([
+ ('results', Wildcard),
('values', [('id', 0), ('tests', 0)]),
+ ('results', Wildcard),
('values', [('id', 1), ('tests', 0)])],
ui.outputs)
diff --git a/testrepository/tests/ui/test_cli.py b/testrepository/tests/ui/test_cli.py
index 86961aa..cb576b3 100644
--- a/testrepository/tests/ui/test_cli.py
+++ b/testrepository/tests/ui/test_cli.py
@@ -173,13 +173,14 @@ class TestCLITestResult(TestCase):
def make_result(self, stream=None):
if stream is None:
stream = StringIO()
- return cli.CLITestResult(stream)
+ ui = cli.UI([], None, stream, None)
+ return ui.make_result(lambda: None)
def test_initial_stream(self):
# CLITestResult.__init__ does not do anything to the stream it is
# given.
stream = StringIO()
- cli.CLITestResult(stream)
+ cli.CLITestResult(cli.UI(None, None, None, None), stream, lambda: None)
self.assertEqual('', stream.getvalue())
def test_format_error(self):
diff --git a/testrepository/ui/__init__.py b/testrepository/ui/__init__.py
index 8e10d5f..d59ac74 100644
--- a/testrepository/ui/__init__.py
+++ b/testrepository/ui/__init__.py
@@ -22,6 +22,9 @@ See AbstractUI for details on what UI classes should do and are responsible
for.
"""
+from testtools import TestResult
+
+
class AbstractUI(object):
"""The base class for UI objects, this providers helpers and the interface.
@@ -159,3 +162,41 @@ class AbstractUI(object):
"""
# This might not be the right place.
raise NotImplementedError(self.subprocess_Popen)
+
+
+class BaseUITestResult(TestResult):
+ """An abstract test result used with the UI.
+
+ AbstractUI.make_result probably wants to return an object like this.
+ """
+
+ def __init__(self, ui, get_id):
+ """Construct an `AbstractUITestResult`.
+
+ :param ui: The UI this result is associated with.
+ :param get_id: A nullary callable that returns the id of the test run.
+ """
+ super(BaseUITestResult, self).__init__()
+ self.ui = ui
+ self.get_id = get_id
+
+ def _output_run(self, run_id):
+ """Output a test run.
+
+ :param run_id: The run id.
+ """
+ if self.ui.options.quiet:
+ return
+ values = [('id', run_id), ('tests', self.testsRun)]
+ failures = len(self.failures) + len(self.errors)
+ if failures:
+ values.append(('failures', failures))
+ skips = sum(map(len, self.skip_reasons.itervalues()))
+ if skips:
+ values.append(('skips', skips))
+ self.ui.output_values(values)
+
+ def stopTestRun(self):
+ super(BaseUITestResult, self).stopTestRun()
+ run_id = self.get_id()
+ self._output_run(run_id)
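
get_id is a nullary callable rather than a plain value so the run id can be supplied late: load, for instance, passes "lambda: run_id" while run_id is still None and only assigns it once the inserter finishes, just before the UI result's stopTestRun fires. In miniature (the names here are illustrative only):

    def report_at_end(get_id):
        # Stands in for make_result: remember the callable, only call it when
        # the run is being summarised.
        def stop_test_run():
            print('run id: %s' % get_id())
        return stop_test_run


    run_id = None
    summarise = report_at_end(lambda: run_id)  # the closure reads run_id later, not now
    run_id = 42                                # e.g. whatever inserter.stopTestRun() returned
    summarise()                                # prints: run id: 42
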
diff --git a/testrepository/ui/cli.py b/testrepository/ui/cli.py
index 126637d..da9c6de 100644
--- a/testrepository/ui/cli.py
+++ b/testrepository/ui/cli.py
@@ -18,16 +18,15 @@ from optparse import OptionParser
import os
import sys
-import testtools
-
from testrepository import ui
-class CLITestResult(testtools.TestResult):
+
+class CLITestResult(ui.BaseUITestResult):
"""A TestResult for the CLI."""
- def __init__(self, stream):
+ def __init__(self, ui, get_id, stream):
"""Construct a CLITestResult writing to stream."""
- super(CLITestResult, self).__init__()
+ super(CLITestResult, self).__init__(ui, get_id)
self.stream = stream
self.sep1 = '=' * 70 + '\n'
self.sep2 = '-' * 70 + '\n'
@@ -69,7 +68,7 @@ class UI(ui.AbstractUI):
yield self._stdin
def make_result(self, get_id):
- return CLITestResult(self._stdout)
+ return CLITestResult(self, get_id, self._stdout)
def output_error(self, error_tuple):
self._stderr.write(str(error_tuple[1]) + '\n')
diff --git a/testrepository/ui/model.py b/testrepository/ui/model.py
index b2cb38c..fa3e0d2 100644
--- a/testrepository/ui/model.py
+++ b/testrepository/ui/model.py
@@ -18,7 +18,6 @@ from cStringIO import StringIO
import optparse
from testrepository import ui
-from testtools import TestResult
class ProcessModel(object):
@@ -46,14 +45,14 @@ class TestSuiteModel(object):
getattr(result, method)(*args)
-class TestResultModel(TestResult):
+class TestResultModel(ui.BaseUITestResult):
- def __init__(self, ui):
- super(TestResultModel, self).__init__()
- self.ui = ui
+ def __init__(self, ui, get_id):
+ super(TestResultModel, self).__init__(ui, get_id)
self._suite = TestSuiteModel()
def startTest(self, test):
+ super(TestResultModel, self).startTest(test)
self._suite.recordResult('startTest', test)
def stopTest(self, test):
@@ -68,11 +67,10 @@ class TestResultModel(TestResult):
self._suite.recordResult('addFailure', test, *args)
def stopTestRun(self):
- if self.wasSuccessful():
- return
if self.ui.options.quiet:
return
self.ui.outputs.append(('results', self._suite))
+ return super(TestResultModel, self).stopTestRun()
class UI(ui.AbstractUI):
@@ -132,7 +130,7 @@ class UI(ui.AbstractUI):
yield StringIO(stream_bytes)
def make_result(self, get_id):
- return TestResultModel(self)
+ return TestResultModel(self, get_id)
def output_error(self, error_tuple):
self.outputs.append(('error', error_tuple))
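
The model UI used by the test suite records every output call as a tuple in ui.outputs, which is why the load and failing tests above can assert on an exact sequence such as [('results', Wildcard), ('values', [('id', 0), ('tests', 1), ('skips', 1)])]. A hypothetical miniature of that recording pattern (RecordingUI is not the real model.UI, just an illustration of the idea):

    class RecordingUI(object):
        # Hypothetical miniature of the model UI's recording behaviour: every
        # output lands in .outputs so tests can assert on the exact sequence.
        def __init__(self):
            self.outputs = []

        def output_values(self, values):
            self.outputs.append(('values', values))

        def output_results(self, suite):
            self.outputs.append(('results', suite))


    ui = RecordingUI()
    ui.output_results('<suite model>')
    ui.output_values([('id', 0), ('tests', 1), ('skips', 1)])
    print(ui.outputs)
    # [('results', '<suite model>'), ('values', [('id', 0), ('tests', 1), ('skips', 1)])]
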