summaryrefslogtreecommitdiff
path: root/testrepository/commands/failing.py
blob: 0fa962a4ff0fc0cd5578b89ca8502fea4fc7a1e3 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
#
# Copyright (c) 2010 Testrepository Contributors
# 
# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
# license at the users choice. A copy of both licenses are available in the
# project source as Apache-2.0 and BSD. You may not use this file except in
# compliance with one of these two licences.
# 
# Unless required by applicable law or agreed to in writing, software
# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
# license you chose for the specific language governing permissions and
# limitations under that license.

"""Show the current failures in the repository."""

import optparse

from testtools import MultiTestResult, TestResult

from testrepository.commands import Command
from testrepository.results import TestResultFilter


class failing(Command):
    """Show the current failures known by the repository.

    At present this reports the failures from the most recent run; once
    partial and full runs are understood it will combine the failures of the
    last full run with any failures from subsequent partial runs, minus any
    passes that have occurred in a run more recent than a given failure.
    Deleted tests will only be detected on full runs with this approach.
    """

    options = [
        optparse.Option(
            "--subunit", action="store_true", default=False,
            help="Show output as a subunit stream."),
        optparse.Option(
            "--list", action="store_true", default=False,
            help="Show only a list of failing tests."),
        ]

    def _list_subunit(self, run):
        """Emit the run as a raw subunit stream and report an exit code."""
        # TODO only failing tests.
        stream = run.get_subunit_stream()
        self.ui.output_stream(stream)
        # NOTE(review): a stream object is truthy, so this effectively
        # signals "a stream was produced" rather than "failures occurred".
        return 1 if stream else 0

    def _make_result(self, repo, evaluator):
        """Build the TestResult to run the failing tests against.

        In --list mode only the in-memory evaluator is needed; otherwise the
        events are also forwarded, with skips filtered out, to the UI's own
        result for display.
        """
        if self.ui.options.list:
            return evaluator
        ui_result = self.ui.make_result(repo.latest_id)
        return MultiTestResult(
            evaluator, TestResultFilter(ui_result, filter_skip=True))

    def run(self):
        """Replay the known failures; return 1 if any remain, else 0."""
        repo = self.repository_factory.open(self.ui.here)
        failing_run = repo.get_failing()
        if self.ui.options.subunit:
            return self._list_subunit(failing_run)
        case = failing_run.get_test()
        evaluator = TestResult()
        result = self._make_result(repo, evaluator)
        result.startTestRun()
        try:
            case.run(result)
        finally:
            # Always close the run so UI results flush even on error.
            result.stopTestRun()
        exit_code = 0 if evaluator.wasSuccessful() else 1
        if self.ui.options.list:
            self.ui.output_tests(
                [test for test, _ in evaluator.errors + evaluator.failures])
        return exit_code