author    Simon Hausmann <simon.hausmann@digia.com>  2012-09-24 13:09:44 +0200
committer Simon Hausmann <simon.hausmann@digia.com>  2012-09-24 13:09:44 +0200
commit    dc6262b587c71c14e30d93e57ed812e36a79a33e (patch)
tree      03ff986e7aa38bba0c0ef374f44fda52aff93f01 /Tools/Scripts/webkitpy
parent    02e1fbbefd49229b102ef107bd70ce974a2d85fb (diff)
Imported WebKit commit 6339232fec7f5d9984a33388aecfd2cbc7832053 (http://svn.webkit.org/repository/webkit/trunk@129343)
New snapshot with build fixes for latest qtbase
Diffstat (limited to 'Tools/Scripts/webkitpy')
 Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py          |   6
 Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py |   9
 Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py            |   2
 Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py          |   4
 Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py                |  67
 Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py       | 329
 Tools/Scripts/webkitpy/layout_tests/models/test_results.py                     |   3
 Tools/Scripts/webkitpy/layout_tests/port/base.py                               |  17
 Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py                   |  25
 Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py          |   5
 Tools/Scripts/webkitpy/layout_tests/port/test.py                               |  40
 Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py                        |   9
 Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py        |  12
 Tools/Scripts/webkitpy/performance_tests/perftest.py                           |  31
 Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py                  |  10
 Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py                    |   5
 Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py           |  26
 Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py            |   6
 Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py                     |   1
 Tools/Scripts/webkitpy/tool/commands/queries_unittest.py                       |  26
20 files changed, 246 insertions, 387 deletions
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
index a9df942da..69d2cc030 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
@@ -198,7 +198,7 @@ class LayoutTestRunner(object):
exp_str = got_str = 'SKIP'
expected = True
else:
- expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or test_failures.is_reftest_failure(result.failures))
+ expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or result.is_reftest)
exp_str = self._expectations.get_expectations_string(result.test_name)
got_str = self._expectations.expectation_to_string(result.type)
@@ -420,6 +420,7 @@ class Worker(object):
thread.start()
thread.join(thread_timeout_sec)
result = thread.result
+ failures = []
if thread.isAlive():
# If join() returned with the thread still running, the
# DumpRenderTree is completely hung and there's nothing
@@ -430,11 +431,12 @@ class Worker(object):
# that tradeoff in order to avoid losing the rest of this
# thread's results.
_log.error('Test thread hung: killing all DumpRenderTrees')
+ failures = [test_failures.FailureTimeout()]
driver.stop()
if not result:
- result = test_results.TestResult(test_input.test_name, failures=[], test_run_time=0)
+ result = test_results.TestResult(test_input.test_name, failures=failures, test_run_time=0)
return result
def _run_test_in_this_thread(self, test_input, stop_when_done):
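Taken together, the two hunks above make a hung DumpRenderTree surface as a TIMEOUT instead of an implicit pass: previously the fallback TestResult was built with failures=[], so a test whose thread never finished was summarized as passing. A minimal standalone sketch of the pattern (illustrative names only; webkitpy uses test_failures.FailureTimeout() and its own TestResult class):

    import threading

    def run_with_watchdog(run_test, test_name, timeout_sec):
        """Run run_test(test_name) on a worker thread, guarded by a watchdog."""
        holder = {}

        def target():
            holder['result'] = run_test(test_name)

        thread = threading.Thread(target=target)
        thread.start()
        thread.join(timeout_sec)

        failures = []
        if thread.isAlive():
            # join() returned with the thread still running: the driver hung.
            failures = ['TIMEOUT']  # stands in for test_failures.FailureTimeout()
        if 'result' not in holder:
            # The fallback result now carries the timeout failure instead of [].
            return (test_name, failures)
        return holder['result']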
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
index 4dd2ae7ae..5062d5277 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
@@ -150,12 +150,19 @@ class LayoutTestRunnerTests(unittest.TestCase):
test = 'failures/expected/reftest.html'
expectations = TestExpectations(runner._port, tests=[test])
runner._expectations = expectations
+
result_summary = ResultSummary(expectations, [test], 1, set())
- result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()])
+ result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], is_reftest=True)
runner._update_summary_with_result(result_summary, result)
self.assertEquals(1, result_summary.expected)
self.assertEquals(0, result_summary.unexpected)
+ result_summary = ResultSummary(expectations, [test], 1, set())
+ result = TestResult(test_name=test, failures=[], is_reftest=True)
+ runner._update_summary_with_result(result_summary, result)
+ self.assertEquals(0, result_summary.expected)
+ self.assertEquals(1, result_summary.unexpected)
+
def test_servers_started(self):
def start_http_server(number_of_servers=None):
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
index 1b8613f76..dcd24a446 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -192,5 +192,5 @@ class ResultSummaryTest(unittest.TestCase):
port = host.port_factory.get('test')
port._options.builder_name = 'dummy builder'
port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), "failures/expected/wontfix.html"), "Dummy test contents")
- expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False, extra_tests=['failures/expected/wontfix.html'], extra_expectations='BUGX WONTFIX : failures/expected/wontfix.html = FAIL\n')
+ expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False, extra_tests=['failures/expected/wontfix.html'], extra_expectations='Bug(x) failures/expected/wontfix.html [ WontFix ]\n')
self.assertTrue(expected_results['tests']['failures']['expected']['wontfix.html']['wontfix'])
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index edac8ebb3..09b4e1d9f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -94,7 +94,7 @@ class SingleTestRunner(object):
def run(self):
if self._reference_files:
if self._port.get_option('no_ref_tests') or self._options.reset_results:
- result = TestResult(self._test_name)
+ result = TestResult(self._test_name, is_reftest=True)
result.type = test_expectations.SKIP
return result
return self._run_reftest()
@@ -303,7 +303,7 @@ class SingleTestRunner(object):
assert(reference_output)
test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, test_output, reference_output, test_result.failures)
- return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr)
+ return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, is_reftest=True)
def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
index 7890e980e..42b518f7f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -76,6 +76,8 @@ class TestExpectationParser(object):
TIMEOUT_EXPECTATION = 'timeout'
+ MISSING_BUG_WARNING = 'Test lacks BUG modifier.'
+
def __init__(self, port, full_test_list, allow_rebaseline_modifier):
self._port = port
self._test_configuration_converter = TestConfigurationConverter(set(port.all_test_configurations()), port.configuration_specifier_macros())
@@ -154,7 +156,7 @@ class TestExpectationParser(object):
parsed_specifiers.add(modifier)
if not expectation_line.parsed_bug_modifiers and not has_wontfix and not has_bugid:
- expectation_line.warnings.append('Test lacks BUG modifier.')
+ expectation_line.warnings.append(self.MISSING_BUG_WARNING)
if self._allow_rebaseline_modifier and self.REBASELINE_MODIFIER in modifiers:
expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
@@ -206,56 +208,6 @@ class TestExpectationParser(object):
if expectation_line.path in self._full_test_list:
expectation_line.matching_tests.append(expectation_line.path)
- # FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
- @classmethod
- def _tokenize_line(cls, filename, expectation_string, line_number):
- expectation_line = cls._tokenize_line_using_new_format(filename, expectation_string, line_number)
- if expectation_line.is_invalid():
- old_expectation_line = cls._tokenize_line_using_old_format(filename, expectation_string, line_number)
- if not old_expectation_line.is_invalid():
- return old_expectation_line
- return expectation_line
-
- @classmethod
- def _tokenize_line_using_old_format(cls, filename, expectation_string, line_number):
- """Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance.
-
- The format of a test expectation line is:
-
- [[<modifiers>] : <name> = <expectations>][ //<comment>]
-
- Any errant whitespace is not preserved.
-
- """
- expectation_line = TestExpectationLine()
- expectation_line.original_string = expectation_string
- expectation_line.line_number = line_number
- expectation_line.filename = filename
- comment_index = expectation_string.find("//")
- if comment_index == -1:
- comment_index = len(expectation_string)
- else:
- expectation_line.comment = expectation_string[comment_index + 2:]
-
- remaining_string = re.sub(r"\s+", " ", expectation_string[:comment_index].strip())
- if len(remaining_string) == 0:
- return expectation_line
-
- parts = remaining_string.split(':')
- if len(parts) != 2:
- expectation_line.warnings.append("Missing a ':'" if len(parts) < 2 else "Extraneous ':'")
- else:
- test_and_expectation = parts[1].split('=')
- if len(test_and_expectation) != 2:
- expectation_line.warnings.append("Missing expectations" if len(test_and_expectation) < 2 else "Extraneous '='")
-
- if not expectation_line.is_invalid():
- expectation_line.modifiers = cls._split_space_separated(parts[0])
- expectation_line.name = test_and_expectation[0].strip()
- expectation_line.expectations = cls._split_space_separated(test_and_expectation[1])
-
- return expectation_line
-
# FIXME: Update the original modifiers and remove this once the old syntax is gone.
_configuration_tokens_list = [
'Mac', 'SnowLeopard', 'Lion', 'MountainLion',
@@ -286,8 +238,9 @@ class TestExpectationParser(object):
_inverted_expectation_tokens = dict([(value, name) for name, value in _expectation_tokens.iteritems()] +
[('TEXT', 'Failure'), ('IMAGE+TEXT', 'Failure'), ('AUDIO', 'Failure')])
+ # FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
@classmethod
- def _tokenize_line_using_new_format(cls, filename, expectation_string, line_number):
+ def _tokenize_line(cls, filename, expectation_string, line_number):
"""Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance using the old format.
The new format for a test expectation line is:
@@ -392,7 +345,13 @@ class TestExpectationParser(object):
elif state not in ('name_found', 'done'):
warnings.append('Missing a "]"')
- if not expectations:
+ if 'WONTFIX' in modifiers and 'SKIP' not in modifiers:
+ modifiers.append('SKIP')
+
+ if 'SKIP' in modifiers and expectations:
+ # FIXME: This is really a semantic warning and shouldn't be here. Remove when we drop the old syntax.
+ warnings.append('A test marked Skip or WontFix must not have other expectations.')
+ elif not expectations:
if 'SKIP' not in modifiers and 'REBASELINE' not in modifiers and 'SLOW' not in modifiers:
modifiers.append('SKIP')
expectations = ['PASS']
@@ -431,7 +390,7 @@ class TestExpectationLine(object):
self.warnings = []
def is_invalid(self):
- return len(self.warnings) > 0
+ return self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING]
def is_flaky(self):
return len(self.parsed_expectations) > 1
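The parser changes above have three observable effects: the old colon/equals tokenizer is deleted, WontFix now implies Skip, and combining Skip or WontFix with other expectations draws a warning, while a missing BUG modifier is downgraded to a non-fatal warning. A minimal illustrative sketch (not webkitpy itself) mirroring the new Skip/WontFix branch in _tokenize_line:

    def implied_tokens(modifiers, expectations):
        """Mirror of the new Skip/WontFix handling; illustrative only."""
        modifiers, expectations = list(modifiers), list(expectations)
        warnings = []
        if 'WONTFIX' in modifiers and 'SKIP' not in modifiers:
            modifiers.append('SKIP')  # WontFix now implies Skip.
        if 'SKIP' in modifiers and expectations:
            warnings.append('A test marked Skip or WontFix must not have other expectations.')
        elif not expectations:
            if not set(modifiers) & set(['SKIP', 'REBASELINE', 'SLOW']):
                modifiers.append('SKIP')
            expectations = ['PASS']  # Bare entries default to skipped-and-passing.
        return modifiers, expectations, warnings

    # A plain "foo.html [ WontFix ]" line ends up skipped and expected to PASS:
    assert implied_tokens(['WONTFIX'], []) == (['WONTFIX', 'SKIP'], ['PASS'], [])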
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
index 8827cd76d..c5606071d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -67,11 +67,11 @@ class Base(unittest.TestCase):
def get_basic_expectations(self):
return """
-BUG_TEST : failures/expected/text.html = FAIL
-BUG_TEST WONTFIX SKIP : failures/expected/crash.html = CRASH
-BUG_TEST REBASELINE : failures/expected/missing_image.html = MISSING
-BUG_TEST WONTFIX : failures/expected/image_checksum.html = IMAGE
-BUG_TEST WONTFIX MAC : failures/expected/image.html = IMAGE
+Bug(test) failures/expected/text.html [ Failure ]
+Bug(test) failures/expected/crash.html [ WontFix ]
+Bug(test) failures/expected/missing_image.html [ Rebaseline Missing ]
+Bug(test) failures/expected/image_checksum.html [ WontFix ]
+Bug(test) failures/expected/image.html [ WontFix Mac ]
"""
def parse_exp(self, expectations, overrides=None, is_lint_mode=False):
@@ -94,14 +94,14 @@ class BasicTests(Base):
def test_basic(self):
self.parse_exp(self.get_basic_expectations())
self.assert_exp('failures/expected/text.html', FAIL)
- self.assert_exp('failures/expected/image_checksum.html', IMAGE)
+ self.assert_exp('failures/expected/image_checksum.html', PASS)
self.assert_exp('passes/text.html', PASS)
self.assert_exp('failures/expected/image.html', PASS)
class MiscTests(Base):
def test_multiple_results(self):
- self.parse_exp('BUGX : failures/expected/text.html = FAIL CRASH')
+ self.parse_exp('Bug(x) failures/expected/text.html [ Crash Failure ]')
self.assertEqual(self._exp.get_expectations(
self.get_test('failures/expected/text.html')),
set([FAIL, CRASH]))
@@ -136,15 +136,13 @@ class MiscTests(Base):
# This test checks unknown tests are not present in the
# expectations and that known test part of a test category is
# present in the expectations.
- exp_str = """
-BUGX WONTFIX : failures/expected = IMAGE
-"""
+ exp_str = 'Bug(x) failures/expected [ WontFix ]'
self.parse_exp(exp_str)
test_name = 'failures/expected/unknown-test.html'
unknown_test = self.get_test(test_name)
self.assertRaises(KeyError, self._exp.get_expectations,
unknown_test)
- self.assert_exp('failures/expected/crash.html', IMAGE)
+ self.assert_exp('failures/expected/crash.html', PASS)
def test_get_modifiers(self):
self.parse_exp(self.get_basic_expectations())
@@ -170,11 +168,6 @@ BUGX WONTFIX : failures/expected = IMAGE
self.assertEqual(s,
set([self.get_test('failures/expected/crash.html'),
self.get_test('failures/expected/image_checksum.html')]))
- s = self._exp.get_test_set(WONTFIX, CRASH)
- self.assertEqual(s,
- set([self.get_test('failures/expected/crash.html')]))
- s = self._exp.get_test_set(WONTFIX, CRASH, include_skips=False)
- self.assertEqual(s, set([]))
def test_parse_warning(self):
try:
@@ -191,40 +184,33 @@ BUGX WONTFIX : failures/expected = IMAGE
"expectations:2 Path does not exist. non-existent-test.html")
self.assertEqual(str(e), warnings)
- try:
- self.parse_exp('SKIP : failures/expected/text.html = FAIL', is_lint_mode=True)
- self.assertFalse(True, "ParseError wasn't raised")
- except ParseError, e:
- warnings = u'expectations:1 Test lacks BUG modifier. failures/expected/text.html'
- self.assertEqual(str(e), warnings)
-
def test_error_on_different_platform(self):
# parse_exp uses a Windows port. Assert errors on Mac show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST MAC : failures/expected/text.html = FAIL\nBUG_TEST MAC : failures/expected/text.html = FAIL',
+ 'Bug(test) [ Mac ] failures/expected/text.html [ Failure ]\nBug(test) [ Mac ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_error_on_different_build_type(self):
# parse_exp uses a Release port. Assert errors on DEBUG show up in lint mode.
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST DEBUG : failures/expected/text.html = FAIL\nBUG_TEST DEBUG : failures/expected/text.html = FAIL',
+ 'Bug(test) [ Debug ] failures/expected/text.html [ Failure ]\nBug(test) [ Debug ] failures/expected/text.html [ Failure ]',
is_lint_mode=True)
def test_overrides(self):
- self.parse_exp("BUG_EXP: failures/expected/text.html = FAIL",
- "BUG_OVERRIDE : failures/expected/text.html = IMAGE")
+ self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
+ "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]")
self.assert_exp('failures/expected/text.html', IMAGE)
def test_overrides__directory(self):
- self.parse_exp("BUG_EXP: failures/expected/text.html = FAIL",
- "BUG_OVERRIDE: failures/expected = CRASH")
+ self.parse_exp("Bug(exp) failures/expected/text.html [ Failure ]",
+ "Bug(override) failures/expected [ Crash ]")
self.assert_exp('failures/expected/text.html', CRASH)
self.assert_exp('failures/expected/image.html', CRASH)
def test_overrides__duplicate(self):
- self.assert_bad_expectations("BUG_EXP: failures/expected/text.html = FAIL",
- "BUG_OVERRIDE : failures/expected/text.html = IMAGE\n"
- "BUG_OVERRIDE : failures/expected/text.html = CRASH\n")
+ self.assert_bad_expectations("Bug(exp) failures/expected/text.html [ Failure ]",
+ "Bug(override) failures/expected/text.html [ ImageOnlyFailure ]\n"
+ "Bug(override) failures/expected/text.html [ Crash ]\n")
def test_pixel_tests_flag(self):
def match(test, result, pixel_tests_enabled):
@@ -236,16 +222,16 @@ BUGX WONTFIX : failures/expected = IMAGE
self.assertTrue(match('failures/expected/text.html', FAIL, False))
self.assertFalse(match('failures/expected/text.html', CRASH, True))
self.assertFalse(match('failures/expected/text.html', CRASH, False))
- self.assertTrue(match('failures/expected/image_checksum.html', IMAGE,
+ self.assertTrue(match('failures/expected/image_checksum.html', PASS,
True))
self.assertTrue(match('failures/expected/image_checksum.html', PASS,
False))
- self.assertTrue(match('failures/expected/crash.html', SKIP, False))
+ self.assertTrue(match('failures/expected/crash.html', PASS, False))
self.assertTrue(match('passes/text.html', PASS, False))
def test_more_specific_override_resets_skip(self):
- self.parse_exp("BUGX SKIP : failures/expected = FAIL\n"
- "BUGX : failures/expected/text.html = IMAGE\n")
+ self.parse_exp("Bug(x) failures/expected [ Skip ]\n"
+ "Bug(x) failures/expected/text.html [ ImageOnlyFailure ]\n")
self.assert_exp('failures/expected/text.html', IMAGE)
self.assertFalse(self._port._filesystem.join(self._port.layout_tests_dir(),
'failures/expected/text.html') in
@@ -264,7 +250,7 @@ class SkippedTests(Base):
port.skipped_layout_tests = lambda tests: set(skips)
exp = TestExpectations(port, ['failures/expected/text.html'], lint)
- # Check that the expectation is for BUG_DUMMY SKIP : ... = PASS
+ # Check that the expectation is for BUG_DUMMY SKIP : ... [ Pass ]
self.assertEquals(exp.get_modifiers('failures/expected/text.html'),
[TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER])
self.assertEquals(exp.get_expectations('failures/expected/text.html'), set([PASS]))
@@ -273,22 +259,22 @@ class SkippedTests(Base):
self.check(expectations='', overrides=None, skips=['failures/expected/text.html'])
def test_duplicate_skipped_test_fails_lint(self):
- self.assertRaises(ParseError, self.check, expectations='BUGX : failures/expected/text.html = text\n', overrides=None, skips=['failures/expected/text.html'], lint=True)
+ self.assertRaises(ParseError, self.check, expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None, skips=['failures/expected/text.html'], lint=True)
def test_skipped_file_overrides_expectations(self):
- self.check(expectations='BUGX : failures/expected/text.html = FAIL\n', overrides=None,
+ self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_expectations(self):
- self.check(expectations='BUGX : failures/expected/text.html = FAIL\n', overrides=None,
+ self.check(expectations='Bug(x) failures/expected/text.html [ Failure ]\n', overrides=None,
skips=['failures/expected'])
def test_skipped_file_overrides_overrides(self):
- self.check(expectations='', overrides='BUGX : failures/expected/text.html = FAIL\n',
+ self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected/text.html'])
def test_skipped_dir_overrides_overrides(self):
- self.check(expectations='', overrides='BUGX : failures/expected/text.html = FAIL\n',
+ self.check(expectations='', overrides='Bug(x) failures/expected/text.html [ Failure ]\n',
skips=['failures/expected'])
def test_skipped_entry_dont_exist(self):
@@ -304,105 +290,23 @@ class SkippedTests(Base):
self.assertEqual('The following test foo/bar/baz.html from the Skipped list doesn\'t exist\n', logs)
-# FIXME: remove these tests when we stop supporting the old syntax.
-
class ExpectationSyntaxTests(Base):
- def test_missing_expectation(self):
- # This is missing the expectation.
- self.assert_bad_expectations('BUG_TEST: failures/expected/text.html')
-
- def test_missing_colon(self):
- # This is missing the modifiers and the ':'
- self.assert_bad_expectations('failures/expected/text.html = FAIL')
-
- def test_too_many_colons(self):
- self.assert_bad_expectations('BUG_TEST: failures/expected/text.html = PASS :')
-
- def test_too_many_equals_signs(self):
- self.assert_bad_expectations('BUG_TEST: failures/expected/text.html = FAIL = IMAGE')
-
def test_unrecognized_expectation(self):
- self.assert_bad_expectations('BUG_TEST: failures/expected/text.html = UNKNOWN')
+ self.assert_bad_expectations('Bug(test) failures/expected/text.html [ Unknown ]')
def test_macro(self):
- exp_str = """
-BUG_TEST WIN : failures/expected/text.html = FAIL
-"""
+ exp_str = 'Bug(test) [ Win ] failures/expected/text.html [ Failure ]'
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
-
-class NewExpectationSyntaxTests(unittest.TestCase):
- def assert_exp(self, line, bugs=None, modifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
- bugs = bugs or []
- modifiers = modifiers or []
- expectations = expectations or []
- warnings = warnings or []
- filename = 'TestExpectations'
- line_number = 1
- expectation_line = TestExpectationParser._tokenize_line_using_new_format(filename, line, line_number)
- self.assertEquals(expectation_line.warnings, warnings)
- self.assertEquals(expectation_line.name, name)
- self.assertEquals(expectation_line.filename, filename)
- self.assertEquals(expectation_line.line_number, line_number)
- if not warnings:
- self.assertEquals(expectation_line.modifiers, modifiers)
- self.assertEquals(expectation_line.expectations, expectations)
-
- def test_bare_name(self):
- self.assert_exp('foo.html', modifiers=['SKIP'], expectations=['PASS'])
-
- def test_bare_name_and_bugs(self):
- self.assert_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS'])
- self.assert_exp('crbug.com/12345 foo.html', modifiers=['BUGCR12345', 'SKIP'], expectations=['PASS'])
- self.assert_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS'])
- self.assert_exp('crbug.com/12345 crbug.com/34567 foo.html', modifiers=['BUGCR12345', 'BUGCR34567', 'SKIP'], expectations=['PASS'])
-
- def test_comments(self):
- self.assert_exp("# comment", name=None, comment="# comment")
- self.assert_exp("foo.html # comment", comment="# comment", expectations=['PASS'], modifiers=['SKIP'])
-
- def test_config_modifiers(self):
- self.assert_exp('[ Mac ] foo.html', modifiers=['MAC', 'SKIP'], expectations=['PASS'])
- self.assert_exp('[ Mac Vista ] foo.html', modifiers=['MAC', 'VISTA', 'SKIP'], expectations=['PASS'])
- self.assert_exp('[ Mac ] foo.html [ Failure ] ', modifiers=['MAC'], expectations=['FAIL'])
-
- def test_unknown_config(self):
- self.assert_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS'])
-
- def test_unknown_expectation(self):
- self.assert_exp('foo.html [ Audio ]', expectations=['Audio'])
-
- def test_skip(self):
- self.assert_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS'])
-
- def test_slow(self):
- self.assert_exp('foo.html [ Slow ]', modifiers=['SLOW'], expectations=['PASS'])
-
- def test_wontfix(self):
- self.assert_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS'])
-
- def test_blank_line(self):
- self.assert_exp('', name=None)
-
- def test_warnings(self):
- self.assert_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None)
-
- self.assert_exp('[ [', warnings=['unexpected "["'], name=None)
- self.assert_exp('crbug.com/12345 ]', warnings=['unexpected "]"'], name=None)
-
- self.assert_exp('foo.html crbug.com/12345 ]', warnings=['"crbug.com/12345" is not at the start of the line.'])
-
-
-class NewExpectationSyntaxTests(unittest.TestCase):
- def assert_exp(self, line, bugs=None, modifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
+ def assert_tokenize_exp(self, line, bugs=None, modifiers=None, expectations=None, warnings=None, comment=None, name='foo.html'):
bugs = bugs or []
modifiers = modifiers or []
expectations = expectations or []
warnings = warnings or []
filename = 'TestExpectations'
line_number = 1
- expectation_line = TestExpectationParser._tokenize_line_using_new_format(filename, line, line_number)
+ expectation_line = TestExpectationParser._tokenize_line(filename, line, line_number)
self.assertEquals(expectation_line.warnings, warnings)
self.assertEquals(expectation_line.name, name)
self.assertEquals(expectation_line.filename, filename)
@@ -412,88 +316,104 @@ class NewExpectationSyntaxTests(unittest.TestCase):
self.assertEquals(expectation_line.expectations, expectations)
def test_bare_name(self):
- self.assert_exp('foo.html', modifiers=['SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('foo.html', modifiers=['SKIP'], expectations=['PASS'])
def test_bare_name_and_bugs(self):
- self.assert_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS'])
- self.assert_exp('crbug.com/12345 foo.html', modifiers=['BUGCR12345', 'SKIP'], expectations=['PASS'])
- self.assert_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS'])
- self.assert_exp('crbug.com/12345 crbug.com/34567 foo.html', modifiers=['BUGCR12345', 'BUGCR34567', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('crbug.com/12345 foo.html', modifiers=['BUGCR12345', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('crbug.com/12345 crbug.com/34567 foo.html', modifiers=['BUGCR12345', 'BUGCR34567', 'SKIP'], expectations=['PASS'])
def test_comments(self):
- self.assert_exp("# comment", name=None, comment="# comment")
- self.assert_exp("foo.html # comment", comment="# comment", expectations=['PASS'], modifiers=['SKIP'])
+ self.assert_tokenize_exp("# comment", name=None, comment="# comment")
+ self.assert_tokenize_exp("foo.html # comment", comment="# comment", expectations=['PASS'], modifiers=['SKIP'])
def test_config_modifiers(self):
- self.assert_exp('[ Mac ] foo.html', modifiers=['MAC', 'SKIP'], expectations=['PASS'])
- self.assert_exp('[ Mac Vista ] foo.html', modifiers=['MAC', 'VISTA', 'SKIP'], expectations=['PASS'])
- self.assert_exp('[ Mac ] foo.html [ Failure ] ', modifiers=['MAC'], expectations=['FAIL'])
+ self.assert_tokenize_exp('[ Mac ] foo.html', modifiers=['MAC', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('[ Mac Vista ] foo.html', modifiers=['MAC', 'VISTA', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('[ Mac ] foo.html [ Failure ] ', modifiers=['MAC'], expectations=['FAIL'])
def test_unknown_config(self):
- self.assert_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS'])
def test_unknown_expectation(self):
- self.assert_exp('foo.html [ Audio ]', expectations=['Audio'])
+ self.assert_tokenize_exp('foo.html [ Audio ]', expectations=['Audio'])
def test_skip(self):
- self.assert_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS'])
def test_slow(self):
- self.assert_exp('foo.html [ Slow ]', modifiers=['SLOW'], expectations=['PASS'])
+ self.assert_tokenize_exp('foo.html [ Slow ]', modifiers=['SLOW'], expectations=['PASS'])
def test_wontfix(self):
- self.assert_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS'])
def test_blank_line(self):
- self.assert_exp('', name=None)
+ self.assert_tokenize_exp('', name=None)
def test_warnings(self):
- self.assert_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None)
- self.assert_exp('[ [', warnings=['unexpected "["'], name=None)
- self.assert_exp('crbug.com/12345 ]', warnings=['unexpected "]"'], name=None)
+ self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None)
+ self.assert_tokenize_exp('[ [', warnings=['unexpected "["'], name=None)
+ self.assert_tokenize_exp('crbug.com/12345 ]', warnings=['unexpected "]"'], name=None)
- self.assert_exp('foo.html crbug.com/12345 ]', warnings=['"crbug.com/12345" is not at the start of the line.'])
+ self.assert_tokenize_exp('foo.html crbug.com/12345 ]', warnings=['"crbug.com/12345" is not at the start of the line.'])
class SemanticTests(Base):
def test_bug_format(self):
- self.assertRaises(ParseError, self.parse_exp, 'BUG1234 : failures/expected/text.html = FAIL', is_lint_mode=True)
+ self.assertRaises(ParseError, self.parse_exp, 'BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
def test_bad_bugid(self):
try:
- self.parse_exp('BUG1234 SLOW : failures/expected/text.html = FAIL', is_lint_mode=True)
+ self.parse_exp('BUG1234 failures/expected/text.html [ Failure ]', is_lint_mode=True)
self.fail('should have raised an error about a bad bug identifier')
except ParseError, exp:
self.assertEquals(len(exp.warnings), 1)
def test_missing_bugid(self):
- self.parse_exp('SLOW : failures/expected/text.html = FAIL')
+ self.parse_exp('failures/expected/text.html [ Failure ]')
+ self.assertTrue(self._exp.has_warnings())
+
+ self.parse_exp('failures/expected/text.html [ Failure ]')
+ line = self._exp._model.get_expectation_line('failures/expected/text.html')
+ self.assertFalse(line.is_invalid())
+ self.assertEquals(line.warnings, ['Test lacks BUG modifier.'])
+
+ def test_skip_and_wontfix(self):
+ # Skip and WontFix are not allowed to have other expectations as well, because those
+ # expectations won't be exercised and may become stale.
+ self.parse_exp('failures/expected/text.html [ Failure Skip ]')
+ self.assertTrue(self._exp.has_warnings())
+
+ self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
+ self.assertTrue(self._exp.has_warnings())
+
+ self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
self.assertTrue(self._exp.has_warnings())
def test_slow_and_timeout(self):
# A test cannot be SLOW and expected to TIMEOUT.
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST SLOW : failures/expected/timeout.html = TIMEOUT', is_lint_mode=True)
+ 'Bug(test) failures/expected/timeout.html [ Slow Timeout ]', is_lint_mode=True)
def test_rebaseline(self):
# Can't lint a file w/ 'REBASELINE' in it.
self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST REBASELINE : failures/expected/text.html = FAIL',
+ 'Bug(test) failures/expected/text.html [ Failure Rebaseline ]',
is_lint_mode=True)
def test_duplicates(self):
self.assertRaises(ParseError, self.parse_exp, """
-BUG_EXP : failures/expected/text.html = FAIL
-BUG_EXP : failures/expected/text.html = IMAGE""", is_lint_mode=True)
+Bug(exp) failures/expected/text.html [ Failure ]
+Bug(exp) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
self.assertRaises(ParseError, self.parse_exp,
self.get_basic_expectations(), overrides="""
-BUG_OVERRIDE : failures/expected/text.html = FAIL
-BUG_OVERRIDE : failures/expected/text.html = IMAGE""", is_lint_mode=True)
+Bug(override) failures/expected/text.html [ Failure ]
+Bug(override) failures/expected/text.html [ ImageOnlyFailure ]""", is_lint_mode=True)
def test_missing_file(self):
- # This should log a non-fatal error.
- self.parse_exp('BUG_TEST : missing_file.html = FAIL')
+ self.parse_exp('Bug(test) missing_file.html [ Failure ]')
self.assertTrue(self._exp.has_warnings(), 1)
@@ -502,36 +422,36 @@ class PrecedenceTests(Base):
# This tests handling precedence of specific lines over directories
# and tests expectations covering entire directories.
exp_str = """
-BUGX : failures/expected/text.html = FAIL
-BUGX WONTFIX : failures/expected = IMAGE
+Bug(x) failures/expected/text.html [ Failure ]
+Bug(y) failures/expected [ WontFix ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
- self.assert_exp('failures/expected/crash.html', IMAGE)
+ self.assert_exp('failures/expected/crash.html', PASS)
exp_str = """
-BUGX WONTFIX : failures/expected = IMAGE
-BUGX : failures/expected/text.html = FAIL
+Bug(x) failures/expected [ WontFix ]
+Bug(y) failures/expected/text.html [ Failure ]
"""
self.parse_exp(exp_str)
self.assert_exp('failures/expected/text.html', FAIL)
- self.assert_exp('failures/expected/crash.html', IMAGE)
+ self.assert_exp('failures/expected/crash.html', PASS)
def test_ambiguous(self):
- self.assert_bad_expectations("BUG_TEST RELEASE : passes/text.html = PASS\n"
- "BUG_TEST WIN : passes/text.html = FAIL\n")
+ self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
+ "Bug(test) [ Win ] passes/text.html [ Failure ]\n")
def test_more_modifiers(self):
- self.assert_bad_expectations("BUG_TEST RELEASE : passes/text.html = PASS\n"
- "BUG_TEST WIN RELEASE : passes/text.html = FAIL\n")
+ self.assert_bad_expectations("Bug(test) [ Release ] passes/text.html [ Pass ]\n"
+ "Bug(test) [ Win Release ] passes/text.html [ Failure ]\n")
def test_order_in_file(self):
- self.assert_bad_expectations("BUG_TEST WIN RELEASE : passes/text.html = FAIL\n"
- "BUG_TEST RELEASE : passes/text.html = PASS\n")
+ self.assert_bad_expectations("Bug(test) [ Win Release ] : passes/text.html [ Failure ]\n"
+ "Bug(test) [ Release ] : passes/text.html [ Pass ]\n")
def test_macro_overrides(self):
- self.assert_bad_expectations("BUG_TEST WIN : passes/text.html = PASS\n"
- "BUG_TEST XP : passes/text.html = FAIL\n")
+ self.assert_bad_expectations("Bug(test) [ Win ] passes/text.html [ Pass ]\n"
+ "Bug(test) [ XP ] passes/text.html [ Failure ]\n")
class RemoveConfigurationsTest(Base):
@@ -598,69 +518,6 @@ class RebaseliningTest(Base):
self.assertEqual(len(self._exp.get_rebaselining_failures()), 0)
-# FIXME: remove most of these tests when we no longer support the old syntax.
-
-class TestExpectationParserTests(unittest.TestCase):
- def _tokenize(self, line):
- return TestExpectationParser._tokenize_line('path', line, 0)
-
- def test_tokenize_blank(self):
- expectation = self._tokenize('')
- self.assertEqual(expectation.comment, None)
- self.assertEqual(len(expectation.warnings), 0)
-
- def test_tokenize_extra_colon(self):
- expectation = self._tokenize('FOO : : bar')
- self.assertEqual(expectation.warnings, ['":" is not legal in the new TestExpectations syntax.'])
-
- def test_tokenize_empty_comment(self):
- expectation = self._tokenize('//')
- self.assertEqual(expectation.comment, '')
- self.assertEqual(len(expectation.warnings), 0)
-
- def test_tokenize_comment(self):
- expectation = self._tokenize('//Qux.')
- self.assertEqual(expectation.comment, 'Qux.')
- self.assertEqual(len(expectation.warnings), 0)
-
- def test_tokenize_missing_equal(self):
- expectation = self._tokenize('FOO : bar')
- self.assertEqual(expectation.warnings, ['":" is not legal in the new TestExpectations syntax.'])
-
- def test_tokenize_extra_equal(self):
- expectation = self._tokenize('FOO : bar = BAZ = Qux.')
- self.assertEqual(expectation.warnings, ['":" is not legal in the new TestExpectations syntax.'])
-
- def test_tokenize_valid(self):
- expectation = self._tokenize('FOO : bar = BAZ')
- self.assertEqual(expectation.comment, None)
- self.assertEqual(len(expectation.warnings), 0)
-
- def test_tokenize_valid_with_comment(self):
- expectation = self._tokenize('FOO : bar = BAZ //Qux.')
- self.assertEqual(expectation.comment, 'Qux.')
- self.assertEqual(str(expectation.modifiers), "['FOO']")
- self.assertEqual(str(expectation.expectations), "['BAZ']")
- self.assertEqual(len(expectation.warnings), 0)
-
- def test_tokenize_valid_with_multiple_modifiers(self):
- expectation = self._tokenize('FOO1 FOO2 : bar = BAZ //Qux.')
- self.assertEqual(expectation.comment, 'Qux.')
- self.assertEqual(str(expectation.modifiers), "['FOO1', 'FOO2']")
- self.assertEqual(str(expectation.expectations), "['BAZ']")
- self.assertEqual(len(expectation.warnings), 0)
-
- def test_parse_empty_string(self):
- host = MockHost()
- test_port = host.port_factory.get('test-win-xp', None)
- test_port.test_exists = lambda test: True
- full_test_list = []
- expectation_line = self._tokenize('')
- parser = TestExpectationParser(test_port, full_test_list, allow_rebaseline_modifier=False)
- parser._parse_line(expectation_line)
- self.assertFalse(expectation_line.is_invalid())
-
-
class TestExpectationSerializationTests(unittest.TestCase):
def __init__(self, testFunc):
host = MockHost()
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_results.py b/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
index 346d5a640..d6c83014a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
@@ -38,11 +38,12 @@ class TestResult(object):
def loads(string):
return cPickle.loads(string)
- def __init__(self, test_name, failures=None, test_run_time=None, has_stderr=False):
+ def __init__(self, test_name, failures=None, test_run_time=None, has_stderr=False, is_reftest=False):
self.test_name = test_name
self.failures = failures or []
self.test_run_time = test_run_time or 0
self.has_stderr = has_stderr
+ self.is_reftest = is_reftest
# FIXME: Setting this in the constructor makes this class hard to mutate.
self.type = test_failures.determine_result_type(failures)
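The new flag lets callers ask "is this a reftest?" directly instead of inferring it from the failure list, which breaks down when a reftest unexpectedly passes and the list is empty. A short usage sketch of the patched constructor:

    from webkitpy.layout_tests.models.test_results import TestResult

    # An unexpectedly passing reftest: no failures, but still a reftest.
    result = TestResult('failures/expected/reftest.html',
                        failures=[], test_run_time=0.1, is_reftest=True)
    # layout_test_runner.py (above) can now pass result.is_reftest to
    # matches_an_expected_result() even though result.failures is empty.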
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index 8997a5fe8..cd57032e9 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -786,17 +786,20 @@ class Port(object):
return self._filesystem.join(self._webkit_baseline_path(port_name), 'TestExpectations')
def relative_test_filename(self, filename):
- """Returns a test_name a realtive unix-style path for a filename under the LayoutTests
- directory. Filenames outside the LayoutTests directory should raise
- an error."""
+ """Returns a test_name a relative unix-style path for a filename under the LayoutTests
+ directory. Ports may legitimately return abspaths here if no relpath makes sense."""
# Ports that run on windows need to override this method to deal with
# filenames with backslashes in them.
- assert filename.startswith(self.layout_tests_dir()), "%s did not start with %s" % (filename, self.layout_tests_dir())
- return filename[len(self.layout_tests_dir()) + 1:]
+ if filename.startswith(self.layout_tests_dir()):
+ return self.host.filesystem.relpath(filename, self.layout_tests_dir())
+ else:
+ return self.host.filesystem.abspath(filename)
def relative_perf_test_filename(self, filename):
- assert filename.startswith(self.perf_tests_dir()), "%s did not start with %s" % (filename, self.perf_tests_dir())
- return filename[len(self.perf_tests_dir()) + 1:]
+ if filename.startswith(self.perf_tests_dir()):
+ return self.host.filesystem.relpath(filename, self.perf_tests_dir())
+ else:
+ return self.host.filesystem.abspath(filename)
@memoized
def abspath_for_test(self, test_name):
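The assertions are replaced by a relpath/abspath fallback, so filenames outside the LayoutTests (or PerformanceTests) tree now map to absolute paths instead of crashing the run. A behavior sketch using os.path directly (webkitpy routes these calls through its filesystem wrapper):

    import os.path

    def relative_test_filename(layout_tests_dir, filename):
        if filename.startswith(layout_tests_dir):
            return os.path.relpath(filename, layout_tests_dir)
        return os.path.abspath(filename)

    assert relative_test_filename('/wk/LayoutTests',
                                  '/wk/LayoutTests/fast/a.html') == 'fast/a.html'
    assert relative_test_filename('/wk/LayoutTests',
                                  '/other/b.html') == '/other/b.html'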
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
index fcccec15a..3dfeab7a3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
@@ -56,6 +56,9 @@ COMMAND_LINE_FILE = DEVICE_SOURCE_ROOT_DIR + 'chrome-native-tests-command-line'
DEVICE_DRT_DIR = DEVICE_SOURCE_ROOT_DIR + 'drt/'
DEVICE_FORWARDER_PATH = DEVICE_DRT_DIR + 'forwarder'
+# Path on the device where the test framework will create the fifo pipes.
+DEVICE_FIFO_PATH = '/data/data/org.chromium.native_test/files/'
+
DRT_APP_PACKAGE = 'org.chromium.native_test'
DRT_ACTIVITY_FULL_NAME = DRT_APP_PACKAGE + '/.ChromeNativeTestActivity'
DRT_APP_CACHE_DIR = DEVICE_DRT_DIR + 'cache/'
@@ -311,9 +314,9 @@ class ChromiumAndroidDriver(driver.Driver):
def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
super(ChromiumAndroidDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
self._cmd_line = None
- self._in_fifo_path = DEVICE_DRT_DIR + 'DumpRenderTree.in'
- self._out_fifo_path = DEVICE_DRT_DIR + 'DumpRenderTree.out'
- self._err_fifo_path = DEVICE_DRT_DIR + 'DumpRenderTree.err'
+ self._in_fifo_path = DEVICE_FIFO_PATH + 'stdin.fifo'
+ self._out_fifo_path = DEVICE_FIFO_PATH + 'test.fifo'
+ self._err_fifo_path = DEVICE_FIFO_PATH + 'stderr.fifo'
self._read_stdout_process = None
self._read_stderr_process = None
self._forwarder_process = None
@@ -510,11 +513,7 @@ class ChromiumAndroidDriver(driver.Driver):
return self._run_adb_command(['shell', 'ls', full_file_path]).strip() == full_file_path
def _drt_cmd_line(self, pixel_tests, per_test_args):
- return driver.Driver.cmd_line(self, pixel_tests, per_test_args) + [
- '--in-fifo=' + self._in_fifo_path,
- '--out-fifo=' + self._out_fifo_path,
- '--err-fifo=' + self._err_fifo_path,
- ]
+ return driver.Driver.cmd_line(self, pixel_tests, per_test_args) + ['--create-stdin-fifo', '--separate-stderr-fifo']
@staticmethod
def _loop_with_timeout(condition, timeout_secs):
@@ -530,7 +529,9 @@ class ChromiumAndroidDriver(driver.Driver):
self._file_exists_on_device(self._err_fifo_path))
def _remove_all_pipes(self):
- self._run_adb_command(['shell', 'rm', self._in_fifo_path, self._out_fifo_path, self._err_fifo_path])
+ for file in [self._in_fifo_path, self._out_fifo_path, self._err_fifo_path]:
+ self._run_adb_command(['shell', 'rm', file])
+
return (not self._file_exists_on_device(self._in_fifo_path) and
not self._file_exists_on_device(self._out_fifo_path) and
not self._file_exists_on_device(self._err_fifo_path))
@@ -653,14 +654,16 @@ class ChromiumAndroidDriver(driver.Driver):
self._forwarder_process.kill()
self._forwarder_process = None
- if not ChromiumAndroidDriver._loop_with_timeout(self._remove_all_pipes, DRT_START_STOP_TIMEOUT_SECS):
- raise AssertionError('Failed to remove fifo files. May be locked.')
+ if self._has_setup:
+ if not ChromiumAndroidDriver._loop_with_timeout(self._remove_all_pipes, DRT_START_STOP_TIMEOUT_SECS):
+ raise AssertionError('Failed to remove fifo files. May be locked.')
def _command_from_driver_input(self, driver_input):
command = super(ChromiumAndroidDriver, self)._command_from_driver_input(driver_input)
if command.startswith('/'):
# Convert the host file path to a device file path. See comment of
# DEVICE_LAYOUT_TESTS_DIR for details.
+ # FIXME: what happens if command lies outside of the layout_tests_dir on the host?
command = DEVICE_LAYOUT_TESTS_DIR + self._port.relative_test_filename(command)
return command
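Removing the pipes with one rm per file means a single missing fifo no longer aborts cleanup of the others, and cleanup is now skipped entirely unless setup actually ran. The _loop_with_timeout helper used above polls a condition until a deadline; its body is not shown in this diff, so the following is only a plausible re-implementation sketch:

    import time

    def loop_with_timeout(condition, timeout_secs, poll_interval=0.5):
        """Poll condition() until it returns true or timeout_secs elapse."""
        deadline = time.time() + timeout_secs
        while time.time() < deadline:
            if condition():
                return True
            time.sleep(poll_interval)
        return False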
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
index f6cc51301..2ffb77979 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
@@ -232,9 +232,8 @@ class ChromiumAndroidDriverTest(unittest.TestCase):
def test_drt_cmd_line(self):
cmd_line = self.driver._drt_cmd_line(True, ['--a'])
self.assertTrue('--a' in cmd_line)
- self.assertTrue('--in-fifo=' + chromium_android.DEVICE_DRT_DIR + 'DumpRenderTree.in' in cmd_line)
- self.assertTrue('--out-fifo=' + chromium_android.DEVICE_DRT_DIR + 'DumpRenderTree.out' in cmd_line)
- self.assertTrue('--err-fifo=' + chromium_android.DEVICE_DRT_DIR + 'DumpRenderTree.err' in cmd_line)
+ self.assertTrue('--create-stdin-fifo' in cmd_line)
+ self.assertTrue('--separate-stderr-fifo' in cmd_line)
def test_read_prompt(self):
self.driver._server_process = driver_unittest.MockServerProcess(lines=['root@android:/ # '])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index afb2b84d4..cbc0dbed1 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -258,25 +258,25 @@ def add_unit_tests_to_mock_filesystem(filesystem):
filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/platform/test')
if not filesystem.exists(LAYOUT_TEST_DIR + '/platform/test/TestExpectations'):
filesystem.write_text_file(LAYOUT_TEST_DIR + '/platform/test/TestExpectations', """
-WONTFIX : failures/expected/crash.html = CRASH
-WONTFIX : failures/expected/image.html = IMAGE
-WONTFIX : failures/expected/audio.html = FAIL
-WONTFIX : failures/expected/image_checksum.html = IMAGE
-WONTFIX : failures/expected/mismatch.html = IMAGE
-WONTFIX : failures/expected/missing_check.html = MISSING PASS
-WONTFIX : failures/expected/missing_image.html = MISSING PASS
-WONTFIX : failures/expected/missing_audio.html = MISSING PASS
-WONTFIX : failures/expected/missing_text.html = MISSING PASS
-WONTFIX : failures/expected/newlines_leading.html = FAIL
-WONTFIX : failures/expected/newlines_trailing.html = FAIL
-WONTFIX : failures/expected/newlines_with_excess_CR.html = FAIL
-WONTFIX : failures/expected/reftest.html = IMAGE
-WONTFIX : failures/expected/text.html = FAIL
-WONTFIX : failures/expected/timeout.html = TIMEOUT
-WONTFIX SKIP : failures/expected/hang.html = TIMEOUT
-WONTFIX SKIP : failures/expected/keyboard.html = CRASH
-WONTFIX SKIP : failures/expected/exception.html = CRASH
-WONTFIX SKIP : passes/skipped/skip.html = PASS
+Bug(test) failures/expected/crash.html [ Crash ]
+Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/audio.html [ Failure ]
+Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/missing_check.html [ Missing Pass ]
+Bug(test) failures/expected/missing_image.html [ Missing Pass ]
+Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
+Bug(test) failures/expected/missing_text.html [ Missing Pass ]
+Bug(test) failures/expected/newlines_leading.html [ Failure ]
+Bug(test) failures/expected/newlines_trailing.html [ Failure ]
+Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
+Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
+Bug(test) failures/expected/text.html [ Failure ]
+Bug(test) failures/expected/timeout.html [ Timeout ]
+Bug(test) failures/expected/hang.html [ WontFix ]
+Bug(test) failures/expected/keyboard.html [ WontFix ]
+Bug(test) failures/expected/exception.html [ WontFix ]
+Bug(test) passes/skipped/skip.html [ Skip ]
""")
filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
@@ -537,7 +537,7 @@ class TestDriver(Driver):
if test.exception:
raise ValueError('exception from ' + test_name)
if test.hang:
- time.sleep((float(test_input.timeout) * 4) / 1000.0)
+ time.sleep((float(test_input.timeout) * 4) / 1000.0 + 1.0) # The 1.0 comes from thread_padding_sec in layout_test_runner.py.
audio = None
actual_text = test.actual_text
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index a79ab3ce3..e784cb61d 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -142,13 +142,10 @@ def _set_up_derived_options(port, options):
options.slow_time_out_ms = str(5 * int(options.time_out_ms))
if options.additional_platform_directory:
- normalized_platform_directories = []
+ additional_platform_directories = []
for path in options.additional_platform_directory:
- if not port.host.filesystem.isabs(path):
- warnings.append("--additional-platform-directory=%s is ignored since it is not absolute" % path)
- continue
- normalized_platform_directories.append(port.host.filesystem.normpath(path))
- options.additional_platform_directory = normalized_platform_directories
+ additional_platform_directories.append(port.host.filesystem.abspath(path))
+ options.additional_platform_directory = additional_platform_directories
if not options.http and options.skipped in ('ignore', 'only'):
warnings.append("--force/--skipped=%s overrides --no-http." % (options.skipped))
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index 17a12fc6b..5e6e422eb 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -347,7 +347,9 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
'failures/expected/hang.html'],
tests_included=True)
- self.assertEqual(res, 0)
+ # Note that hang.html is marked as WontFix and all WontFix tests are
+ # expected to Pass, so that actually running them generates an "unexpected" error.
+ self.assertEqual(res, 1)
self.assertNotEmpty(out)
self.assertNotEmpty(err)
@@ -472,7 +474,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
# This tests that we skip both known failing and known flaky tests. Because there are
# no known flaky tests in the default test_expectations, we add additional expectations.
host = MockHost()
- host.filesystem.write_text_file('/tmp/overrides.txt', 'BUGX : passes/image.html = IMAGE PASS\n')
+ host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')
batches = get_tests_run(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
has_passes_text = False
@@ -849,13 +851,11 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
-
- res, buildbot_output, regular_output, user = logging_run(['--additional-platform-directory', 'foo'])
- self.assertContains(regular_output, '--additional-platform-directory=foo is ignored since it is not absolute\n')
+ self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))
def test_additional_expectations(self):
host = MockHost()
- host.filesystem.write_text_file('/tmp/overrides.txt', 'BUGX : failures/unexpected/mismatch.html = IMAGE\n')
+ host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
tests_included=True, host=host))
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index 69d9363f4..1dfbcd229 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -114,8 +114,8 @@ class PerfTest(object):
_description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
_result_classes = ['Time', 'JS Heap', 'Malloc']
_result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):')
- _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit']
- _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
+ _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
+ _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
def parse_output(self, output):
test_failed = False
@@ -138,7 +138,10 @@ class PerfTest(object):
score = self._score_regex.match(line)
if score:
key = score.group('key')
- value = float(score.group('value'))
+ if ', ' in score.group('value'):
+ value = [float(number) for number in score.group('value').split(', ')]
+ else:
+ value = float(score.group('value'))
unit = score.group('unit')
name = test_name
if result_class != 'Time':
@@ -154,7 +157,12 @@ class PerfTest(object):
test_failed = True
_log.error(line)
- if test_failed or set(self._statistics_keys) != set(results[test_name].keys()):
+ if test_failed:
+ return None
+
+ if set(self._statistics_keys) != set(results[test_name].keys() + ['values']):
+ # values is not provided by Dromaeo tests.
+ _log.error("The test didn't report all statistics.")
return None
for result_name in ordered_results_keys:
@@ -208,23 +216,24 @@ class PageLoadingPerfTest(PerfTest):
continue
test_times.append(output.test_time * 1000)
- test_times = sorted(test_times)
+ sorted_test_times = sorted(test_times)
# Compute the mean and variance using a numerically stable algorithm.
squareSum = 0
mean = 0
- valueSum = sum(test_times)
- for i, time in enumerate(test_times):
+ valueSum = sum(sorted_test_times)
+ for i, time in enumerate(sorted_test_times):
delta = time - mean
sweep = i + 1.0
mean += delta / sweep
squareSum += delta * delta * (i / sweep)
middle = int(len(test_times) / 2)
- results = {'avg': mean,
- 'min': min(test_times),
- 'max': max(test_times),
- 'median': test_times[middle] if len(test_times) % 2 else (test_times[middle - 1] + test_times[middle]) / 2,
+ results = {'values': test_times,
+ 'avg': mean,
+ 'min': sorted_test_times[0],
+ 'max': sorted_test_times[-1],
+ 'median': sorted_test_times[middle] if len(sorted_test_times) % 2 else (sorted_test_times[middle - 1] + sorted_test_times[middle]) / 2,
'stdev': math.sqrt(squareSum),
'unit': 'ms'}
self.output_statistics(self.test_name(), results, '')
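Two things change in perftest.py: the score regex now also matches a comma-separated "values" list (parsed into a list of floats), and run() reports the raw, unsorted per-run times alongside order statistics computed on a sorted copy. The mean/variance loop is Welford's numerically stable one-pass algorithm. A standalone sketch, checked against the numbers the unit test below expects (names are illustrative, not webkitpy's):

    import math

    def summarize(test_times):
        """Welford one-pass mean/variance plus order statistics (sketch)."""
        sorted_times = sorted(test_times)
        square_sum = 0.0
        mean = 0.0
        for i, t in enumerate(sorted_times):
            delta = t - mean
            sweep = i + 1.0
            mean += delta / sweep                      # running mean
            square_sum += delta * delta * (i / sweep)  # sum of squared deviations
        middle = len(sorted_times) // 2
        median = (sorted_times[middle] if len(sorted_times) % 2
                  else (sorted_times[middle - 1] + sorted_times[middle]) / 2)
        return {'values': test_times, 'avg': mean, 'median': median,
                'min': sorted_times[0], 'max': sorted_times[-1],
                'stdev': math.sqrt(square_sum), 'unit': 'ms'}

    # 20 mock runs of 1..20 seconds, with the first discarded as warm-up,
    # as the unit test below expects:
    stats = summarize([i * 1000 for i in range(2, 21)])
    assert stats['avg'] == 11000.0
    assert abs(stats['stdev'] - math.sqrt(570 * 1000 * 1000)) < 1e-6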
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
index 2b35e71ee..27a4bb385 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -50,6 +50,7 @@ class MainTest(unittest.TestCase):
'Ignoring warm-up run (1115)',
'',
'Time:',
+ 'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
'avg 1100 ms',
'median 1101 ms',
'stdev 11 ms',
@@ -60,7 +61,8 @@ class MainTest(unittest.TestCase):
try:
test = PerfTest(None, 'some-test', '/path/some-dir/some-test')
self.assertEqual(test.parse_output(output),
- {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
+ {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
+ 'values': [i for i in range(1, 20)]}})
finally:
pass
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
@@ -76,6 +78,7 @@ class MainTest(unittest.TestCase):
'some-unrecognizable-line',
'',
                'Time:',
+ 'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
'avg 1100 ms',
'median 1101 ms',
'stdev 11 ms',
@@ -109,12 +112,13 @@ class TestPageLoadingPerfTest(unittest.TestCase):
def test_run(self):
test = PageLoadingPerfTest(None, 'some-test', '/path/some-dir/some-test')
- driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
+ driver = TestPageLoadingPerfTest.MockDriver(range(1, 21))
output_capture = OutputCapture()
output_capture.capture_output()
try:
self.assertEqual(test.run(driver, None),
- {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms'}})
+ {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms',
+ 'values': [i * 1000 for i in range(2, 21)]}})
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
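As a sanity check on the expected values in this test: the mock driver reports runs of 1 through 20 seconds, and the expected minimum of 2000 ms shows the first run is discarded as warm-up, leaving 2000..20000 ms. The remaining numbers follow directly, verified here with the naive formulas:

import math

# The mock driver yields 1..20 s; the expected 'min' of 2000 ms shows the
# first run is dropped as warm-up, leaving 2000..20000 ms.
times = [i * 1000 for i in range(2, 21)]
mean = sum(times) / float(len(times))
square_sum = sum((t - mean) ** 2 for t in times)
assert mean == 11000.0
assert square_sum == 570 * 1000 * 1000
assert math.sqrt(square_sum) == math.sqrt(570 * 1000 * 1000)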
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index 1ef3cf07e..e01b2aedb 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -191,6 +191,11 @@ class PerfTestsRunner(object):
if not output:
return self.EXIT_CODE_BAD_MERGE
results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
+ else:
+            # FIXME: Remove this code once webkit-perf.appspot.com supports "values".
+ for result in output['results'].values():
+ if isinstance(result, dict) and 'values' in result:
+ del result['values']
self._generate_output_files(output_json_path, results_page_path, output)
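A minimal sketch of the scrubbing step added above, pulled out into a hypothetical helper: it walks the results dictionary in place, dropping the raw 'values' arrays the upload server cannot accept yet, while leaving scalar entries (such as the bare inspector number) untouched.

def strip_values_for_upload(output):
    # Hypothetical helper mirroring the inline loop above: drop the raw
    # sample arrays; plain-number entries are left alone.
    for result in output['results'].values():
        if isinstance(result, dict) and 'values' in result:
            del result['values']

output = {'results': {
    'Bindings/event-target-wrapper': {'avg': 1489.05, 'unit': 'ms',
                                      'values': [1504, 1505, 1510]},
    'inspector/pass.html:group_name:test_name': 42,
}}
strip_values_for_upload(output)
assert 'values' not in output['results']['Bindings/event-target-wrapper']
assert output['results']['inspector/pass.html:group_name:test_name'] == 42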
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index 4ca9500ac..d46d7e73e 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -92,6 +92,7 @@ Ignoring warm-up run (1502)
1471
Time:
+values 1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471 ms
avg 1489.05 ms
median 1487 ms
stdev 14.46 ms
@@ -103,6 +104,7 @@ max 1510 ms
Ignoring warm-up run (1115)
Time:
+values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
avg 1100 ms
median 1101 ms
stdev 11 ms
@@ -114,6 +116,7 @@ max 1120 ms
Ignoring warm-up run (1115)
Time:
+values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
avg 1100 ms
median 1101 ms
stdev 11 ms
@@ -121,6 +124,7 @@ min 1080 ms
max 1120 ms
JS Heap:
+values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
avg 832000 bytes
median 829000 bytes
stdev 15000 bytes
@@ -128,6 +132,7 @@ min 811000 bytes
max 848000 bytes
Malloc:
+values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
avg 532000 bytes
median 529000 bytes
stdev 13000 bytes
@@ -286,9 +291,10 @@ max 548000 bytes
'Finished: 0.1 s',
'', '']))
results = runner.load_output_json()[0]['results']
- self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms'})
- self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes'})
- self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes'})
+ values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
+ self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms', 'values': values})
+ self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes', 'values': values})
+ self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes', 'values': values})
def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=False, expected_exit_code=0):
filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
@@ -330,6 +336,12 @@ max 548000 bytes
return logs
_event_target_wrapper_and_inspector_results = {
+ "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms",
+ "values": [1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471]},
+ "inspector/pass.html:group_name:test_name": 42}
+
+    # FIXME: Remove this variant once perf-o-matic supports "values".
+ _event_target_wrapper_and_inspector_results_without_values = {
"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
"inspector/pass.html:group_name:test_name": 42}
@@ -338,7 +350,7 @@ max 548000 bytes
'--test-results-server=some.host'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
self.assertEqual(runner.load_output_json(), {
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
"webkit-revision": "5678", "branch": "webkit-trunk"})
def test_run_with_description(self):
@@ -347,7 +359,7 @@ max 548000 bytes
self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
self.assertEqual(runner.load_output_json(), {
"timestamp": 123456789, "description": "some description",
- "results": self._event_target_wrapper_and_inspector_results,
+ "results": self._event_target_wrapper_and_inspector_results_without_values,
"webkit-revision": "5678", "branch": "webkit-trunk"})
def create_runner_and_setup_results_template(self, args=[]):
@@ -437,7 +449,7 @@ max 548000 bytes
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
self.assertEqual(runner.load_output_json(), {
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
"webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"})
def test_run_with_bad_slave_config_json(self):
@@ -456,7 +468,7 @@ max 548000 bytes
port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
self.assertEqual(runner.load_output_json(), {
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
"webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"})
def test_run_with_upload_json(self):
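One design note on the fixtures above: the patch keeps two hand-maintained dictionaries that differ only in the 'values' entries. An alternative (a sketch, not what the patch does) would derive the upload-safe variant from the full one, so the two can never drift apart:

import copy

# Sketch of deriving the 'without values' fixture instead of hand-copying it
# (illustrative data; not what the patch does).
_with_values = {
    'Bindings/event-target-wrapper': {'max': 1510, 'avg': 1489.05, 'median': 1487,
                                      'min': 1471, 'stdev': 14.46, 'unit': 'ms',
                                      'values': [1504, 1505, 1510]},
    'inspector/pass.html:group_name:test_name': 42,
}

_without_values = copy.deepcopy(_with_values)
for result in _without_values.values():
    if isinstance(result, dict):
        result.pop('values', None)

assert 'values' not in _without_values['Bindings/event-target-wrapper']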
diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
index c13b22764..f12397787 100644
--- a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
@@ -107,10 +107,10 @@ class TestExpectationsTestCase(unittest.TestCase):
self.assertTrue(self._error_collector.turned_off_filtering)
def test_valid_expectations(self):
- self.assert_lines_lint(["BUGCR1234 MAC : passes/text.html = PASS FAIL"], should_pass=True)
+ self.assert_lines_lint(["crbug.com/1234 [ Mac ] passes/text.html [ Pass Failure ]"], should_pass=True)
def test_invalid_expectations(self):
- self.assert_lines_lint(["BUG1234 : passes/text.html = GIVE UP"], should_pass=False)
+        self.assert_lines_lint(["Bug(me) passes/text.html [ Give Up ]"], should_pass=False)
def test_tab(self):
- self.assert_lines_lint(["\tBUGWK1 : passes/text.html = PASS"], should_pass=False, expected_output="Line contains tab character. [whitespace/tab] [5]")
+ self.assert_lines_lint(["\twebkit.org/b/1 passes/text.html [ Pass ]"], should_pass=False, expected_output="Line contains tab character. [whitespace/tab] [5]")
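For context, these fixtures move from the old expectations grammar (BUGCR1234 MAC : test = PASS FAIL) to the newer bracketed one. A small illustrative sketch of the line shapes and of the tab rule the last test exercises; the lint function is a stand-in, not the checker's actual code:

# Fixture lines in the new TestExpectations grammar: a bug link, an optional
# bracketed modifier list, the test path, then a bracketed expectations list.
VALID = 'crbug.com/1234 [ Mac ] passes/text.html [ Pass Failure ]'
INVALID = 'Bug(me) passes/text.html [ Give Up ]'       # 'Give Up' is not a known expectation
TABBED = '\twebkit.org/b/1 passes/text.html [ Pass ]'  # tabs are a style error

def lint_tabs(line):
    # Stand-in for the checker's whitespace/tab rule, not its actual code.
    if '\t' in line:
        return 'Line contains tab character. [whitespace/tab] [5]'
    return None

assert lint_tabs(VALID) is None
assert lint_tabs(TABBED) is not None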
diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
index 08c8bf685..c5875b6f2 100644
--- a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
+++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
@@ -203,3 +203,4 @@ class ChromiumAndroidEWS(AbstractChromiumEWS):
class MacEWS(AbstractEarlyWarningSystem):
name = "mac-ews"
port_name = "mac"
+ _default_run_tests = True
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
index 1da17e48b..79bf1ca9d 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
@@ -178,35 +178,35 @@ class PrintExpectationsTest(unittest.TestCase):
def test_basic(self):
self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
('// For test-win-xp\n'
- 'failures/expected/image.html [ ImageOnlyFailure WontFix ]\n'
- 'failures/expected/text.html [ Failure WontFix ]\n'))
+ 'failures/expected/image.html [ ImageOnlyFailure ]\n'
+ 'failures/expected/text.html [ Failure ]\n'))
def test_multiple(self):
self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
('// For test-win-vista\n'
- 'failures/expected/image.html [ ImageOnlyFailure WontFix ]\n'
- 'failures/expected/text.html [ Failure WontFix ]\n'
+ 'failures/expected/image.html [ ImageOnlyFailure ]\n'
+ 'failures/expected/text.html [ Failure ]\n'
'\n'
'// For test-win-win7\n'
- 'failures/expected/image.html [ ImageOnlyFailure WontFix ]\n'
- 'failures/expected/text.html [ Failure WontFix ]\n'
+ 'failures/expected/image.html [ ImageOnlyFailure ]\n'
+ 'failures/expected/text.html [ Failure ]\n'
'\n'
'// For test-win-xp\n'
- 'failures/expected/image.html [ ImageOnlyFailure WontFix ]\n'
- 'failures/expected/text.html [ Failure WontFix ]\n'),
+ 'failures/expected/image.html [ ImageOnlyFailure ]\n'
+ 'failures/expected/text.html [ Failure ]\n'),
platform='test-win-*')
def test_full(self):
self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
('// For test-win-xp\n'
- 'failures/expected/image.html [ ImageOnlyFailure WontFix ]\n'
- 'failures/expected/text.html [ Failure WontFix ]\n'),
+ 'Bug(test) failures/expected/image.html [ ImageOnlyFailure ]\n'
+ 'Bug(test) failures/expected/text.html [ Failure ]\n'),
full=True)
def test_exclude(self):
self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
('// For test-win-xp\n'
- 'failures/expected/text.html [ Failure WontFix ]\n'),
+ 'failures/expected/text.html [ Failure ]\n'),
exclude_keyword=['image'])
def test_include(self):
@@ -217,8 +217,8 @@ class PrintExpectationsTest(unittest.TestCase):
def test_csv(self):
self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
- ('test-win-xp,failures/expected/image.html,WONTFIX,IMAGE\n'
- 'test-win-xp,failures/expected/text.html,WONTFIX,FAIL\n'),
+ ('test-win-xp,failures/expected/image.html,BUGTEST,IMAGE\n'
+ 'test-win-xp,failures/expected/text.html,BUGTEST,FAIL\n'),
csv=True)