author    Simon Hausmann <simon.hausmann@digia.com>  2012-09-26 10:42:44 +0200
committer Simon Hausmann <simon.hausmann@digia.com>  2012-09-26 10:42:44 +0200
commit    33b26980cb24288b5a9f2590ccf32a949281bb79
tree      cc0203dac37338b24b0b25a4694c0b76d4e4164b
parent    715be629d51174233403237bfc563cf150087dc8
Imported WebKit commit c596dd7f03007fa7ed896b928106497e8784b3b5 (http://svn.webkit.org/repository/webkit/trunk@129610)
New snapshot that removes QtQuick1 support (to be moved into QtQuick1 module)
Diffstat (limited to 'Tools/Scripts/webkitpy')
-rw-r--r--  Tools/Scripts/webkitpy/common/config/committers.py                     |  1
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py  |  2
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/test.py                       | 10
-rw-r--r--  Tools/Scripts/webkitpy/performance_tests/perftest.py                   |  2
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py            | 43
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py   | 86
6 files changed, 92 insertions(+), 52 deletions(-)
diff --git a/Tools/Scripts/webkitpy/common/config/committers.py b/Tools/Scripts/webkitpy/common/config/committers.py
index fcfbf32ca..e98b25936 100644
--- a/Tools/Scripts/webkitpy/common/config/committers.py
+++ b/Tools/Scripts/webkitpy/common/config/committers.py
@@ -340,6 +340,7 @@ committers_unable_to_review = [
     Committer("Mike Lawther", "mikelawther@chromium.org", "mikelawther"),
     Committer("Mike Reed", "reed@google.com", "reed"),
     Committer("Mike Thole", ["mthole@mikethole.com", "mthole@apple.com"]),
+    Committer("Mike West", ["mkwst@chromium.org", "mike@mikewest.org"], "mkwst"),
     Committer("Mikhail Naganov", "mnaganov@chromium.org"),
     Committer("Naoki Takano", ["honten@chromium.org", "takano.naoki@gmail.com"], "honten"),
     Committer("Nat Duca", ["nduca@chromium.org", "nduca@google.com"], "nduca"),
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index 0cb7aefec..28e9d63f0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -295,7 +295,7 @@ class SingleTestRunner(object):
         putAllMismatchBeforeMatch = sorted
         for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
             reference_test_name = self._port.relative_test_filename(reference_filename)
-            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, test_output.image_hash, should_run_pixel_test=True), self._stop_when_done)
+            reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done)
             test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')
             if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
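The fix above passes None instead of the test's own image hash when loading a reference. Under the (assumed) driver contract sketched below, a matching caller-supplied hash lets the driver skip pixel output, so reusing the page-under-test's hash could leave the reference comparison with no image at all:

# Minimal sketch, assuming a driver that elides pixels when the caller's
# hash already matches; passing None (as the new line does) forces real
# pixel output for the reference load.
def run_test_sketch(actual_checksum, actual_image, caller_image_hash):
    image = None if actual_checksum == caller_image_hash else actual_image
    return image, actual_checksum

assert run_test_sketch("abc", b"pixels", "abc")[0] is None      # pixels elided
assert run_test_sketch("abc", b"pixels", None)[0] == b"pixels"  # pixels forced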
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index cbc0dbed1..d008d995d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -405,6 +405,10 @@ class TestPort(Port):
     def diff_image(self, expected_contents, actual_contents, tolerance=None):
         diffed = actual_contents != expected_contents
+        if not actual_contents and not expected_contents:
+            return (None, 0, None)
+        if not actual_contents or not expected_contents:
+            return (True, 0, None)
         if 'ref' in expected_contents:
             assert tolerance == 0
         if diffed:
@@ -568,7 +572,11 @@ class TestDriver(Driver):
         if stop_when_done:
             self.stop()
-        return DriverOutput(actual_text, test.actual_image, test.actual_checksum, audio,
+        if test.actual_checksum == test_input.image_hash:
+            image = None
+        else:
+            image = test.actual_image
+        return DriverOutput(actual_text, image, test.actual_checksum, audio,
             crash=test.crash or test.web_process_crash, crashed_process_name=crashed_process_name,
             crashed_pid=crashed_pid, crash_log=crash_log,
             test_time=time.time() - start_time, timeout=test.timeout, error=test.error)
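The new early returns pin down the edge cases of the test port's image-diffing contract. A minimal sketch, assuming the three-tuple is (difference, difference percentage, error string) as the returns above suggest:

# Hedged sketch of the diff_image contract the test port models; the middle
# element is assumed to be a difference percentage, the last an error string.
def diff_image_sketch(expected_contents, actual_contents):
    if not actual_contents and not expected_contents:
        return (None, 0, None)   # nothing on either side: no difference
    if not actual_contents or not expected_contents:
        return (True, 0, None)   # output on exactly one side: always a diff
    diffed = actual_contents != expected_contents
    return (diffed or None, 0, None)

assert diff_image_sketch(b"", b"")[0] is None
assert diff_image_sketch(b"ref", b"")[0] is True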
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index 1dfbcd229..fdac35b11 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -138,7 +138,7 @@ class PerfTest(object):
             score = self._score_regex.match(line)
             if score:
                 key = score.group('key')
-                if ', ' in score.group('value'):
+                if key == 'values':
                     value = [float(number) for number in score.group('value').split(', ')]
                 else:
                     value = float(score.group('value'))
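Why the condition changed from sniffing the value for ', ' to checking the key: only the 'values' field is defined to be a comma-separated list, so dispatching on the key states the intent directly and cannot misfire on other fields. A runnable sketch of the parsing, with an assumed regex (the real _score_regex is defined elsewhere in perftest.py):

import re

# Assumed regex modeled on lines like "values: 1504, 1505" and "avg: 1489.05".
score_regex = re.compile(r'^(?P<key>\w+):\s+(?P<value>[0-9., ]+)')

def parse_score_line(line):
    score = score_regex.match(line)
    if not score:
        return None
    key = score.group('key')
    # Key on the field name: only "values" is ever a comma-separated list.
    if key == 'values':
        value = [float(number) for number in score.group('value').split(', ')]
    else:
        value = float(score.group('value'))
    return key, value

assert parse_score_line("values: 1, 2, 3") == ('values', [1.0, 2.0, 3.0])
assert parse_score_line("avg: 1489.05") == ('avg', 1489.05)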
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index e01b2aedb..c34d0b3e4 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -100,8 +100,13 @@ class PerfTestsRunner(object):
                 help="Do not generate results JSON and results page."),
             optparse.make_option("--output-json-path",
                 help="Path to generate a JSON file at; may contain previous results if it already exists."),
-            optparse.make_option("--source-json-path",  # FIXME: Rename it to signify the fact it's a slave configuration.
+            optparse.make_option("--reset-results", action="store_true",
+                help="Clears the content in the generated JSON file before adding the results."),
+            optparse.make_option("--slave-config-json-path",
                 help="Only used on bots. Path to a slave configuration file."),
+            optparse.make_option("--source-json-path", dest="slave_config_json_path",
+                # FIXME: Remove this option once build.webkit.org is updated to use --slave-config-json-path.
+                help="Deprecated. Overrides --slave-config-json-path."),
             optparse.make_option("--description",
                 help="Add a description to the output JSON file if one is generated"),
             optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
@@ -176,33 +181,31 @@ class PerfTestsRunner(object):
     def _generate_and_show_results(self):
         options = self._options
+        if options.test_results_server:
+            # Remove this code once build.webkit.org starts using --no-show-results and --reset-results.
+            options.reset_results = True
+            options.show_results = False
+
         output_json_path = self._output_json_path()
         output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)
-        if options.source_json_path:
-            output = self._merge_slave_config_json(options.source_json_path, output)
+        if options.slave_config_json_path:
+            output = self._merge_slave_config_json(options.slave_config_json_path, output)
         if not output:
             return self.EXIT_CODE_BAD_SOURCE_JSON
-        test_results_server = options.test_results_server
-        results_page_path = None
-        if not test_results_server:
-            output = self._merge_outputs(output_json_path, output)
-            if not output:
-                return self.EXIT_CODE_BAD_MERGE
-            results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
-        else:
-            # FIXME: Remove this code once webkit-perf.appspot.com supported "values".
-            for result in output['results'].values():
-                if isinstance(result, dict) and 'values' in result:
-                    del result['values']
+        output = self._merge_outputs_if_needed(output_json_path, output)
+        if not output:
+            return self.EXIT_CODE_BAD_MERGE
+        results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
         self._generate_output_files(output_json_path, results_page_path, output)
-        if test_results_server:
-            if not self._upload_json(test_results_server, output_json_path):
+        if options.test_results_server:
+            if not self._upload_json(options.test_results_server, output_json_path):
                 return self.EXIT_CODE_FAILED_UPLOADING
-        elif options.show_results:
+
+        if options.show_results:
             self._port.show_results_html_file(results_page_path)

     def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
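Taken together, the hunk above flattens the old two-branch flow: merging and results-page generation now happen unconditionally, and uploading and showing become independent, flag-gated steps. A hedged outline with stand-in helpers:

# Outline only; every helper and option name here is a stand-in for the
# private methods the real method calls.
def generate_and_show_results_sketch(options, helpers):
    output = helpers['generate']()
    output = helpers['merge_if_needed'](output)   # honors --reset-results
    if not output:
        return 'EXIT_CODE_BAD_MERGE'
    helpers['write_files'](output)                # JSON plus the .html page
    if options.get('test_results_server') and not helpers['upload']():
        return 'EXIT_CODE_FAILED_UPLOADING'
    if options.get('show_results'):
        helpers['show']()
    return 0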
@@ -233,8 +236,8 @@ class PerfTestsRunner(object):
             _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
             return None

-    def _merge_outputs(self, output_json_path, output):
-        if not self._host.filesystem.isfile(output_json_path):
+    def _merge_outputs_if_needed(self, output_json_path, output):
+        if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
             return [output]
         try:
             existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
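A hedged sketch of the merge-or-reset rule that _merge_outputs_if_needed introduces, using plain file I/O in place of webkitpy's filesystem wrapper; the error handling is assumed from the exit code the caller checks:

import json
import os

def merge_outputs_if_needed(output_json_path, output, reset_results=False):
    # --reset-results (or a missing file) starts a fresh list of runs.
    if reset_results or not os.path.isfile(output_json_path):
        return [output]
    try:
        with open(output_json_path) as f:
            existing_outputs = json.load(f)
        return existing_outputs + [output]
    except ValueError:
        return None  # malformed existing file: caller reports a bad merge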
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index d46d7e73e..d3de7b3df 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -340,27 +340,26 @@ max 548000 bytes
         "values": [1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471]},
         "inspector/pass.html:group_name:test_name": 42}

-    # FIXME: Remove this variance once perf-o-matic supported "values".
-    _event_target_wrapper_and_inspector_results_without_values = {
-        "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
-        "inspector/pass.html:group_name:test_name": 42}
-
     def test_run_with_json_output(self):
-        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server=some.host'])
         self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
-        self.assertEqual(runner.load_output_json(), {
-            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
-            "webkit-revision": "5678", "branch": "webkit-trunk"})
+        self.assertEqual(runner.load_output_json(), [{
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}])
+
+        filesystem = port.host.filesystem
+        self.assertTrue(filesystem.isfile(runner._output_json_path()))
+        self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))

     def test_run_with_description(self):
-        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server=some.host', '--description', 'some description'])
         self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
-        self.assertEqual(runner.load_output_json(), {
+        self.assertEqual(runner.load_output_json(), [{
             "timestamp": 123456789, "description": "some description",
-            "results": self._event_target_wrapper_and_inspector_results_without_values,
-            "webkit-revision": "5678", "branch": "webkit-trunk"})
+            "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}])

     def create_runner_and_setup_results_template(self, args=[]):
         runner, port = self.create_runner(args)
@@ -380,7 +379,7 @@ max 548000 bytes
     def test_run_generates_json_by_default(self):
         runner, port = self.create_runner_and_setup_results_template()
         filesystem = port.host.filesystem
-        output_json_path = filesystem.join(port.perf_results_directory(), runner._DEFAULT_JSON_FILENAME)
+        output_json_path = runner._output_json_path()
         results_page_path = filesystem.splitext(output_json_path)[0] + '.html'

         self.assertFalse(filesystem.isfile(output_json_path))
@@ -395,6 +394,35 @@ max 548000 bytes
         self.assertTrue(filesystem.isfile(output_json_path))
         self.assertTrue(filesystem.isfile(results_page_path))

+    def test_run_merges_output_by_default(self):
+        runner, port = self.create_runner_and_setup_results_template()
+        filesystem = port.host.filesystem
+        output_json_path = runner._output_json_path()
+
+        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+        self._test_run_with_json_output(runner, port.host.filesystem)
+
+        self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{"previous": "results"}, {
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}])
+        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+
+    def test_run_respects_reset_results(self):
+        runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
+        filesystem = port.host.filesystem
+        output_json_path = runner._output_json_path()
+
+        filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+        self._test_run_with_json_output(runner, port.host.filesystem)
+
+        self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk"}])
+        self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+
     def test_run_generates_and_show_results_page(self):
         runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
         page_shown = []
@@ -444,17 +472,17 @@ max 548000 bytes
         self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)

     def test_run_with_slave_config_json(self):
-        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--source-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
         port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
         self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
-        self.assertEqual(runner.load_output_json(), {
-            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
-            "webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"})
+        self.assertEqual(runner.load_output_json(), [{
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"}])

     def test_run_with_bad_slave_config_json(self):
-        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
-            '--source-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+            '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
         logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
         self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
         port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
@@ -463,23 +491,23 @@ max 548000 bytes
         self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)

     def test_run_with_multiple_repositories(self):
-        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server=some.host'])
         port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
         self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
-        self.assertEqual(runner.load_output_json(), {
-            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results_without_values,
-            "webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"})
+        self.assertEqual(runner.load_output_json(), [{
+            "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+            "webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"}])

     def test_run_with_upload_json(self):
-        runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+        runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
             '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
         self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
         generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
-        self.assertEqual(generated_json['platform'], 'platform1')
-        self.assertEqual(generated_json['builder-name'], 'builder1')
-        self.assertEqual(generated_json['build-number'], 123)
+        self.assertEqual(generated_json[0]['platform'], 'platform1')
+        self.assertEqual(generated_json[0]['builder-name'], 'builder1')
+        self.assertEqual(generated_json[0]['build-number'], 123)
         self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
@@ -609,7 +637,7 @@ max 548000 bytes
         self.assertEqual(options.time_out_ms, '42')
         self.assertEqual(options.configuration, 'Debug')
         self.assertEqual(options.output_json_path, 'a/output.json')
-        self.assertEqual(options.source_json_path, 'a/source.json')
+        self.assertEqual(options.slave_config_json_path, 'a/source.json')
         self.assertEqual(options.test_results_server, 'somehost')
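The recurring assertion change in these tests, a bare dict becoming a one-element list, reflects the new on-disk shape: each run appends an entry instead of overwriting the file. A tiny self-contained illustration (the first entry mirrors the tests' mock values; the second is hypothetical):

import json

first_run = {"timestamp": 123456789, "webkit-revision": "5678", "branch": "webkit-trunk"}
second_run = {"timestamp": 123456790, "webkit-revision": "5679", "branch": "webkit-trunk"}

# Historical results survive subsequent runs unless --reset-results is passed.
on_disk = json.dumps([first_run])
merged = json.loads(on_disk) + [second_run]
assert [run["webkit-revision"] for run in merged] == ["5678", "5679"]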