| author | Simon Hausmann <simon.hausmann@nokia.com> | 2012-07-18 13:59:13 +0200 |
|---|---|---|
| committer | Simon Hausmann <simon.hausmann@nokia.com> | 2012-07-18 13:59:28 +0200 |
| commit | 4d6084feccab99c0a7b3ecef26bb49c41dd50201 (patch) | |
| tree | fd1195897f551eee6d5a15d07ff5733b15aa2a5c /Tools/Scripts | |
| parent | ae901828d4689ab9e89113f6b6ea8042b37a9fda (diff) | |
| download | qtwebkit-4d6084feccab99c0a7b3ecef26bb49c41dd50201.tar.gz | |
Imported WebKit commit ff52235a78888e5cb8e286a828a8698042200e67 (http://svn.webkit.org/repository/webkit/trunk@122948)
New snapshot that should fix the rendering issues recently introduced
Diffstat (limited to 'Tools/Scripts')
33 files changed, 1535 insertions, 1153 deletions
diff --git a/Tools/Scripts/build-webkit b/Tools/Scripts/build-webkit
index 44a1bbf0f..c37bce7d4 100755
--- a/Tools/Scripts/build-webkit
+++ b/Tools/Scripts/build-webkit
@@ -54,7 +54,6 @@ my $showHelp = 0;
 my $clean = 0;
 my $useGYP = 0;
 my $minimal = 0;
-my $v8 = 0;
 my $installHeaders;
 my $installLibs;
 my $prefixPath;
@@ -96,8 +95,6 @@ push @ARGV, split(/ /, $ENV{'BUILD_WEBKIT_ARGS'}) if ($ENV{'BUILD_WEBKIT_ARGS'});
 foreach (@ARGV) {
     if ($_ eq '--minimal') {
         $minimal = 1;
-    } elsif ($_ eq '--v8') {
-        $v8 = 1;
     }
 }
@@ -128,7 +125,6 @@ Usage: $programName [options] [options to pass to build system]
   --install-headers=<path>          Set installation path for the headers (Qt only)
   --install-libs=<path>             Set installation path for the libraries (Qt only)
-  --v8                              Use V8 as JavaScript engine (Qt only)
   --prefix=<path>                   Set installation prefix to the given path (Gtk/Efl/BlackBerry only)
   --makeargs=<arguments>            Optional Makefile flags
@@ -152,7 +148,6 @@ my %options = (
     'makeargs=s' => \$makeArgs,
     'cmakeargs=s' => \$cmakeArgs,
     'minimal' => \$minimal,
-    'v8' => \$v8,
     'only-webkit' => \$onlyWebKitProject,
     'no-webkit2' => \$noWebKit2,
     'coverage' => \$coverageSupport,
@@ -272,13 +267,6 @@ if (isGtk()) {
     foreach (@features) {
         push @options, "DEFINES+=$_->{define}=${$_->{value}}" if $_->{define} && ${$_->{value}} != $_->{default};
     }
-
-    if ($v8) {
-        print "Building WebKit2 with v8 is not supported currently. Disabling WebKit2.\n";
-        # FIXME: Deal with this in defaults_pre, once Qt has support for getting at the
-        # command line arguments at that stage.
-        push @options, "CONFIG+=v8 CONFIG+=no_webkit2";
-    }
 }

 # If asked to build just the WebKit project, overwrite the projects

diff --git a/Tools/Scripts/generate-win32-export-forwards b/Tools/Scripts/generate-win32-export-forwards
index 768b3ba93..e75b430f4 100755
--- a/Tools/Scripts/generate-win32-export-forwards
+++ b/Tools/Scripts/generate-win32-export-forwards
@@ -27,27 +27,22 @@ import subprocess
 import sys
 import re

-def exportForwardsForLibrary(library):
-    dumpBin = subprocess.Popen("dumpbin /directives " + library, stdout=subprocess.PIPE, universal_newlines=True);
-    output, errors = dumpBin.communicate();
-    return output
-
-libraries = sys.argv[1 : len(sys.argv) - 1]
-outputFileName = sys.argv[len(sys.argv) - 1]
+dumpBin = subprocess.Popen("dumpbin /directives " + sys.argv[1], stdout=subprocess.PIPE, universal_newlines=True);
+
+output, errors = dumpBin.communicate();

 exportedSymbolRegexp = re.compile("\s*(?P<symbol>/EXPORT:.+)");
+
 symbols = set()

-for lib in libraries:
-    for line in exportForwardsForLibrary(lib).splitlines():
-        match = exportedSymbolRegexp.match(line)
-        if match != None:
-            symbols.add(match.group("symbol"))
+for line in output.splitlines():
+    match = exportedSymbolRegexp.match(line)
+    if match != None:
+        symbols.add(match.group("symbol"))

-print "Forwarding %s symbols from %s" % (len(symbols), " ".join(libraries))
+print "Forwarding %s symbols from static library %s" % (len(symbols), sys.argv[1])

-exportFile = open(outputFileName, "w")
+exportFile = open(sys.argv[2], "w")
 for symbol in symbols:
     exportFile.write("#pragma comment(linker, \"%s\")\n" % symbol);
 exportFile.close()

diff --git a/Tools/Scripts/webkitdirs.pm b/Tools/Scripts/webkitdirs.pm
index 1da09471b..58c55a49c 100755
--- a/Tools/Scripts/webkitdirs.pm
+++ b/Tools/Scripts/webkitdirs.pm
@@ -335,6 +335,12 @@ sub determineArchitecture
         $architecture = `arch`;
         chomp $architecture;
     }
+
+    if (!$architecture && (isGtk() || isAppleMacWebKit() || isEfl())) {
+        # Fall back to output of `uname -m', if it is present.
+        $architecture = `uname -m`;
+        chomp $architecture;
+    }
 }

 sub determineNumberOfCPUs
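The generate-win32-export-forwards change above reverts the script to a single input library, but it shows the whole technique in one place: scrape the `/EXPORT:` linker directives that `dumpbin /directives` prints for a static library, then re-emit each one as a `#pragma comment(linker, ...)` line so a DLL linking that library forwards the symbols. A minimal standalone sketch of the same idea (Python 3 syntax and the name `export_forwards` are illustrative, not part of the script):

```python
import re
import subprocess

def export_forwards(library, output_file):
    # dumpbin lists each exported symbol as an "/EXPORT:name" linker directive.
    result = subprocess.run(["dumpbin", "/directives", library],
                            capture_output=True, universal_newlines=True)
    exported = re.compile(r"\s*(?P<symbol>/EXPORT:.+)")
    symbols = {m.group("symbol")
               for m in map(exported.match, result.stdout.splitlines()) if m}
    # Re-emitting the directives as pragmas makes the linker forward them.
    with open(output_file, "w") as f:
        for symbol in sorted(symbols):
            f.write('#pragma comment(linker, "%s")\n' % symbol)
```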
diff --git a/Tools/Scripts/webkitperl/FeatureList.pm b/Tools/Scripts/webkitperl/FeatureList.pm
index ca3b9e3d6..1ca67a0d9 100644
--- a/Tools/Scripts/webkitperl/FeatureList.pm
+++ b/Tools/Scripts/webkitperl/FeatureList.pm
@@ -53,6 +53,7 @@ my (
     $cssBoxDecorationBreakSupport,
     $cssExclusionsSupport,
     $cssFiltersSupport,
+    $cssImageOrientationSupport,
     $cssImageResolutionSupport,
     $cssRegionsSupport,
     $cssShadersSupport,
@@ -166,6 +167,9 @@ my @features = (
     { option => "css-box-decoration-break", desc => "Toggle CSS box-decoration-break support",
       define => "ENABLE_CSS_BOX_DECORATION_BREAK", default => 1, value => \$cssBoxDecorationBreakSupport },

+    { option => "css-image-orientation", desc => "Toggle CSS image-orientation support",
+      define => "ENABLE_CSS_IMAGE_ORIENTATION", default => 0, value => \$cssImageOrientationSupport },
+
     { option => "css-image-resolution", desc => "Toggle CSS image-resolution support",
       define => "ENABLE_CSS_IMAGE_RESOLUTION", default => 0, value => \$cssImageResolutionSupport },

diff --git a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
index d244045ac..f9767168c 100644
--- a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
+++ b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
@@ -136,17 +136,14 @@ class BaselineOptimizer(object):
                 break  # Frowns. We do not appear to be converging.
             unsatisfied_port_names_by_result = new_unsatisfied_port_names_by_result

-        self._filter_virtual_ports(new_results_by_directory)
         return results_by_directory, new_results_by_directory

-    def _filter_virtual_ports(self, new_results_by_directory):
-        for port in _VIRTUAL_PORTS:
-            virtual_directory = _VIRTUAL_PORTS[port][0]
-            if virtual_directory in new_results_by_directory:
-                real_directory = _VIRTUAL_PORTS[port][1]
-                if real_directory not in new_results_by_directory:
-                    new_results_by_directory[real_directory] = new_results_by_directory[virtual_directory]
-                del new_results_by_directory[virtual_directory]
+    def _filtered_results_by_port_name(self, results_by_directory):
+        results_by_port_name = self._results_by_port_name(results_by_directory)
+        for port_name in _VIRTUAL_PORTS.keys():
+            if port_name in results_by_port_name:
+                del results_by_port_name[port_name]
+        return results_by_port_name

     def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
         data_for_result = {}
@@ -178,7 +175,7 @@ class BaselineOptimizer(object):

     def optimize(self, baseline_name):
         results_by_directory, new_results_by_directory = self._find_optimal_result_placement(baseline_name)
-        if self._results_by_port_name(results_by_directory) != self._results_by_port_name(new_results_by_directory):
+        if self._filtered_results_by_port_name(results_by_directory) != self._filtered_results_by_port_name(new_results_by_directory):
            return False
         self._move_baselines(baseline_name, results_by_directory, new_results_by_directory)
         return True
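The baselineoptimizer.py hunk changes the safety check in optimize(): instead of comparing results per directory, it compares results per port name with virtual ports removed, since a virtual port always resolves to a real port's baselines anyway. A sketch of that filtering step in isolation (the `_VIRTUAL_PORTS` entry here is illustrative, not webkitpy's actual table):

```python
# Illustrative stand-in for webkitpy's _VIRTUAL_PORTS mapping.
_VIRTUAL_PORTS = {
    'chromium-mac-snowleopard': ('LayoutTests/platform/chromium-mac-snowleopard',
                                 'LayoutTests/platform/chromium-mac'),
}

def filtered_results_by_port_name(results_by_port_name):
    # Dropping virtual ports means two baseline layouts compare equal when
    # they only differ for ports that fall back to a real port's results.
    return {port: result for port, result in results_by_port_name.items()
            if port not in _VIRTUAL_PORTS}
```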
diff --git a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
index 9ba6ff1f2..0325991d1 100644
--- a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
@@ -45,12 +45,19 @@ class TestBaselineOptimizer(BaselineOptimizer):
     def _read_results_by_directory(self, baseline_name):
         return self._mock_results_by_directory

+    def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
+        self.new_results_by_directory = new_results_by_directory
+

 class BaselineOptimizerTest(unittest.TestCase):
     def _assertOptimization(self, results_by_directory, expected_new_results_by_directory):
         baseline_optimizer = TestBaselineOptimizer(results_by_directory)
-        _, new_results_by_directory = baseline_optimizer._find_optimal_result_placement('mock-baseline.png')
-        self.assertEqual(new_results_by_directory, expected_new_results_by_directory)
+        self.assertTrue(baseline_optimizer.optimize('mock-baseline.png'))
+        self.assertEqual(baseline_optimizer.new_results_by_directory, expected_new_results_by_directory)
+
+    def _assertOptimizationFailed(self, results_by_directory):
+        baseline_optimizer = TestBaselineOptimizer(results_by_directory)
+        self.assertFalse(baseline_optimizer.optimize('mock-baseline.png'))

     def test_move_baselines(self):
         host = MockHost()
@@ -135,18 +142,13 @@ class BaselineOptimizerTest(unittest.TestCase):
         })

     def test_common_directory_includes_root(self):
-        # Note: The resulting directories are "wrong" in the sense that
-        # enacting this plan would change semantics. However, this test case
-        # demonstrates that we don't throw an exception in this case. :)
-        self._assertOptimization({
+        # This test case checks that we don't throw an exception when we fail
+        # to optimize.
+        self._assertOptimizationFailed({
             'LayoutTests/platform/gtk': 'e8608763f6241ddacdd5c1ef1973ba27177d0846',
             'LayoutTests/platform/qt': 'bcbd457d545986b7abf1221655d722363079ac87',
             'LayoutTests/platform/chromium-win': '3764ac11e1f9fbadd87a90a2e40278319190a0d3',
             'LayoutTests/platform/mac': 'e8608763f6241ddacdd5c1ef1973ba27177d0846',
-        }, {
-            'LayoutTests/platform/qt': 'bcbd457d545986b7abf1221655d722363079ac87',
-            'LayoutTests/platform/chromium-win': '3764ac11e1f9fbadd87a90a2e40278319190a0d3',
-            'LayoutTests': 'e8608763f6241ddacdd5c1ef1973ba27177d0846',
         })

         self._assertOptimization({
@@ -181,3 +183,20 @@ class BaselineOptimizerTest(unittest.TestCase):
             'LayoutTests/platform/win-xp': '5b1253ef4d5094530d5f1bc6cdb95c90b446bec7',
             'LayoutTests/platform/chromium-linux': 'f52fcdde9e4be8bd5142171cd859230bd4471036'
         })
+
+    def test_virtual_ports_filtered(self):
+        self._assertOptimization({
+            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/chromium-mac-snowleopard': '1',
+            'LayoutTests/platform/chromium-win': '2',
+            'LayoutTests/platform/gtk': '3',
+            'LayoutTests/platform/efl': '3',
+            'LayoutTests/platform/qt': '4',
+            'LayoutTests/platform/mac': '5',
+        }, {
+            'LayoutTests/platform/chromium-mac': '1',
+            'LayoutTests/platform/chromium-win': '2',
+            'LayoutTests': '3',
+            'LayoutTests/platform/qt': '4',
+            'LayoutTests/platform/mac': '5',
+        })

diff --git a/Tools/Scripts/webkitpy/common/config/watchlist b/Tools/Scripts/webkitpy/common/config/watchlist
index c11656322..127772c80 100755
--- a/Tools/Scripts/webkitpy/common/config/watchlist
+++ b/Tools/Scripts/webkitpy/common/config/watchlist
@@ -102,6 +102,17 @@
                     r"|Source/WebKit2/UIProcess/API/cpp/qt/"
                     r"|Source/WebKit2/UIProcess/API/C/qt/",
     },
+    "QtGraphics": {
+        "filename": r"Source/WebCore/platform/graphics/qt/"
+                    r"|Source/WebKit2/WebProcess/WebPage/LayerTreeCoordinator/"
+                    r"|Source/WebKit2/UIProcess/WebLayerTreeRenderer(?!\.(h|cpp))",
+    },
+    "TextureMapper": {
+        "filename": r"Source/WebCore/platform/graphics/texmap/",
+    },
+    "OpenGL": {
+        "filename": r"Source/WebCore/platform/graphics/opengl/",
+    },
     "QtWebKit2PlatformSpecific": {
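The new QtGraphics watchlist entry uses a negative lookahead, `(?!\.(h|cpp))`, so it watches files whose names start with WebLayerTreeRenderer without matching the WebLayerTreeRenderer.h/.cpp pair itself. A quick check of that behavior:

```python
import re

pattern = re.compile(r"Source/WebKit2/UIProcess/WebLayerTreeRenderer(?!\.(h|cpp))")

# Companion files still match ...
assert pattern.match("Source/WebKit2/UIProcess/WebLayerTreeRendererClient.cpp")
# ... but the renderer's own .h/.cpp files do not.
assert not pattern.match("Source/WebKit2/UIProcess/WebLayerTreeRenderer.cpp")
assert not pattern.match("Source/WebKit2/UIProcess/WebLayerTreeRenderer.h")
```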
r"Source/WebKit2/.*\.(pri|pro)" r"|Source/WebKit2/Platform/qt/" @@ -138,6 +149,24 @@ r"|Tools/DumpRenderTree/efl/" r"|LayoutTests/platform/efl/", }, + "EFLWebKit2PublicAPI": { + "filename": r"Source/WebKit2/UIProcess/API/efl/" + r"|Source/WebKit2/UIProcess/API/C/efl/", + }, + "EFLWebKit2PlatformSpecific": { + "filename": r"Source/WebKit2/.*\.(cmake|txt)" + r"|Source/WebKit2/Platform/efl/" + r"|Source/WebKit2/efl/" + r"|Source/WebKit2/Shared/API/c/efl/" + r"|Source/WebKit2/Shared/efl/" + r"|Source/WebKit2/WebProcess/InjectedBundle/efl/" + r"|Source/WebKit2/WebProcess/WebPage/efl/" + r"|Source/WebKit2/WebProcess/efl/" + r"|Source/WebKit2/WebProcess/Downloads/efl/" + r"|Source/WebKit2/WebProcess/WebCoreSupport/efl/" + r"|Source/WebKit2/UIProcess/efl/" + r"|Source/WebKit2/UIProcess/Launcher/efl/", + }, "CMake": { "filename": r".*CMakeLists\w*\.txt" r"|.*\w+\.cmake" @@ -178,6 +207,12 @@ r"|Tools/DumpRenderTree/blackberry" r"|LayoutTests/platform/blackberry", }, + "NetworkInfo": { + "filename": r"Source/WebCore/Modules/networkinfo", + }, + "Battery": { + "filename": r"Source/WebCore/Modules/battery", + }, }, "CC_RULES": { @@ -185,14 +220,17 @@ # Specifically, levin@chromium.org and levin+threading@chromium.org are # two different accounts as far as bugzilla is concerned. "AppleMacPublicApi": [ "timothy@apple.com" ], + "Battery": [ "gyuyoung.kim@samsung.com" ], "BlackBerry": [ "mifenton@rim.com" ], - "CMake": [ "rakuco@webkit.org", ], + "CMake": [ "rakuco@webkit.org", "gyuyoung.kim@samsung.com" ], "CSS": [ "alexis.menard@openbossa.org", "macpherson@chromium.org", "cmarcelo@webkit.org" ], "ChromiumDumpRenderTree": [ "tkent@chromium.org", ], "ChromiumGraphics": [ "jamesr@chromium.org", "cc-bugs@google.com" ], "ChromiumPublicApi": [ "abarth@webkit.org", "dglazkov@chromium.org", "fishd@chromium.org", "jamesr@chromium.org", "tkent+wkapi@chromium.org" ], "DOMAttributes": [ "cmarcelo@webkit.org", ], - "EFL": [ "rakuco@webkit.org", ], + "EFL": [ "rakuco@webkit.org", "gyuyoung.kim@samsung.com" ], + "EFLWebKit2PlatformSpecific": [ "gyuyoung.kim@samsung.com" ], + "EFLWebKit2PublicAPI": [ "gyuyoung.kim@samsung.com" ], "Editing": [ "mifenton@rim.com" ], "Forms": [ "tkent@chromium.org", "mifenton@rim.com" ], "FrameLoader": [ "abarth@webkit.org", "japhet@chromium.org", "jochen@chromium.org" ], @@ -201,7 +239,10 @@ "Loader": [ "japhet@chromium.org", "jochen@chromium.org" ], "MathML": [ "dbarton@mathscribe.com" ], "Media": [ "feature-media-reviews@chromium.org", "eric.carlson@apple.com" ], + "NetworkInfo": [ "gyuyoung.kim@samsung.com" ], + "OpenGL" : [ "noam.rosenthal@nokia.com" ], "QtBuildSystem" : [ "vestbo@webkit.org", ], + "QtGraphics" : [ "noam.rosenthal@nokia.com" ], "QtWebKit2PlatformSpecific": [ "alexis.menard@openbossa.org", "zoltan@webkit.org", "cmarcelo@webkit.org" ], "QtWebKit2PublicAPI": [ "alexis.menard@openbossa.org", "zoltan@webkit.org", "cmarcelo@webkit.org" ], "Rendering": [ "eric@webkit.org" ], @@ -210,6 +251,7 @@ "SoupNetwork": [ "rakuco@webkit.org", "gns@gnome.org", "mrobinson@webkit.org", "danw@gnome.org" ], "StyleChecker": [ "levin@chromium.org", ], "TestFailures": [ "abarth@webkit.org", "dglazkov@chromium.org" ], + "TextureMapper" : [ "noam.rosenthal@nokia.com" ], "ThreadingFiles|ThreadingUsage": [ "levin+threading@chromium.org", ], "V8Bindings|BindingsScripts": [ "abarth@webkit.org", "japhet@chromium.org", "haraken@chromium.org", "jochen@chromium.org" ], "WatchListScript": [ "levin+watchlist@chromium.org", ], diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py 
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 7aee0c2fb..0d07d3a68 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -417,19 +417,14 @@ class Manager(object):

         files = test_files[slice_start:slice_end]

-        tests_run_msg = 'Running: %d tests (chunk slice [%d:%d] of %d)' % ((slice_end - slice_start), slice_start, slice_end, num_tests)
-        self._printer.print_expected(tests_run_msg)
+        _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))

         # If we reached the end and we don't have enough tests, we run some
         # from the beginning.
         if slice_end - slice_start < chunk_len:
             extra = chunk_len - (slice_end - slice_start)
-            extra_msg = ' last chunk is partial, appending [0:%d]' % extra
-            self._printer.print_expected(extra_msg)
-            tests_run_msg += "\n" + extra_msg
+            _log.debug('   last chunk is partial, appending [0:%d]' % extra)
             files.extend(test_files[0:extra])

-        tests_run_filename = self._filesystem.join(self._results_directory, "tests_run.txt")
-        self._filesystem.write_text_file(tests_run_filename, tests_run_msg)
-
         len_skip_chunk = int(len(files) * len(skipped) / float(len(self._test_files)))
         skip_chunk_list = list(skipped)[0:len_skip_chunk]
@@ -512,11 +507,7 @@ class Manager(object):
             (self._options.iterations if self._options.iterations else 1)
         result_summary = ResultSummary(self._expectations, self._test_files | skipped, iterations)

-        self._printer.print_expected('Found %s.' % grammar.pluralize('test', num_all_test_files))
-        self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes")
-        self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures")
-        self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky")
-        self._print_expected_results_of_type(result_summary, test_expectations.SKIP, "skipped")
+        self._printer.print_expected(num_all_test_files, result_summary, self._expectations.get_tests_with_result_type)

         if self._options.skipped != 'ignore':
             # Note that we don't actually run the skipped tests (they were
@@ -526,15 +517,7 @@ class Manager(object):
                 result = test_results.TestResult(test)
                 result.type = test_expectations.SKIP
                 for iteration in range(iterations):
-                    result_summary.add(result, expected=True)
-        self._printer.print_expected('')
-
-        if self._options.repeat_each > 1:
-            self._printer.print_expected('Running each test %d times.' % self._options.repeat_each)
-        if self._options.iterations > 1:
-            self._printer.print_expected('Running %d iterations of the tests.' % self._options.iterations)
-        if iterations > 1:
-            self._printer.print_expected('')
+                    result_summary.add(result, expected=True, test_is_slow=self._test_is_slow(test))

         return result_summary
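The chunking logic above takes a fixed-size slice of the test list and, when the slice runs off the end, wraps around and appends tests from the front so every chunk has the same length. Reduced to its core (an illustrative free function, not the Manager method):

```python
def chunk(tests, chunk_index, chunk_len):
    slice_start = chunk_index * chunk_len
    slice_end = min(len(tests), slice_start + chunk_len)
    files = tests[slice_start:slice_end]
    if slice_end - slice_start < chunk_len:
        # Last chunk is partial: append tests from the beginning of the list.
        extra = chunk_len - (slice_end - slice_start)
        files.extend(tests[0:extra])
    return files

assert chunk(['a', 'b', 'c', 'd', 'e'], chunk_index=2, chunk_len=2) == ['e', 'a']
```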
@@ -567,7 +550,13 @@ class Manager(object):
     def _test_is_slow(self, test_file):
         return self._expectations.has_modifier(test_file, test_expectations.SLOW)

-    def _shard_tests(self, test_files, num_workers, fully_parallel):
+    def _is_ref_test(self, test_input):
+        if test_input.reference_files is None:
+            # Lazy initialization.
+            test_input.reference_files = self._port.reference_files(test_input.test_name)
+        return bool(test_input.reference_files)
+
+    def _shard_tests(self, test_files, num_workers, fully_parallel, shard_ref_tests):
         """Groups tests into batches.
         This helps ensure that tests that depend on each other (aka bad tests!)
         continue to run together as most cross-tests dependencies tend to
@@ -581,30 +570,40 @@ class Manager(object):
         # own class or module. Consider grouping it with the chunking logic
         # in prepare_lists as well.
         if num_workers == 1:
-            return self._shard_in_two(test_files)
+            return self._shard_in_two(test_files, shard_ref_tests)
         elif fully_parallel:
             return self._shard_every_file(test_files)
-        return self._shard_by_directory(test_files, num_workers)
+        return self._shard_by_directory(test_files, num_workers, shard_ref_tests)

-    def _shard_in_two(self, test_files):
+    def _shard_in_two(self, test_files, shard_ref_tests):
         """Returns two lists of shards, one with all the tests requiring a lock
         and one with the rest.

         This is used when there's only one worker, to minimize the per-shard overhead."""
         locked_inputs = []
+        locked_ref_test_inputs = []
         unlocked_inputs = []
+        unlocked_ref_test_inputs = []
         for test_file in test_files:
             test_input = self._get_test_input_for_file(test_file)
             if self._test_requires_lock(test_file):
-                locked_inputs.append(test_input)
+                if shard_ref_tests and self._is_ref_test(test_input):
+                    locked_ref_test_inputs.append(test_input)
+                else:
+                    locked_inputs.append(test_input)
             else:
-                unlocked_inputs.append(test_input)
+                if shard_ref_tests and self._is_ref_test(test_input):
+                    unlocked_ref_test_inputs.append(test_input)
+                else:
+                    unlocked_inputs.append(test_input)
+        locked_inputs.extend(locked_ref_test_inputs)
+        unlocked_inputs.extend(unlocked_ref_test_inputs)

         locked_shards = []
         unlocked_shards = []
         if locked_inputs:
             locked_shards = [TestShard('locked_tests', locked_inputs)]
         if unlocked_inputs:
-            unlocked_shards = [TestShard('unlocked_tests', unlocked_inputs)]
+            unlocked_shards.append(TestShard('unlocked_tests', unlocked_inputs))

         return locked_shards, unlocked_shards

@@ -627,7 +626,7 @@ class Manager(object):

         return locked_shards, unlocked_shards

-    def _shard_by_directory(self, test_files, num_workers):
+    def _shard_by_directory(self, test_files, num_workers, shard_ref_tests):
         """Returns two lists of shards, each shard containing all the files in a directory.

         This is the default mode, and gets as much parallelism as we can while
@@ -635,13 +634,18 @@ class Manager(object):
         locked_shards = []
         unlocked_shards = []
         tests_by_dir = {}
+        ref_tests_by_dir = {}
         # FIXME: Given that the tests are already sorted by directory,
         # we can probably rewrite this to be clearer and faster.
         for test_file in test_files:
             directory = self._get_dir_for_test_file(test_file)
             test_input = self._get_test_input_for_file(test_file)
-            tests_by_dir.setdefault(directory, [])
-            tests_by_dir[directory].append(test_input)
+            if shard_ref_tests and self._is_ref_test(test_input):
+                ref_tests_by_dir.setdefault(directory, [])
+                ref_tests_by_dir[directory].append(test_input)
+            else:
+                tests_by_dir.setdefault(directory, [])
+                tests_by_dir[directory].append(test_input)

         for directory, test_inputs in tests_by_dir.iteritems():
             shard = TestShard(directory, test_inputs)
@@ -650,6 +654,14 @@ class Manager(object):
             else:
                 unlocked_shards.append(shard)

+        for directory, test_inputs in ref_tests_by_dir.iteritems():
+            # '~' places the ref tests after the other tests when sorted.
+            shard = TestShard('~ref:' + directory, test_inputs)
+            if self._test_requires_lock(directory):
+                locked_shards.append(shard)
+            else:
+                unlocked_shards.append(shard)
+
         # Sort the shards by directory name.
         locked_shards.sort(key=lambda shard: shard.name)
         unlocked_shards.sort(key=lambda shard: shard.name)
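The `'~ref:'` prefix is a small sorting trick: the shards are sorted by name just above, and `'~'` collates after every ASCII letter, so ref-test shards always land at the end of the run. That keeps reference tests together, which is the point of `shard_ref_tests`:

```python
shards = ['animations', '~ref:dom/html', 'fast/css', '~ref:ietestcenter']
# '~' (0x7E) sorts after all letters, so the ref-test shards come last.
assert sorted(shards) == ['animations', 'fast/css', '~ref:dom/html', '~ref:ietestcenter']
```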
@@ -706,16 +718,6 @@ class Manager(object):
                 extract_and_flatten(some_shards)))
         return new_shards

-    def _log_num_workers(self, num_workers, num_shards, num_locked_shards):
-        driver_name = self._port.driver_name()
-        if num_workers == 1:
-            self._printer.print_config("Running 1 %s over %s." %
-                (driver_name, grammar.pluralize('shard', num_shards)))
-        else:
-            self._printer.print_config("Running %d %ss in parallel over %d shards (%d locked)." %
-                (num_workers, driver_name, num_shards, num_locked_shards))
-        self._printer.print_config('')
-
     def _run_tests(self, file_list, result_summary, num_workers):
         """Runs the tests in the file_list.
@@ -740,8 +742,8 @@ class Manager(object):
         keyboard_interrupted = False
         interrupted = False

-        self._printer.print_update('Sharding tests ...')
-        locked_shards, unlocked_shards = self._shard_tests(file_list, int(self._options.child_processes), self._options.fully_parallel)
+        self._printer.write_update('Sharding tests ...')
+        locked_shards, unlocked_shards = self._shard_tests(file_list, int(self._options.child_processes), self._options.fully_parallel, self._options.shard_ref_tests)

         # FIXME: We don't have a good way to coordinate the workers so that
         # they don't try to run the shards that need a lock if we don't actually
@@ -757,7 +759,7 @@ class Manager(object):
             self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))

         num_workers = min(num_workers, len(all_shards))
-        self._log_num_workers(num_workers, len(all_shards), len(locked_shards))
+        self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

         def worker_factory(worker_connection):
             return worker.Worker(worker_connection, self.results_directory(), self._options)

         if self._options.dry_run:
             return (keyboard_interrupted, interrupted, self._worker_stats.values(), self._group_stats, self._all_results)

-        self._printer.print_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
+        self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

         try:
             with message_pool.get(self, worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
@@ -793,9 +795,6 @@ class Manager(object):
             self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries'))
         return self._filesystem.join(self._results_directory, 'retries')

-    def update(self):
-        self.update_summary(self._current_result_summary)
-
     def needs_servers(self):
         return any(self._test_requires_lock(test_name) for test_name in self._test_files) and self._options.http

@@ -809,12 +808,12 @@ class Manager(object):
         # This must be started before we check the system dependencies,
         # since the helper may do things to make the setup correct.
         if self._options.pixel_tests:
-            self._printer.print_update("Starting pixel test helper ...")
+            self._printer.write_update("Starting pixel test helper ...")
             self._port.start_helper()

         # Check that the system dependencies (themes, fonts, ...) are correct.
         if not self._options.nocheck_sys_deps:
-            self._printer.print_update("Checking system dependencies ...")
+            self._printer.write_update("Checking system dependencies ...")
             if not self._port.check_sys_deps(self.needs_servers()):
                 self._port.stop_helper()
                 return None
@@ -827,7 +826,7 @@ class Manager(object):

         self._port.setup_test_run()

-        self._printer.print_update("Preparing tests ...")
+        self._printer.write_update("Preparing tests ...")
         result_summary = self.prepare_lists_and_print_output()
         if not result_summary:
             return None
@@ -882,13 +881,9 @@ class Manager(object):
             self._look_for_new_crash_logs(retry_summary, start_time)
         self._clean_up_run()

-        self._print_timing_statistics(end_time - start_time, thread_timings, test_timings, individual_test_timings, result_summary)
-        self._print_result_summary(result_summary)
-
-        self._printer.print_one_line_summary(result_summary.total - result_summary.expected_skips, result_summary.expected - result_summary.expected_skips, result_summary.unexpected)
-
         unexpected_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=True, interrupted=interrupted)
-        self._printer.print_unexpected_results(unexpected_results)
+
+        self._printer.print_results(end_time - start_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results)

         # Re-raise a KeyboardInterrupt if necessary so the caller can handle it.
         if keyboard_interrupted:
@@ -911,25 +906,25 @@ class Manager(object):
         return self._port.exit_code_from_summarized_results(unexpected_results)

     def start_servers_with_lock(self, number_of_servers):
-        self._printer.print_update('Acquiring http lock ...')
+        self._printer.write_update('Acquiring http lock ...')
         self._port.acquire_http_lock()
         if self._http_tests():
-            self._printer.print_update('Starting HTTP server ...')
+            self._printer.write_update('Starting HTTP server ...')
             self._port.start_http_server(number_of_servers=number_of_servers)
         if self._websocket_tests():
-            self._printer.print_update('Starting WebSocket server ...')
+            self._printer.write_update('Starting WebSocket server ...')
             self._port.start_websocket_server()
         self._has_http_lock = True

     def stop_servers_with_lock(self):
         if self._has_http_lock:
             if self._http_tests():
-                self._printer.print_update('Stopping HTTP server ...')
+                self._printer.write_update('Stopping HTTP server ...')
                 self._port.stop_http_server()
             if self._websocket_tests():
-                self._printer.print_update('Stopping WebSocket server ...')
+                self._printer.write_update('Stopping WebSocket server ...')
                 self._port.stop_websocket_server()
-            self._printer.print_update('Releasing server lock ...')
+            self._printer.write_update('Releasing server lock ...')
             self._port.release_http_lock()
             self._has_http_lock = False
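start_servers_with_lock() and stop_servers_with_lock() bracket the shards that need the HTTP/WebSocket servers, and the caller is responsible for always releasing the lock. An illustrative usage pattern (the stub class below only mirrors the two method names from the hunk above, not the real Manager):

```python
class StubManager(object):
    def start_servers_with_lock(self, number_of_servers):
        print('Acquiring http lock ...')

    def stop_servers_with_lock(self):
        print('Releasing server lock ...')

def run_locked_shards(manager):
    manager.start_servers_with_lock(number_of_servers=4)
    try:
        pass  # run the shards that need the servers
    finally:
        manager.stop_servers_with_lock()  # release even if a shard raises

run_locked_shards(StubManager())
```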
@@ -967,17 +962,6 @@ class Manager(object):
             writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
             writer.write_crash_log(crash_log)

-    def update_summary(self, result_summary):
-        """Update the summary and print results with any completed tests."""
-        while True:
-            try:
-                result = test_results.TestResult.loads(self._result_queue.get_nowait())
-            except Queue.Empty:
-                self._printer.print_progress(result_summary, self._retrying, self._test_files_list)
-                return
-
-            self._update_summary_with_result(result_summary, result)
-
     def _mark_interrupted_tests_as_skipped(self, result_summary):
         for test_name in self._test_files:
             if test_name not in result_summary.results:
                 # FIXME: We probably need to loop here if there are multiple iterations.
                 # FIXME: Also, these results are really neither expected nor unexpected. We probably
                 # need a third type of result.
-                result_summary.add(result, expected=False)
+                result_summary.add(result, expected=False, test_is_slow=self._test_is_slow(test_name))

     def _interrupt_if_at_failure_limits(self, result_summary):
         # Note: The messages in this method are constructed to match old-run-webkit-tests
@@ -1010,21 +994,25 @@ class Manager(object):

     def _update_summary_with_result(self, result_summary, result):
         if result.type == test_expectations.SKIP:
-            result_summary.add(result, expected=True)
+            exp_str = got_str = 'SKIP'
+            expected = True
         else:
             expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or test_failures.is_reftest_failure(result.failures))
-            result_summary.add(result, expected)
             exp_str = self._expectations.get_expectations_string(result.test_name)
             got_str = self._expectations.expectation_to_string(result.type)
-        self._printer.print_test_result(result, expected, exp_str, got_str)
-        self._printer.print_progress(result_summary, self._retrying, self._test_files_list)
+
+        result_summary.add(result, expected, self._test_is_slow(result.test_name))
+
+        # FIXME: there are too many arguments to this function.
+        self._printer.print_finished_test(result, expected, exp_str, got_str, result_summary, self._retrying, self._test_files_list)
+
         self._interrupt_if_at_failure_limits(result_summary)

     def _clobber_old_results(self):
         # Just clobber the actual test results directories since the other
         # files in the results directory are explicitly used for cross-run
         # tracking.
-        self._printer.print_update("Clobbering old results in %s" %
+        self._printer.write_update("Clobbering old results in %s" %
                                    self._results_directory)
         layout_tests_dir = self._port.layout_tests_dir()
         possible_dirs = self._port.test_dirs()
@@ -1105,53 +1093,6 @@ class Manager(object):
             self._filesystem.remove(times_json_path)
             self._filesystem.remove(incremental_results_path)

-    def print_config(self):
-        """Prints the configuration for the test run."""
-        p = self._printer
-        p.print_config("Using port '%s'" % self._port.name())
-        p.print_config("Test configuration: %s" % self._port.test_configuration())
-        p.print_config("Placing test results in %s" % self._results_directory)
-        if self._options.new_baseline:
-            p.print_config("Placing new baselines in %s" %
-                           self._port.baseline_path())
-
-        fallback_path = [self._filesystem.split(x)[1] for x in self._port.baseline_search_path()]
-        p.print_config("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
-
-        p.print_config("Using %s build" % self._options.configuration)
-        if self._options.pixel_tests:
-            p.print_config("Pixel tests enabled")
-        else:
-            p.print_config("Pixel tests disabled")
-
-        p.print_config("Regular timeout: %s, slow test timeout: %s" %
-                       (self._options.time_out_ms,
-                        self._options.slow_time_out_ms))
-
-        p.print_config('Command line: ' + ' '.join(self._port.driver_cmd_line()))
-        p.print_config("")
-
-    def _print_expected_results_of_type(self, result_summary,
-                                        result_type, result_type_str):
-        """Print the number of the tests in a given result class.
-
-        Args:
-          result_summary - the object containing all the results to report on
-          result_type - the particular result type to report in the summary.
-          result_type_str - a string description of the result_type.
- """ - tests = self._expectations.get_tests_with_result_type(result_type) - now = result_summary.tests_by_timeline[test_expectations.NOW] - wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] - - # We use a fancy format string in order to print the data out in a - # nicely-aligned table. - fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" - % (self._num_digits(now), self._num_digits(wontfix))) - self._printer.print_expected(fmtstr % - (len(tests), result_type_str, len(tests & now), len(tests & wontfix))) - def _num_digits(self, num): """Returns the number of digits needed to represent the length of a sequence.""" @@ -1160,217 +1101,6 @@ class Manager(object): ndigits = int(math.log10(len(num))) + 1 return ndigits - def _print_timing_statistics(self, total_time, thread_timings, - directory_test_timings, individual_test_timings, - result_summary): - """Record timing-specific information for the test run. - - Args: - total_time: total elapsed time (in seconds) for the test run - thread_timings: wall clock time each thread ran for - directory_test_timings: timing by directory - individual_test_timings: timing by file - result_summary: summary object for the test run - """ - self._printer.print_timing("Test timing:") - self._printer.print_timing(" %6.2f total testing time" % total_time) - self._printer.print_timing("") - self._printer.print_timing("Thread timing:") - cuml_time = 0 - for t in thread_timings: - self._printer.print_timing(" %10s: %5d tests, %6.2f secs" % - (t['name'], t['num_tests'], t['total_time'])) - cuml_time += t['total_time'] - self._printer.print_timing(" %6.2f cumulative, %6.2f optimal" % - (cuml_time, cuml_time / int(self._options.child_processes))) - self._printer.print_timing("") - - self._print_aggregate_test_statistics(individual_test_timings) - self._print_individual_test_times(individual_test_timings, - result_summary) - self._print_directory_timings(directory_test_timings) - - def _print_aggregate_test_statistics(self, individual_test_timings): - """Prints aggregate statistics (e.g. median, mean, etc.) for all tests. - Args: - individual_test_timings: List of TestResults for all tests. - """ - times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings] - self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", - times_for_dump_render_tree) - - def _print_individual_test_times(self, individual_test_timings, - result_summary): - """Prints the run times for slow, timeout and crash tests. - Args: - individual_test_timings: List of TestStats for all tests. - result_summary: summary object for test run - """ - # Reverse-sort by the time spent in DumpRenderTree. 
-        individual_test_timings.sort(lambda a, b:
-            cmp(b.test_run_time, a.test_run_time))
-
-        num_printed = 0
-        slow_tests = []
-        timeout_or_crash_tests = []
-        unexpected_slow_tests = []
-        for test_tuple in individual_test_timings:
-            test_name = test_tuple.test_name
-            is_timeout_crash_or_slow = False
-            if self._test_is_slow(test_name):
-                is_timeout_crash_or_slow = True
-                slow_tests.append(test_tuple)
-
-            if test_name in result_summary.failures:
-                result = result_summary.results[test_name].type
-                if (result == test_expectations.TIMEOUT or
-                    result == test_expectations.CRASH):
-                    is_timeout_crash_or_slow = True
-                    timeout_or_crash_tests.append(test_tuple)
-
-            if (not is_timeout_crash_or_slow and
-                num_printed < printing.NUM_SLOW_TESTS_TO_LOG):
-                num_printed = num_printed + 1
-                unexpected_slow_tests.append(test_tuple)
-
-        self._printer.print_timing("")
-        self._print_test_list_timing("%s slowest tests that are not "
-            "marked as SLOW and did not timeout/crash:" %
-            printing.NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
-        self._printer.print_timing("")
-        self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
-        self._printer.print_timing("")
-        self._print_test_list_timing("Tests that timed out or crashed:",
-                                     timeout_or_crash_tests)
-        self._printer.print_timing("")
-
-    def _print_test_list_timing(self, title, test_list):
-        """Print timing info for each test.
-
-        Args:
-          title: section heading
-          test_list: tests that fall in this section
-        """
-        if self._printer.disabled('slowest'):
-            return
-
-        self._printer.print_timing(title)
-        for test_tuple in test_list:
-            test_run_time = round(test_tuple.test_run_time, 1)
-            self._printer.print_timing("  %s took %s seconds" % (test_tuple.test_name, test_run_time))
-
-    def _print_directory_timings(self, directory_test_timings):
-        """Print timing info by directory for any directories that
-        take > 10 seconds to run.
-
-        Args:
-          directory_test_timing: time info for each directory
-        """
-        timings = []
-        for directory in directory_test_timings:
-            num_tests, time_for_directory = directory_test_timings[directory]
-            timings.append((round(time_for_directory, 1), directory, num_tests))
-        timings.sort()
-
-        self._printer.print_timing("Time to process slowest subdirectories:")
-        min_seconds_to_print = 10
-        for timing in timings:
-            if timing[0] > min_seconds_to_print:
-                self._printer.print_timing("  %s took %s seconds to run %s tests." % (timing[1], timing[0], timing[2]))
-        self._printer.print_timing("")
-
-    def _print_statistics_for_test_timings(self, title, timings):
-        """Prints the median, mean and standard deviation of the values in timings.
-
-        Args:
-          title: Title for these timings.
-          timings: A list of floats representing times.
- """ - self._printer.print_timing(title) - timings.sort() - - num_tests = len(timings) - if not num_tests: - return - percentile90 = timings[int(.9 * num_tests)] - percentile99 = timings[int(.99 * num_tests)] - - if num_tests % 2 == 1: - median = timings[((num_tests - 1) / 2) - 1] - else: - lower = timings[num_tests / 2 - 1] - upper = timings[num_tests / 2] - median = (float(lower + upper)) / 2 - - mean = sum(timings) / num_tests - - for timing in timings: - sum_of_deviations = math.pow(timing - mean, 2) - - std_deviation = math.sqrt(sum_of_deviations / num_tests) - self._printer.print_timing(" Median: %6.3f" % median) - self._printer.print_timing(" Mean: %6.3f" % mean) - self._printer.print_timing(" 90th percentile: %6.3f" % percentile90) - self._printer.print_timing(" 99th percentile: %6.3f" % percentile99) - self._printer.print_timing(" Standard dev: %6.3f" % std_deviation) - self._printer.print_timing("") - - def _print_result_summary(self, result_summary): - """Print a short summary about how many tests passed. - - Args: - result_summary: information to log - """ - failed = result_summary.total_failures - total = result_summary.total - result_summary.expected_skips - passed = total - failed - pct_passed = 0.0 - if total > 0: - pct_passed = float(passed) * 100 / total - - self._printer.print_actual("") - self._printer.print_actual("=> Results: %d/%d tests passed (%.1f%%)" % - (passed, total, pct_passed)) - self._printer.print_actual("") - self._print_result_summary_entry(result_summary, - test_expectations.NOW, "Tests to be fixed") - - self._printer.print_actual("") - self._print_result_summary_entry(result_summary, - test_expectations.WONTFIX, - "Tests that will only be fixed if they crash (WONTFIX)") - self._printer.print_actual("") - - def _print_result_summary_entry(self, result_summary, timeline, - heading): - """Print a summary block of results for a particular timeline of test. - - Args: - result_summary: summary to print results for - timeline: the timeline to print results for (NOT, WONTFIX, etc.) 
-          heading: a textual description of the timeline
-        """
-        total = len(result_summary.tests_by_timeline[timeline])
-        not_passing = (total -
-            len(result_summary.tests_by_expectation[test_expectations.PASS] &
-                result_summary.tests_by_timeline[timeline]))
-        self._printer.print_actual("=> %s (%d):" % (heading, not_passing))
-
-        for result in TestExpectations.EXPECTATION_ORDER:
-            if result == test_expectations.PASS:
-                continue
-            results = (result_summary.tests_by_expectation[result] &
-                       result_summary.tests_by_timeline[timeline])
-            desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
-            if not_passing and len(results):
-                pct = len(results) * 100.0 / not_passing
-                self._printer.print_actual("  %5d %-24s (%4.1f%%)" %
-                    (len(results), desc[len(results) != 1], pct))
-
     def _copy_results_html_file(self):
         base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
         results_file = self._filesystem.join(base_dir, 'results.html')
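Two quirks in the deleted `_print_statistics_for_test_timings` are worth flagging for anyone reviving it: the loop assigned `sum_of_deviations` instead of accumulating it, so the standard deviation reflected only the last sample, and the odd-length median indexed one element too low. A corrected, self-contained version of just the math (an illustrative rewrite, not code from this commit):

```python
import math

def timing_statistics(timings):
    """Median, mean, percentiles and standard deviation of a list of floats."""
    timings = sorted(timings)
    n = len(timings)
    if not n:
        return None
    if n % 2 == 1:
        median = timings[(n - 1) // 2]  # was timings[((n - 1) / 2) - 1]
    else:
        median = (timings[n // 2 - 1] + timings[n // 2]) / 2.0
    mean = sum(timings) / n
    sum_of_deviations = 0.0
    for t in timings:
        sum_of_deviations += (t - mean) ** 2  # was '=' in the deleted code
    return {
        'median': median,
        'mean': mean,
        'p90': timings[min(n - 1, int(.9 * n))],
        'p99': timings[min(n - 1, int(.99 * n))],
        'stddev': math.sqrt(sum_of_deviations / n),
    }
```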
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
index 27f06a70e..ae20a8a50 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -59,9 +59,16 @@ from webkitpy.common.host_mock import MockHost

 class ManagerWrapper(Manager):
+    def __init__(self, ref_tests, **kwargs):
+        Manager.__init__(self, **kwargs)
+        self._ref_tests = ref_tests
+
     def _get_test_input_for_file(self, test_file):
         return test_file

+    def _is_ref_test(self, test_input):
+        return test_input in self._ref_tests
+

 class ShardingTests(unittest.TestCase):
     test_list = [
@@ -77,14 +84,21 @@ class ShardingTests(unittest.TestCase):
         "perf/object-keys.html",
     ]

-    def get_shards(self, num_workers, fully_parallel, test_list=None, max_locked_shards=None):
+    ref_tests = [
+        "http/tests/security/view-source-no-refresh.html",
+        "http/tests/websocket/tests/websocket-protocol-ignored.html",
+        "ietestcenter/Javascript/11.1.5_4-4-c-1.html",
+        "dom/html/level2/html/HTMLAnchorElement06.html",
+    ]
+
+    def get_shards(self, num_workers, fully_parallel, shard_ref_tests=False, test_list=None, max_locked_shards=None):
         test_list = test_list or self.test_list
         host = MockHost()
         port = host.port_factory.get(port_name='test')
         port._filesystem = MockFileSystem()
         options = MockOptions(max_locked_shards=max_locked_shards)
-        self.manager = ManagerWrapper(port=port, options=options, printer=Mock())
-        return self.manager._shard_tests(test_list, num_workers, fully_parallel)
+        self.manager = ManagerWrapper(self.ref_tests, port=port, options=options, printer=Mock())
+        return self.manager._shard_tests(test_list, num_workers, fully_parallel, shard_ref_tests)

     def test_shard_by_dir(self):
         locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False)
@@ -110,6 +124,31 @@ class ShardingTests(unittest.TestCase):
                           TestShard('ietestcenter/Javascript',
                                     ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])

+    def test_shard_by_dir_sharding_ref_tests(self):
+        locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False, shard_ref_tests=True)
+
+        # Note that although there are tests in multiple dirs that need locks,
+        # they are crammed into a single shard in order to reduce the # of
+        # workers hitting the server at once.
+        self.assertEquals(locked,
+            [TestShard('locked_shard_1',
+                       ['http/tests/websocket/tests/unicode.htm',
+                        'http/tests/xmlhttprequest/supported-xml-content-types.html',
+                        'perf/object-keys.html',
+                        'http/tests/security/view-source-no-refresh.html',
+                        'http/tests/websocket/tests/websocket-protocol-ignored.html'])])
+        self.assertEquals(unlocked,
+            [TestShard('animations',
+                       ['animations/keyframes.html']),
+             TestShard('dom/html/level2/html',
+                       ['dom/html/level2/html/HTMLAnchorElement03.html']),
+             TestShard('fast/css',
+                       ['fast/css/display-none-inline-style-change-crash.html']),
+             TestShard('~ref:dom/html/level2/html',
+                       ['dom/html/level2/html/HTMLAnchorElement06.html']),
+             TestShard('~ref:ietestcenter/Javascript',
+                       ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
+
     def test_shard_every_file(self):
         locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True)
         self.assertEquals(locked,
@@ -142,6 +181,23 @@ class ShardingTests(unittest.TestCase):
                         'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
                         'dom/html/level2/html/HTMLAnchorElement06.html'])])

+    def test_shard_in_two_sharding_ref_tests(self):
+        locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, shard_ref_tests=True)
+        self.assertEquals(locked,
+            [TestShard('locked_tests',
+                       ['http/tests/websocket/tests/unicode.htm',
+                        'http/tests/xmlhttprequest/supported-xml-content-types.html',
+                        'perf/object-keys.html',
+                        'http/tests/security/view-source-no-refresh.html',
+                        'http/tests/websocket/tests/websocket-protocol-ignored.html'])])
+        self.assertEquals(unlocked,
+            [TestShard('unlocked_tests',
+                       ['animations/keyframes.html',
+                        'fast/css/display-none-inline-style-change-crash.html',
+                        'dom/html/level2/html/HTMLAnchorElement03.html',
+                        'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
+                        'dom/html/level2/html/HTMLAnchorElement06.html'])])
+
     def test_shard_in_two_has_no_locked_shards(self):
         locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
             test_list=['animations/keyframe.html'])
@@ -200,25 +256,6 @@ class ManagerTest(unittest.TestCase):
     def get_options(self):
         return MockOptions(pixel_tests=False, new_baseline=False, time_out_ms=6000, slow_time_out_ms=30000, worker_model='inline')

-    def get_printer(self):
-        class FakePrinter(object):
-            def __init__(self):
-                self.output = []
-
-            def print_config(self, msg):
-                self.output.append(msg)
-
-        return FakePrinter()
-
-    def test_fallback_path_in_config(self):
-        options = self.get_options()
-        host = MockHost()
-        port = host.port_factory.get('test-mac-leopard', options=options)
-        printer = self.get_printer()
-        manager = Manager(port, options, printer)
-        manager.print_config()
-        self.assertTrue('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in printer.output)
-
     def test_http_locking(tester):
         options, args = run_webkit_tests.parse_args(['--platform=test', '--print=nothing', 'http/tests/passes', 'passes'])
         host = MockHost()
@@ -253,6 +290,8 @@ class ManagerTest(unittest.TestCase):
         manager._options = MockOptions(exit_after_n_failures=None, exit_after_n_crashes_or_timeouts=None)
         manager._test_files = ['foo/bar.html', 'baz.html']

+        manager._test_is_slow = lambda test_name: False
+
         result_summary = ResultSummary(expectations=Mock(), test_files=manager._test_files)
         result_summary.unexpected_failures = 100
         result_summary.unexpected_crashes = 50
@@ -486,29 +525,30 @@ class ResultSummaryTest(unittest.TestCase):
         if extra_expectations:
             expectations += extra_expectations

+        test_is_slow = False
         paths, rs, exp = self.get_result_summary(port, tests, expectations)
         if expected:
-            rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected)
-            rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected)
-            rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected)
+            rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
         elif passing:
-            rs.add(self.get_result('passes/text.html'), expected)
-            rs.add(self.get_result('failures/expected/timeout.html'), expected)
-            rs.add(self.get_result('failures/expected/crash.html'), expected)
+            rs.add(self.get_result('passes/text.html'), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/timeout.html'), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/crash.html'), expected, test_is_slow)
         else:
-            rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected)
-            rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected)
-            rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected)
+            rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow)
+            rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
         for test in extra_tests:
-            rs.add(self.get_result(test, test_expectations.CRASH), expected)
+            rs.add(self.get_result(test, test_expectations.CRASH), expected, test_is_slow)

         retry = rs
         if flaky:
             paths, retry, exp = self.get_result_summary(port, tests, expectations)
-            retry.add(self.get_result('passes/text.html'), True)
-            retry.add(self.get_result('failures/expected/timeout.html'), True)
-            retry.add(self.get_result('failures/expected/crash.html'), True)
+            retry.add(self.get_result('passes/text.html'), True, test_is_slow)
+            retry.add(self.get_result('failures/expected/timeout.html'), True, test_is_slow)
+            retry.add(self.get_result('failures/expected/crash.html'), True, test_is_slow)
         unexpected_results = manager.summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
         expected_results = manager.summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=False, interrupted=False)
         return expected_results, unexpected_results
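ManagerWrapper in the hunks above is a test double: it subclasses Manager and overrides `_get_test_input_for_file` and `_is_ref_test` so sharding can be exercised with plain strings and a fixed ref-test list instead of a real port. The pattern in miniature (illustrative classes, not the webkitpy ones):

```python
class Sharder(object):
    def _is_ref_test(self, test_input):
        raise NotImplementedError  # normally asks the port for reference files

class FakeSharder(Sharder):
    def __init__(self, ref_tests):
        self._ref_tests = set(ref_tests)

    def _is_ref_test(self, test_input):
        # Overridden with a fixed membership check, like ManagerWrapper above.
        return test_input in self._ref_tests

sharder = FakeSharder(['dom/html/level2/html/HTMLAnchorElement06.html'])
assert sharder._is_ref_test('dom/html/level2/html/HTMLAnchorElement06.html')
assert not sharder._is_ref_test('animations/keyframes.html')
```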
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
index 837aea86b..378826c51 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
@@ -83,7 +83,9 @@ class Worker(object):
         self._caller.post('finished_test_list', test_list_name, len(test_inputs), elapsed_time)

     def _update_test_input(self, test_input):
-        test_input.reference_files = self._port.reference_files(test_input.test_name)
+        if test_input.reference_files is None:
+            # Lazy initialization.
+            test_input.reference_files = self._port.reference_files(test_input.test_name)
         if test_input.reference_files:
             test_input.should_run_pixel_test = True
         elif self._options.pixel_tests:

diff --git a/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py b/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
index d46703e8f..b0512127f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
@@ -55,8 +55,9 @@ class ResultSummary(object):
             self.total_tests_by_expectation[expectation] = 0
         for timeline in TestExpectations.TIMELINES.values():
             self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
+        self.slow_tests = set()

-    def add(self, test_result, expected):
+    def add(self, test_result, expected, test_is_slow):
         self.total_tests_by_expectation[test_result.type] += 1
         self.tests_by_expectation[test_result.type].add(test_result.test_name)
         self.results[test_result.test_name] = test_result
@@ -77,3 +78,5 @@ class ResultSummary(object):
                 self.unexpected_crashes += 1
             elif test_result.type == TIMEOUT:
                 self.unexpected_timeouts += 1
+        if test_is_slow:
+            self.slow_tests.add(test_result.test_name)

diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index fbf0b930b..dda129f2f 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -1019,6 +1019,15 @@ class Port(object):
     def default_configuration(self):
         return self._config.default_configuration()

+    def process_kill_time(self):
+        """Returns the amount of time in seconds to wait before killing the process.
+
+        Within server_process.stop there is a time delta before the test is explicitly
+        killed. By changing this the time can be extended in case the process needs
+        more time to cleanly exit on its own.
+        """
+        return 3.0
+
     #
     # PROTECTED ROUTINES
     #
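The new Port.process_kill_time() hook feeds the wait-then-kill pattern used when stopping a driver process: poll for up to that many seconds, then force-kill. Stripped of the webkitpy plumbing, the pattern looks roughly like this (`proc` is assumed to be a subprocess.Popen; the real code kills via the port's executive object):

```python
import time

def stop_process(proc, kill_time=3.0):
    deadline = time.time() + kill_time
    # Give the child a chance to exit cleanly on its own ...
    while proc.poll() is None and time.time() < deadline:
        time.sleep(0.1)
    # ... then force-kill it so a buggy driver can't hang the harness.
    if proc.poll() is None:
        proc.kill()
        proc.wait()
```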
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
index 24f7efa0f..45298c634 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -272,9 +272,6 @@ class ChromiumPort(WebKitPort):
         except AssertionError:
             return self._build_path(self.get_option('configuration'), 'layout-test-results')

-    def _driver_class(self):
-        return ChromiumDriver
-
     def _missing_symbol_to_skipped_tests(self):
         # FIXME: Should WebKitPort have these definitions also?
         return {
@@ -436,283 +433,3 @@ class ChromiumPort(WebKitPort):
         if sys.platform == 'cygwin':
             return cygpath(path)
         return path
-
-
-class ChromiumDriver(WebKitDriver):
-    KILL_TIMEOUT_DEFAULT = 3.0
-
-    def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
-        WebKitDriver.__init__(self, port, worker_number, pixel_tests, no_timeout)
-        self._proc = None
-        self._image_path = None
-
-        # FIXME: Delete all of this driver code once we're satisfied that it's not needed any more.
-        #if port.host.platform.os_version == 'snowleopard':
-        #    if not hasattr(port._options, 'additional_drt_flag'):
-        #        port._options.additional_drt_flag = []
-        #    if not '--test-shell' in port._options.additional_drt_flag:
-        #        port._options.additional_drt_flag.append('--test-shell')
-
-        self._test_shell = '--test-shell' in port.get_option('additional_drt_flag', [])
-
-    def _wrapper_options(self, pixel_tests):
-        cmd = []
-        if pixel_tests:
-            if self._test_shell:
-                if not self._image_path:
-                    self._image_path = self._port._filesystem.join(self._port.results_directory(), 'png_result%s.png' % self._worker_number)
-                # See note above in diff_image() for why we need _convert_path().
-                cmd.append("--pixel-tests=" + self._port._convert_path(self._image_path))
-            else:
-                cmd.append('--pixel-tests')
-
-        # FIXME: This "is not None" shouldn't be necessary, unless --js-flags="''" changes behavior somehow?
-        if self._port.get_option('js_flags') is not None:
-            cmd.append('--js-flags="' + self._port.get_option('js_flags') + '"')
-        if self._no_timeout:
-            cmd.append("--no-timeout")
-
-        # FIXME: We should be able to build this list using only an array of
-        # option names, the options (optparse.Values) object, and the original
-        # list of options from the main method by looking up the option
-        # text from the options list if the value is non-None.
-        # FIXME: How many of these options are still used?
-        option_mappings = {
-            'startup_dialog': '--testshell-startup-dialog',
-            'gp_fault_error_box': '--gp-fault-error-box',
-            'stress_opt': '--stress-opt',
-            'stress_deopt': '--stress-deopt',
-            'threaded_compositing': '--enable-threaded-compositing',
-            'accelerated_2d_canvas': '--enable-accelerated-2d-canvas',
-            'accelerated_painting': '--enable-accelerated-painting',
-            'accelerated_video': '--enable-accelerated-video',
-            'enable_hardware_gpu': '--enable-hardware-gpu',
-            'per_tile_painting': '--enable-per-tile-painting',
-        }
-        for nrwt_option, drt_option in option_mappings.items():
-            if self._port.get_option(nrwt_option):
-                cmd.append(drt_option)
-
-        cmd.extend(self._port.get_option('additional_drt_flag', []))
-        return cmd
-
-    def cmd_line(self, pixel_tests, per_test_args):
-        cmd = self._command_wrapper(self._port.get_option('wrapper'))
-        cmd.append(self._port._path_to_driver())
-        cmd.extend(self._wrapper_options(pixel_tests))
-        cmd.extend(per_test_args)
-
-        if not self._test_shell:
-            cmd.append('-')
-
-        return cmd
-
-    def _start(self, pixel_tests, per_test_args):
-        if not self._test_shell:
-            return super(ChromiumDriver, self)._start(pixel_tests, per_test_args)
-
-        assert not self._proc
-        # FIXME: This should use ServerProcess like WebKitDriver does.
-        # FIXME: We should be reading stderr and stdout separately like how WebKitDriver does.
-        close_fds = sys.platform != 'win32'
-        self._proc = subprocess.Popen(self.cmd_line(pixel_tests, per_test_args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=close_fds)
-
-    def has_crashed(self):
-        if not self._test_shell:
-            return super(ChromiumDriver, self).has_crashed()
-
-        if self._proc is None:
-            return False
-        return self._proc.poll() is not None
-
-    def _write_command_and_read_line(self, input=None):
-        """Returns a tuple: (line, did_crash)"""
-        try:
-            if input:
-                if isinstance(input, unicode):
-                    # DRT expects utf-8
-                    input = input.encode("utf-8")
-                self._proc.stdin.write(input)
-            # DumpRenderTree text output is always UTF-8. However some tests
-            # (e.g. webarchive) may spit out binary data instead of text so we
-            # don't bother to decode the output.
-            line = self._proc.stdout.readline()
-            # We could assert() here that line correctly decodes as UTF-8.
-            return (line, False)
-        except IOError, e:
-            _log.error("IOError communicating w/ DRT: " + str(e))
-            return (None, True)
-
-    def _test_shell_command(self, uri, timeoutms, checksum):
-        cmd = uri
-        if timeoutms:
-            cmd += ' ' + str(timeoutms)
-        if checksum:
-            cmd += ' ' + checksum
-        cmd += "\n"
-        return cmd
-
-    def _output_image(self):
-        if self._image_path and self._port._filesystem.exists(self._image_path):
-            return self._port._filesystem.read_binary_file(self._image_path)
-        return None
-
-    def _output_image_with_retry(self):
-        # Retry a few more times because open() sometimes fails on Windows,
-        # raising "IOError: [Errno 13] Permission denied:"
-        retry_num = 50
-        timeout_seconds = 5.0
-        for _ in range(retry_num):
-            try:
-                return self._output_image()
-            except IOError, e:
-                if e.errno != errno.EACCES:
-                    raise e
-            # FIXME: We should have a separate retry delay.
-            # This implementation is likely to exceed the timeout before the expected number of retries.
-            time.sleep(timeout_seconds / retry_num)
-        return self._output_image()
-
-    def _clear_output_image(self):
-        if self._image_path and self._port._filesystem.exists(self._image_path):
-            self._port._filesystem.remove(self._image_path)
-
-    def run_test(self, driver_input):
-        if not self._test_shell:
-            return super(ChromiumDriver, self).run_test(driver_input)
-
-        if not self._proc:
-            self._start(driver_input.should_run_pixel_test, driver_input.args)
-
-        output = []
-        error = []
-        crash = False
-        timeout = False
-        actual_uri = None
-        actual_checksum = None
-        self._clear_output_image()
-        start_time = time.time()
-        has_audio = False
-        has_base64 = False
-
-        uri = self.test_to_uri(driver_input.test_name)
-        cmd = self._test_shell_command(uri, driver_input.timeout, driver_input.image_hash)
-        line, crash = self._write_command_and_read_line(input=cmd)
-
-        while not crash and line.rstrip() != "#EOF":
-            # Make sure we haven't crashed.
-            if line == '' and self._proc.poll() is not None:
-                # This is hex code 0xc000001d, which is used for abrupt
-                # termination. This happens if we hit ctrl+c from the prompt
-                # and we happen to be waiting on DRT.
-                # sdoyon: Not sure for which OS and in what circumstances the
-                # above code is valid. What works for me under Linux to detect
-                # ctrl+c is for the subprocess returncode to be negative
-                # SIGINT. And that agrees with the subprocess documentation.
-                if (-1073741510 == self._proc.returncode or
-                    -signal.SIGINT == self._proc.returncode):
-                    raise KeyboardInterrupt
-                crash = True
-                break
-
-            # Don't include #URL lines in our output
-            if line.startswith("#URL:"):
-                actual_uri = line.rstrip()[5:]
-                if uri != actual_uri:
-                    # GURL capitalizes the drive letter of a file URL.
-                    if (not re.search("^file:///[a-z]:", uri) or uri.lower() != actual_uri.lower()):
-                        _log.fatal("Test got out of sync:\n|%s|\n|%s|" % (uri, actual_uri))
-                        raise AssertionError("test out of sync")
-            elif line.startswith("#MD5:"):
-                actual_checksum = line.rstrip()[5:]
-            elif line.startswith("#TEST_TIMED_OUT"):
-                timeout = True
-                # Test timed out, but we still need to read until #EOF.
- elif line.startswith("Content-Type: audio/wav"): - has_audio = True - elif line.startswith("Content-Transfer-Encoding: base64"): - has_base64 = True - elif line.startswith("Content-Length:"): - pass - elif actual_uri: - output.append(line) - else: - error.append(line) - - line, crash = self._write_command_and_read_line(input=None) - - if crash and line is not None: - error.append(line) - run_time = time.time() - start_time - output_image = self._output_image_with_retry() - - audio_bytes = None - text = None - if has_audio: - if has_base64: - audio_bytes = base64.b64decode(''.join(output)) - else: - audio_bytes = ''.join(output).rstrip() - else: - text = ''.join(output) - if not text: - text = None - - error = ''.join(error) - # Currently the stacktrace is in the text output, not error, so append the two together so - # that we can see stack in the output. See http://webkit.org/b/66806 - # FIXME: We really should properly handle the stderr output separately. - crash_log = '' - crashed_process_name = None - crashed_pid = None - if crash: - crashed_process_name = self._port.driver_name() - if self._proc: - crashed_pid = self._proc.pid - crash_log = self._port._get_crash_log(crashed_process_name, crashed_pid, text, error, newer_than=start_time) - if text: - error = error + text - - return DriverOutput(text, output_image, actual_checksum, audio=audio_bytes, - crash=crash, crashed_process_name=crashed_process_name, crashed_pid=crashed_pid, crash_log=crash_log, - test_time=run_time, timeout=timeout, error=error) - - def start(self, pixel_tests, per_test_args): - if not self._test_shell: - return super(ChromiumDriver, self).start(pixel_tests, per_test_args) - - if not self._proc: - self._start(pixel_tests, per_test_args) - - def stop(self): - if not self._test_shell: - return super(ChromiumDriver, self).stop() - - if not self._proc: - return - self._proc.stdin.close() - self._proc.stdout.close() - if self._proc.stderr: - self._proc.stderr.close() - time_out_ms = self._port.get_option('time_out_ms') - if time_out_ms and not self._no_timeout: - timeout_ratio = float(time_out_ms) / self._port.default_timeout_ms() - kill_timeout_seconds = self.KILL_TIMEOUT_DEFAULT * timeout_ratio if timeout_ratio > 1.0 else self.KILL_TIMEOUT_DEFAULT - else: - kill_timeout_seconds = self.KILL_TIMEOUT_DEFAULT - - # Closing stdin/stdout/stderr hangs sometimes on OS X, - # (see __init__(), above), and anyway we don't want to hang - # the harness if DRT is buggy, so we wait a couple - # seconds to give DRT a chance to clean up, but then - # force-kill the process if necessary. - timeout = time.time() + kill_timeout_seconds - while self._proc.poll() is None and time.time() < timeout: - time.sleep(0.1) - if self._proc.poll() is None: - _log.warning('stopping test driver timed out, killing it') - self._port._executive.kill_process(self._proc.pid) - # FIXME: This is sometime None. What is wrong? 
assert self._proc.poll() is not None - if self._proc.poll() is not None: - self._proc.wait() - self._proc = None diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py index 2240657c1..126b31868 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py @@ -29,15 +29,14 @@ import logging import os -import re -import signal import shlex -import shutil import threading import time from webkitpy.layout_tests.port import chromium from webkitpy.layout_tests.port import factory +from webkitpy.layout_tests.port import server_process +from webkitpy.layout_tests.port import webkit _log = logging.getLogger(__name__) @@ -123,8 +122,7 @@ DEVICE_FONTS_DIR = DEVICE_DRT_DIR + 'fonts/' # 1. as a virtual path in file urls that will be bridged to HTTP. # 2. pointing to some files that are pushed to the device for tests that # don't work on file-over-http (e.g. blob protocol tests). -DEVICE_LAYOUT_TESTS_DIR = (DEVICE_SOURCE_ROOT_DIR + 'third_party/WebKit/LayoutTests/') -FILE_TEST_URI_PREFIX = 'file://' + DEVICE_LAYOUT_TESTS_DIR +DEVICE_LAYOUT_TESTS_DIR = DEVICE_SOURCE_ROOT_DIR + 'third_party/WebKit/LayoutTests/' # Test resources that need to be accessed as files directly. # Each item can be the relative path of a directory or a file. @@ -155,22 +153,22 @@ class ChromiumAndroidPort(chromium.ChromiumPort): ] def __init__(self, host, port_name, **kwargs): - chromium.ChromiumPort.__init__(self, host, port_name, **kwargs) + super(ChromiumAndroidPort, self).__init__(host, port_name, **kwargs) - # FIXME: Stop using test_shell mode: https://bugs.webkit.org/show_bug.cgi?id=88542 if not hasattr(self._options, 'additional_drt_flag'): self._options.additional_drt_flag = [] - if not '--test-shell' in self._options.additional_drt_flag: - self._options.additional_drt_flag.append('--test-shell') + self._options.additional_drt_flag.append('--encode-binary') # The Chromium port for Android always uses the hardware GPU path. self._options.enable_hardware_gpu = True + # Shard ref tests so that they run together to avoid repeatedly driver restarts. + self._options.shard_ref_tests = True + self._operating_system = 'android' self._version = 'icecreamsandwich' self._original_governor = None self._android_base_dir = None - self._read_fifo_proc = None self._host_port = factory.PortFactory(host).get('chromium', **kwargs) @@ -197,7 +195,7 @@ class ChromiumAndroidPort(chromium.ChromiumPort): return self._host_port.check_wdiff(logging) def check_build(self, needs_http): - result = chromium.ChromiumPort.check_build(self, needs_http) + result = super(ChromiumAndroidPort, self).check_build(needs_http) result = self.check_wdiff() and result if not result: _log.error('For complete Android build requirements, please see:') @@ -226,7 +224,7 @@ class ChromiumAndroidPort(chromium.ChromiumPort): # FIXME: This is a temporary measure to reduce the manual work when # updating WebKit. This method should be removed when we merge # test_expectations_android.txt into TestExpectations. 
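[Note: the conversions in this hunk, and throughout the file, replace explicit base-class calls with `super()`, which follows the MRO instead of hard-coding the parent class at every call site. The Python 2 spelling, with illustrative classes only:]

    class Base(object):
        def check_build(self, needs_http):
            return True

    class Derived(Base):
        def check_build(self, needs_http):
            # Before: Base.check_build(self, needs_http)
            # After:  super() resolves the parent via the MRO.
            return super(Derived, self).check_build(needs_http)
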
- expectations = chromium.ChromiumPort.test_expectations(self) + expectations = super(ChromiumAndroidPort, self).test_expectations() return expectations.replace('LINUX ', 'LINUX ANDROID ') def start_http_server(self, additional_dirs=None, number_of_servers=0): @@ -256,7 +254,7 @@ class ChromiumAndroidPort(chromium.ChromiumPort): self._run_adb_command(['shell', 'rm', '-r', DRT_APP_CACHE_DIR]) # Start the HTTP server so that the device can access the test cases. - chromium.ChromiumPort.start_http_server(self, additional_dirs={TEST_PATH_PREFIX: self.layout_tests_dir()}) + super(ChromiumAndroidPort, self).start_http_server(additional_dirs={TEST_PATH_PREFIX: self.layout_tests_dir()}) _log.debug('Starting forwarder') self._run_adb_command(['shell', '%s %s' % (DEVICE_FORWARDER_PATH, FORWARD_PORTS)]) @@ -273,6 +271,11 @@ class ChromiumAndroidPort(chromium.ChromiumPort): 'canvas/philip', ]) + def create_driver(self, worker_number, no_timeout=False): + # We don't want the default DriverProxy which is not compatible with our driver. + # See comments in ChromiumAndroidDriver.start(). + return ChromiumAndroidDriver(self, worker_number, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout) + # Overridden private functions. def _build_path(self, *comps): @@ -323,7 +326,7 @@ class ChromiumAndroidPort(chromium.ChromiumPort): if not stderr: stderr = '' stderr += '********* Tombstone file:\n' + self._get_last_stacktrace() - return chromium.ChromiumPort._get_crash_log(self, name, pid, stdout, stderr, newer_than) + return super(ChromiumAndroidPort, self)._get_crash_log(name, pid, stdout, stderr, newer_than) # Local private functions. @@ -453,62 +456,57 @@ class ChromiumAndroidPort(chromium.ChromiumPort): self._original_governor = None -class ChromiumAndroidDriver(chromium.ChromiumDriver): - # The controller may start multiple drivers during test, but for now we - # don't support multiple Android activities, so only one driver can be - # started at a time. - _started_driver = None - +class ChromiumAndroidDriver(webkit.WebKitDriver): def __init__(self, port, worker_number, pixel_tests, no_timeout=False): - chromium.ChromiumDriver.__init__(self, port, worker_number, pixel_tests, no_timeout) + webkit.WebKitDriver.__init__(self, port, worker_number, pixel_tests, no_timeout) + self._pixel_tests = pixel_tests self._in_fifo_path = DRT_APP_FILES_DIR + 'DumpRenderTree.in' self._out_fifo_path = DRT_APP_FILES_DIR + 'DumpRenderTree.out' - self._err_file_path = DRT_APP_FILES_DIR + 'DumpRenderTree.err' + self._err_fifo_path = DRT_APP_FILES_DIR + 'DumpRenderTree.err' self._restart_after_killed = False - self._read_fifo_proc = None + self._read_stdout_process = None + self._read_stderr_process = None def _command_wrapper(cls, wrapper_option): # Ignore command wrapper which is not applicable on Android. 
return [] def cmd_line(self, pixel_tests, per_test_args): - original_cmd = chromium.ChromiumDriver.cmd_line(self, pixel_tests, per_test_args) - cmd = [] - for param in original_cmd: - if param.startswith('--pixel-tests='): - self._device_image_path = DRT_APP_FILES_DIR + self._port.host.filesystem.basename(self._image_path) - param = '--pixel-tests=' + self._device_image_path - cmd.append(param) - - cmd.append('--in-fifo=' + self._in_fifo_path) - cmd.append('--out-fifo=' + self._out_fifo_path) - cmd.append('--err-file=' + self._err_file_path) - return cmd + return self._port._adb_command + ['shell'] def _file_exists_on_device(self, full_file_path): assert full_file_path.startswith('/') return self._port._run_adb_command(['shell', 'ls', full_file_path]).strip() == full_file_path - def _deadlock_detector(self, pids, normal_startup_event): + def _deadlock_detector(self, processes, normal_startup_event): time.sleep(DRT_START_STOP_TIMEOUT_SECS) if not normal_startup_event.is_set(): # If normal_startup_event is not set in time, the main thread must be blocked at # reading/writing the fifo. Kill the fifo reading/writing processes to let the # main thread escape from the deadlocked state. After that, the main thread will # treat this as a crash. - for i in pids: - self._port._executive.kill_process(i) + for i in processes: + i.kill() # Otherwise the main thread has been proceeded normally. This thread just exits silently. - def _start(self, pixel_tests, per_test_args): - if ChromiumAndroidDriver._started_driver: - ChromiumAndroidDriver._started_driver.stop() - - ChromiumAndroidDriver._started_driver = self + def _drt_cmd_line(self, pixel_tests, per_test_args): + return webkit.WebKitDriver.cmd_line(self, pixel_tests, per_test_args) + [ + '--in-fifo=' + self._in_fifo_path, + '--out-fifo=' + self._out_fifo_path, + '--err-fifo=' + self._err_fifo_path, + ] + + def start(self, pixel_tests, per_test_args): + # Only one driver instance is allowed because of the nature of Android activity. + # The single driver needs to switch between pixel test and no pixel test mode by itself. + if pixel_tests != self._pixel_tests: + self.stop() + super(ChromiumAndroidDriver, self).start(pixel_tests, per_test_args) + def _start(self, pixel_tests, per_test_args): retries = 0 while not self._start_once(pixel_tests, per_test_args): - _log.error('Failed to start DumpRenderTree application. Log:\n' + self._port._get_logcat()) + _log.error('Failed to start DumpRenderTree application. Retries=%d. Log:%s' % (retries, self._port._get_logcat())) retries += 1 if retries >= 3: raise AssertionError('Failed to start DumpRenderTree application multiple times. Give up.') @@ -516,8 +514,10 @@ class ChromiumAndroidDriver(chromium.ChromiumDriver): time.sleep(2) def _start_once(self, pixel_tests, per_test_args): + super(ChromiumAndroidDriver, self)._start(pixel_tests, per_test_args) + self._port._run_adb_command(['logcat', '-c']) - self._port._run_adb_command(['shell', 'echo'] + self.cmd_line(pixel_tests, per_test_args) + ['>', COMMAND_LINE_FILE]) + self._port._run_adb_command(['shell', 'echo'] + self._drt_cmd_line(pixel_tests, per_test_args) + ['>', COMMAND_LINE_FILE]) start_result = self._port._run_adb_command(['shell', 'am', 'start', '-e', 'RunInSubThread', '-n', DRT_ACTIVITY_FULL_NAME]) if start_result.find('Exception') != -1: _log.error('Failed to start DumpRenderTree application. 
Exception:\n' + start_result) @@ -526,57 +526,59 @@ class ChromiumAndroidDriver(chromium.ChromiumDriver): seconds = 0 while (not self._file_exists_on_device(self._in_fifo_path) or not self._file_exists_on_device(self._out_fifo_path) or - not self._file_exists_on_device(self._err_file_path)): + not self._file_exists_on_device(self._err_fifo_path)): time.sleep(1) seconds += 1 if seconds >= DRT_START_STOP_TIMEOUT_SECS: return False - shell_cmd = self._port._adb_command + ['shell'] - executive = self._port._executive - # Start a process to send command through the input fifo of the DumpRenderTree app. - # This process must be run as an interactive adb shell because the normal adb shell doesn't support stdin. - self._proc = executive.popen(shell_cmd, stdin=executive.PIPE, stdout=executive.PIPE, universal_newlines=True) # Read back the shell prompt to ensure adb shell ready. - self._read_prompt() + deadline = time.time() + DRT_START_STOP_TIMEOUT_SECS + self._server_process.start() + self._read_prompt(deadline) _log.debug('Interactive shell started') - # Start a process to read from the output fifo of the DumpRenderTree app and print to stdout. + # Start a process to read from the stdout fifo of the DumpRenderTree app and print to stdout. _log.debug('Redirecting stdout to ' + self._out_fifo_path) - self._read_fifo_proc = executive.popen(shell_cmd + ['cat', self._out_fifo_path], - stdout=executive.PIPE, universal_newlines=True) + self._read_stdout_process = server_process.ServerProcess( + self._port, 'ReadStdout', self._port._adb_command + ['shell', 'cat', self._out_fifo_path], universal_newlines=True) + self._read_stdout_process.start() + + # Start a process to read from the stderr fifo of the DumpRenderTree app and print to stdout. + _log.debug('Redirecting stderr to ' + self._err_fifo_path) + self._read_stderr_process = server_process.ServerProcess( + self._port, 'ReadStderr', self._port._adb_command + ['shell', 'cat', self._err_fifo_path], universal_newlines=True) + self._read_stderr_process.start() _log.debug('Redirecting stdin to ' + self._in_fifo_path) - (line, crash) = self._write_command_and_read_line('cat >%s\n' % self._in_fifo_path) + self._server_process.write('cat >%s\n' % self._in_fifo_path) - # Combine the two unidirectional pipes into one bidirectional pipe to make _write_command_and_read_line() etc - # work with self._proc. - self._proc.stdout.close() - self._proc.stdout = self._read_fifo_proc.stdout + # Combine the stdout and stderr pipes into self._server_process. + self._server_process.replace_outputs(self._read_stdout_process._proc.stdout, self._read_stderr_process._proc.stdout) # Start a thread to kill the pipe reading/writing processes on deadlock of the fifos during startup. 
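[Note: the thread started in the next few lines is a startup watchdog: if the fifo plumbing deadlocks, the helper processes are killed so the blocked main thread fails over to the crash path. A condensed sketch of the pattern, assuming each process exposes kill() like ServerProcess:]

    import threading
    import time

    def watch_for_deadlock(processes, started, timeout_seconds):
        # Runs on a helper thread.  If 'started' is not set in time, the
        # main thread is assumed to be blocked on the fifos; killing the
        # reader/writer processes unblocks it and surfaces a crash.
        time.sleep(timeout_seconds)
        if not started.is_set():
            for process in processes:
                process.kill()

    # started = threading.Event()
    # threading.Thread(target=watch_for_deadlock,
    #                  args=(fifo_processes, started, 10)).start()
    # ... perform the potentially blocking fifo reads/writes ...
    # started.set()  # normal startup; the watchdog exits silently
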
normal_startup_event = threading.Event() threading.Thread(target=self._deadlock_detector, - args=([self._proc.pid, self._read_fifo_proc.pid], normal_startup_event)).start() + args=([self._server_process, self._read_stdout_process, self._read_stderr_process], normal_startup_event)).start() output = '' - while not crash and line.rstrip() != '#READY': + line = self._server_process.read_stdout_line(deadline) + while not self._server_process.timed_out and not self.has_crashed() and line.rstrip() != '#READY': output += line - (line, crash) = self._write_command_and_read_line() + line = self._server_process.read_stdout_line(deadline) - if crash: + if self._server_process.timed_out and not self.has_crashed(): # DumpRenderTree crashes during startup, or when the deadlock detector detected # deadlock and killed the fifo reading/writing processes. - _log.error('Failed to start DumpRenderTree: \n%s\nLog:\n%s' % (output, self._port._get_logcat())) - self.stop() - raise AssertionError('Failed to start DumpRenderTree application') + _log.error('Failed to start DumpRenderTree: \n%s' % output) + return False else: # Inform the deadlock detector that the startup is successful without deadlock. normal_startup_event.set() return True def run_test(self, driver_input): - driver_output = chromium.ChromiumDriver.run_test(self, driver_input) + driver_output = super(ChromiumAndroidDriver, self).run_test(driver_input) if driver_output.crash: # When Android is OOM, DRT process may be killed by ActivityManager or system OOM. # It looks like a crash but there is no fatal signal logged. Re-run the test for @@ -595,78 +597,48 @@ class ChromiumAndroidDriver(chromium.ChromiumDriver): return self.run_test(driver_input) self._restart_after_killed = False - driver_output.error += self._get_stderr() return driver_output def stop(self): - if ChromiumAndroidDriver._started_driver != self: - return - ChromiumAndroidDriver._started_driver = None - self._port._run_adb_command(['shell', 'am', 'force-stop', DRT_APP_PACKAGE]) - if self._read_fifo_proc: - self._port._executive.kill_process(self._read_fifo_proc.pid) - self._read_fifo_proc = None - - # Here duplicate some logic in ChromiumDriver.stop() instead of directly calling it, - # because our pipe reading/writing processes won't quit by itself on close of the pipes. - if self._proc: - self._proc.stdin.close() - self._proc.stdout.close() - if self._proc.stderr: - self._proc.stderr.close() - self._port._executive.kill_process(self._proc.pid) - if self._proc.poll() is not None: - self._proc.wait() - self._proc = None + if self._read_stdout_process: + self._read_stdout_process.kill() + self._read_stdout_process = None + + if self._read_stderr_process: + self._read_stderr_process.kill() + self._read_stderr_process = None + + # Stop and kill server_process because our pipe reading/writing processes won't quit + # by itself on close of the pipes. 
+ if self._server_process: + self._server_process.stop(kill_directly=True) + self._server_process = None + super(ChromiumAndroidDriver, self).stop() seconds = 0 while (self._file_exists_on_device(self._in_fifo_path) or self._file_exists_on_device(self._out_fifo_path) or - self._file_exists_on_device(self._err_file_path)): + self._file_exists_on_device(self._err_fifo_path)): time.sleep(1) - self._port._run_adb_command(['shell', 'rm', self._in_fifo_path, self._out_fifo_path, self._err_file_path]) + self._port._run_adb_command(['shell', 'rm', self._in_fifo_path, self._out_fifo_path, self._err_fifo_path]) seconds += 1 if seconds >= DRT_START_STOP_TIMEOUT_SECS: raise AssertionError('Failed to remove fifo files. May be locked.') - def _test_shell_command(self, uri, timeout_ms, checksum): - if uri.startswith('file:///'): - # Convert the host uri to a device uri. See comment of + def _command_from_driver_input(self, driver_input): + command = super(ChromiumAndroidDriver, self)._command_from_driver_input(driver_input) + if command.startswith('/'): + # Convert the host file path to a device file path. See comment of # DEVICE_LAYOUT_TESTS_DIR for details. - # Not overriding Port.filename_to_uri() because we don't want the - # links in the html report point to device paths. - uri = FILE_TEST_URI_PREFIX + self.uri_to_test(uri) - return chromium.ChromiumDriver._test_shell_command(self, uri, timeout_ms, checksum) - - def _write_command_and_read_line(self, input=None): - (line, crash) = chromium.ChromiumDriver._write_command_and_read_line(self, input) - url_marker = '#URL:' - if not crash: - if line.startswith(url_marker) and line.find(FILE_TEST_URI_PREFIX) == len(url_marker): - # Convert the device test uri back to host uri otherwise - # chromium.ChromiumDriver.run_test() will complain. - line = '#URL:file://%s/%s' % (self._port.layout_tests_dir(), line[len(url_marker) + len(FILE_TEST_URI_PREFIX):]) - # chromium.py uses "line == '' and self._proc.poll() is not None" to detect crash, - # but on Android "not line" is enough because self._proc.poll() seems not reliable. 
- if not line: - crash = True - return (line, crash) - - def _output_image(self): - if self._image_path: - _log.debug('Pulling from device: %s to %s' % (self._device_image_path, self._image_path)) - self._port._pull_from_device(self._device_image_path, self._image_path, ignore_error=True) - return chromium.ChromiumDriver._output_image(self) - - def _get_stderr(self): - return self._port._run_adb_command(['shell', 'cat', self._err_file_path], ignore_error=True) - - def _read_prompt(self): + command = DEVICE_LAYOUT_TESTS_DIR + self._port.relative_test_filename(command) + return command + + def _read_prompt(self, deadline): last_char = '' while True: - current_char = self._proc.stdout.read(1) + current_char = self._server_process.read_stdout(deadline, 1) if current_char == ' ': if last_char == '#': return diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py index 8544b020c..a3a3aaeb4 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py @@ -28,16 +28,17 @@ import optparse import StringIO +import time import unittest from webkitpy.common.system import executive_mock from webkitpy.common.system.executive_mock import MockExecutive2 from webkitpy.common.system.systemhost_mock import MockSystemHost -from webkitpy.thirdparty.mock import Mock from webkitpy.layout_tests.port import chromium_android from webkitpy.layout_tests.port import chromium_port_testcase -from webkitpy.layout_tests.port import Port +from webkitpy.layout_tests.port import driver +from webkitpy.layout_tests.port import webkit_unittest class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase): @@ -124,6 +125,10 @@ class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase): u'STDERR: /data/tombstones/tombstone_03\n' u'STDERR: mock_contents\n')) + def test_driver_cmd_line(self): + # Overriding PortTestCase.test_cmd_line(). Use ChromiumAndroidDriverTest.test_cmd_line() instead. + return + class ChromiumAndroidDriverTest(unittest.TestCase): def setUp(self): @@ -131,34 +136,30 @@ class ChromiumAndroidDriverTest(unittest.TestCase): self.driver = chromium_android.ChromiumAndroidDriver(mock_port, worker_number=0, pixel_tests=True) def test_cmd_line(self): - cmd_line = self.driver.cmd_line(True, ['--a']) + cmd_line = self.driver.cmd_line(True, ['anything']) + self.assertEquals(['adb', 'shell'], cmd_line) + + def test_drt_cmd_line(self): + cmd_line = self.driver._drt_cmd_line(True, ['--a']) self.assertTrue('--a' in cmd_line) self.assertTrue('--in-fifo=' + chromium_android.DRT_APP_FILES_DIR + 'DumpRenderTree.in' in cmd_line) self.assertTrue('--out-fifo=' + chromium_android.DRT_APP_FILES_DIR + 'DumpRenderTree.out' in cmd_line) - self.assertTrue('--err-file=' + chromium_android.DRT_APP_FILES_DIR + 'DumpRenderTree.err' in cmd_line) + self.assertTrue('--err-fifo=' + chromium_android.DRT_APP_FILES_DIR + 'DumpRenderTree.err' in cmd_line) def test_read_prompt(self): - self.driver._proc = Mock() # FIXME: This should use a tighter mock. 
- self.driver._proc.stdout = StringIO.StringIO("root@android:/ # ") - self.assertEquals(self.driver._read_prompt(), None) - self.driver._proc.stdout = StringIO.StringIO("$ ") - self.assertRaises(AssertionError, self.driver._read_prompt) - - def test_test_shell_command(self): - uri = 'file://%s/test.html' % self.driver._port.layout_tests_dir() - self.assertEquals(uri, 'file:///mock-checkout/LayoutTests/test.html') - expected_command = 'file:///data/local/tmp/third_party/WebKit/LayoutTests/test.html 2 checksum\n' - self.assertEquals(self.driver._test_shell_command(uri, 2, 'checksum'), expected_command) - self.assertEquals(self.driver._test_shell_command('http://test.html', 2, 'checksum'), 'http://test.html 2 checksum\n') - - def test_write_command_and_read_line(self): - self.driver._proc = Mock() # FIXME: This should use a tighter mock. - self.driver._proc.stdout = StringIO.StringIO("#URL:file:///data/local/tmp/third_party/WebKit/LayoutTests/test.html\noutput\n\n") - self.assertEquals(self.driver._write_command_and_read_line(), ('#URL:file:///mock-checkout/LayoutTests/test.html\n', False)) - self.assertEquals(self.driver._write_command_and_read_line(), ('output\n', False)) - self.assertEquals(self.driver._write_command_and_read_line(), ('\n', False)) - # Unexpected EOF is treated as crash. - self.assertEquals(self.driver._write_command_and_read_line(), ('', True)) + self.driver._server_process = webkit_unittest.MockServerProcess(['root@android:/ # ']) + self.assertEquals(self.driver._read_prompt(time.time() + 1), None) + self.driver._server_process = webkit_unittest.MockServerProcess(['$ ']) + self.assertRaises(AssertionError, self.driver._read_prompt, time.time() + 1) + + def test_command_from_driver_input(self): + driver_input = driver.DriverInput('foo/bar/test.html', 10, 'checksum', True) + expected_command = "/data/local/tmp/third_party/WebKit/LayoutTests/foo/bar/test.html'checksum\n" + self.assertEquals(self.driver._command_from_driver_input(driver_input), expected_command) + + driver_input = driver.DriverInput('http/tests/foo/bar/test.html', 10, 'checksum', True) + expected_command = "http://127.0.0.1:8000/foo/bar/test.html'checksum\n" + self.assertEquals(self.driver._command_from_driver_input(driver_input), expected_command) if __name__ == '__main__': diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py index 0c49112ba..87de41c6c 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py @@ -44,141 +44,6 @@ from webkitpy.layout_tests.port import chromium_port_testcase from webkitpy.layout_tests.port.driver import DriverInput -class ChromiumDriverTest(unittest.TestCase): - def setUp(self): - host = MockSystemHost() - options = MockOptions(configuration='Release', additional_drt_flag=['--test-shell']) - config = MockConfig(filesystem=host.filesystem, default_configuration='Release') - self.port = chromium_mac.ChromiumMacPort(host, 'chromium-mac-snowleopard', options=options, config=config) - self.driver = chromium.ChromiumDriver(self.port, worker_number=0, pixel_tests=True) - - def test_test_shell_command(self): - expected_command = "test.html 2 checksum\n" - self.assertEqual(self.driver._test_shell_command("test.html", 2, "checksum"), expected_command) - - def _assert_write_command_and_read_line(self, input=None, expected_line=None, expected_stdin=None, expected_crash=False): - if not expected_stdin: - if input: - 
expected_stdin = input - else: - # We reset stdin, so we should expect stdin.getValue = "" - expected_stdin = "" - self.driver._proc.stdin = StringIO.StringIO() - line, did_crash = self.driver._write_command_and_read_line(input) - self.assertEqual(self.driver._proc.stdin.getvalue(), expected_stdin) - self.assertEqual(line, expected_line) - self.assertEqual(did_crash, expected_crash) - - def test_write_command_and_read_line(self): - self.driver._proc = Mock() # FIXME: This should use a tighter mock. - # Set up to read 3 lines before we get an IOError - self.driver._proc.stdout = StringIO.StringIO("first\nsecond\nthird\n") - - unicode_input = u"I \u2661 Unicode" - utf8_input = unicode_input.encode("utf-8") - # Test unicode input conversion to utf-8 - self._assert_write_command_and_read_line(input=unicode_input, expected_stdin=utf8_input, expected_line="first\n") - # Test str() input. - self._assert_write_command_and_read_line(input="foo", expected_line="second\n") - # Test input=None - self._assert_write_command_and_read_line(expected_line="third\n") - # Test reading from a closed/empty stream. - # reading from a StringIO does not raise IOError like a real file would, so raise IOError manually. - def mock_readline(): - raise IOError - self.driver._proc.stdout.readline = mock_readline - self._assert_write_command_and_read_line(expected_crash=True) - - def test_crash_log(self): - self.driver._proc = Mock() - - # Simulate a crash by having stdout close unexpectedly. - def mock_readline(): - raise IOError - self.driver._proc.stdout.readline = mock_readline - self.driver._proc.pid = 1234 - - self.driver.test_to_uri = lambda test: 'mocktesturi' - self.driver._port.driver_name = lambda: 'mockdriver' - self.driver._port._get_crash_log = lambda name, pid, out, err, newer_than: 'mockcrashlog' - driver_output = self.driver.run_test(DriverInput(test_name='some/test.html', timeout=1, image_hash=None, should_run_pixel_test=False)) - self.assertTrue(driver_output.crash) - self.assertEqual(driver_output.crashed_process_name, 'mockdriver') - self.assertEqual(driver_output.crashed_pid, 1234) - self.assertEqual(driver_output.crash_log, 'mockcrashlog') - - def test_stop(self): - self.pid = None - self.wait_called = False - self.driver._proc = Mock() # FIXME: This should use a tighter mock. - self.driver._proc.pid = 1 - self.driver._proc.stdin = StringIO.StringIO() - self.driver._proc.stdout = StringIO.StringIO() - self.driver._proc.stderr = StringIO.StringIO() - self.driver._proc.poll = lambda: None - - def fake_wait(): - self.assertTrue(self.pid is not None) - self.wait_called = True - - self.driver._proc.wait = fake_wait - - class FakeExecutive(object): - def kill_process(other, pid): - self.pid = pid - self.driver._proc.poll = lambda: 2 - - self.driver._port._executive = FakeExecutive() - self.driver.KILL_TIMEOUT_DEFAULT = 0.01 - self.driver.stop() - self.assertTrue(self.wait_called) - self.assertEquals(self.pid, 1) - - def test_two_drivers(self): - - class MockDriver(chromium.ChromiumDriver): - def __init__(self, port): - chromium.ChromiumDriver.__init__(self, port, worker_number=0, pixel_tests=False) - - def cmd_line(self, pixel_test, per_test_args): - return 'python' - - # get_option is used to get the timeout (ms) for a process before we kill it. - driver1 = MockDriver(self.port) - driver1._start(False, []) - driver2 = MockDriver(self.port) - driver2._start(False, []) - # It's possible for driver1 to timeout when stopping if it's sharing stdin with driver2. 
- start_time = time.time() - driver1.stop() - driver2.stop() - self.assertTrue(time.time() - start_time < 20) - - def test_stop_cleans_up_properly(self): - self.driver._test_shell = False - self.driver.start(True, []) - last_tmpdir = self.port._filesystem.last_tmpdir - self.assertNotEquals(last_tmpdir, None) - self.driver.stop() - self.assertFalse(self.port._filesystem.isdir(last_tmpdir)) - - def test_two_starts_cleans_up_properly(self): - # clone the WebKitDriverTest tests here since we override start() and stop() - self.driver._test_shell = False - self.driver.start(True, []) - last_tmpdir = self.port._filesystem.last_tmpdir - self.driver._start(True, []) - self.assertFalse(self.port._filesystem.isdir(last_tmpdir)) - - def test_expectations_dict(self): - self.port._filesystem.write_text_file('/mock-checkout/LayoutTests/platform/chromium/TestExpectations', 'upstream') - self.port._filesystem.write_text_file('/mock-checkout/Source/WebKit/chromium/webkit/tools/layout_tests/test_expectations.txt', 'downstream') - self.assertEquals('\n'.join(self.port.expectations_dict().values()), 'upstream\ndownstream') - - self.port._filesystem.write_text_file(self.port.path_from_chromium_base('skia', 'skia_test_expectations.txt'), 'skia') - self.assertEquals('\n'.join(self.port.expectations_dict().values()), 'upstream\nskia\ndownstream') - - class ChromiumPortLoggingTest(logtesting.LoggingTestCase): # FIXME: put this someplace more useful diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py index 7e467dc12..e07a804b0 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py @@ -61,11 +61,14 @@ class ServerProcess(object): indefinitely. The class also handles transparently restarting processes as necessary to keep issuing commands.""" - def __init__(self, port_obj, name, cmd, env=None): + def __init__(self, port_obj, name, cmd, env=None, universal_newlines=False): self._port = port_obj self._name = name # Should be the command name (e.g. DumpRenderTree, ImageDiff) self._cmd = cmd self._env = env + # Set if the process outputs non-standard newlines like '\r\n' or '\r'. + # Don't set if there will be binary data or the data must be ASCII encoded. 
+ self._universal_newlines = universal_newlines self._host = self._port.host self._pid = None self._reset() @@ -100,7 +103,8 @@ class ServerProcess(object): stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=close_fds, - env=self._env) + env=self._env, + universal_newlines=self._universal_newlines) self._pid = self._proc.pid fd = self._proc.stdout.fileno() if not self._use_win32_apis: @@ -225,9 +229,18 @@ class ServerProcess(object): try: if out_fd in read_fds: - self._output += self._proc.stdout.read() + data = self._proc.stdout.read() + if not data: + _log.warning('unexpected EOF of stdout') + self._crashed = True + self._output += data + if err_fd in read_fds: - self._error += self._proc.stderr.read() + data = self._proc.stderr.read() + if not data: + _log.warning('unexpected EOF of stderr') + self._crashed = True + self._error += data except IOError, e: # We can ignore the IOErrors because we will detect if the subporcess crashed # the next time through the loop in _read() @@ -295,7 +308,7 @@ class ServerProcess(object): if not self._proc: self._start() - def stop(self): + def stop(self, kill_directly=False): if not self._proc: return @@ -307,14 +320,15 @@ class ServerProcess(object): self._proc.stdout.close() if self._proc.stderr: self._proc.stderr.close() - if not self._host.platform.is_win(): + + if kill_directly: + self.kill() + elif not self._host.platform.is_win(): # Closing stdin/stdout/stderr hangs sometimes on OS X, - # (see restart(), above), and anyway we don't want to hang - # the harness if DumpRenderTree is buggy, so we wait a couple - # seconds to give DumpRenderTree a chance to clean up, but then - # force-kill the process if necessary. - KILL_TIMEOUT = 3.0 - timeout = time.time() + KILL_TIMEOUT + # and anyway we don't want to hang the harness if DumpRenderTree + # is buggy, so we wait a couple seconds to give DumpRenderTree a + # chance to clean up, but then force-kill the process if necessary. + timeout = time.time() + self._port.process_kill_time() while self._proc.poll() is None and time.time() < timeout: time.sleep(0.01) if self._proc.poll() is None: @@ -329,3 +343,12 @@ class ServerProcess(object): if self._proc.poll() is not None: self._proc.wait() self._reset() + + def replace_outputs(self, stdout, stderr): + assert self._proc + if stdout: + self._proc.stdout.close() + self._proc.stdout = stdout + if stderr: + self._proc.stderr.close() + self._proc.stderr = stderr diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py index c0426c1cf..db38615e0 100644 --- a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py @@ -48,6 +48,9 @@ class TrivialMockPort(object): def check_for_leaks(self, process_name, process_pid): pass + def process_kill_time(self): + return 1 + class MockFile(object): def __init__(self, server_process): diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py index fad6f7a5d..d376e774a 100755 --- a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py @@ -531,7 +531,7 @@ class WebKitDriver(Driver): command = cygpath(command) if driver_input.image_hash: - # FIXME: Why the leading quote? + # "'" is the separator of command fields. 
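[Note: given the separator documented above, the full per-test command is just the test path or URL with the pixel-test checksum spliced on. A small sketch, matching the expected strings in the Android unit test elsewhere in this patch:]

    def build_test_command(test_path_or_url, image_hash=None):
        command = test_path_or_url
        if image_hash:
            command += "'" + image_hash  # "'" separates command fields
        return command + "\n"

    # build_test_command("http://127.0.0.1:8000/foo/bar/test.html", "checksum")
    # -> "http://127.0.0.1:8000/foo/bar/test.html'checksum\n"
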
command += "'" + driver_input.image_hash return command + "\n" @@ -552,11 +552,12 @@ class WebKitDriver(Driver): def run_test(self, driver_input): start_time = time.time() self.start(driver_input.should_run_pixel_test, driver_input.args) + test_begin_time = time.time() self.error_from_test = str() self.err_seen_eof = False command = self._command_from_driver_input(driver_input) - deadline = start_time + int(driver_input.timeout) / 1000.0 + deadline = test_begin_time + int(driver_input.timeout) / 1000.0 self._server_process.write(command) text, audio = self._read_first_block(deadline) # First block is either text or audio @@ -587,7 +588,7 @@ class WebKitDriver(Driver): self._server_process.kill() return DriverOutput(text, image, actual_image_hash, audio, - crash=self.has_crashed(), test_time=time.time() - start_time, + crash=self.has_crashed(), test_time=time.time() - test_begin_time, timeout=timeout, error=self.error_from_test, crashed_process_name=self._crashed_process_name, crashed_pid=self._crashed_pid, crash_log=crash_log) diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py index cfec29b33..54ad31919 100755 --- a/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py @@ -246,15 +246,30 @@ class MockServerProcess(object): return self.lines.pop(0) + "\n" def read_stdout(self, deadline, size): - # read_stdout doesn't actually function on lines, but this is sufficient for our testing. - line = self.lines.pop(0) - assert len(line) == size - return line + first_line = self.lines[0] + if size > len(first_line): + self.lines.pop(0) + remaining_size = size - len(first_line) - 1 + if not remaining_size: + return first_line + "\n" + return first_line + "\n" + self.read_stdout(deadline, remaining_size) + result = self.lines[0][:size] + self.lines[0] = self.lines[0][size:] + return result def read_either_stdout_or_stderr_line(self, deadline): # FIXME: We should have tests which intermix stderr and stdout lines. 
return self.read_stdout_line(deadline), None + def start(self): + return + + def stop(self, kill_directly=False): + return + + def kill(self): + return + class WebKitDriverTest(unittest.TestCase): def test_read_block(self): @@ -279,17 +294,35 @@ class WebKitDriverTest(unittest.TestCase): 'ActualHash: actual', 'ExpectedHash: expected', 'Content-Type: image/png', - 'Content-Length: 8', + 'Content-Length: 9', "12345678", "#EOF", ]) content_block = driver._read_block(0) self.assertEquals(content_block.content_type, 'image/png') self.assertEquals(content_block.content_hash, 'actual') - self.assertEquals(content_block.content, '12345678') - self.assertEquals(content_block.decoded_content, '12345678') + self.assertEquals(content_block.content, '12345678\n') + self.assertEquals(content_block.decoded_content, '12345678\n') driver._server_process = None + def test_read_base64_block(self): + port = TestWebKitPort() + driver = WebKitDriver(port, 0, pixel_tests=True) + driver._server_process = MockServerProcess([ + 'ActualHash: actual', + 'ExpectedHash: expected', + 'Content-Type: image/png', + 'Content-Transfer-Encoding: base64', + 'Content-Length: 12', + 'MTIzNDU2NzgK#EOF', + ]) + content_block = driver._read_block(0) + self.assertEquals(content_block.content_type, 'image/png') + self.assertEquals(content_block.content_hash, 'actual') + self.assertEquals(content_block.encoding, 'base64') + self.assertEquals(content_block.content, 'MTIzNDU2NzgK') + self.assertEquals(content_block.decoded_content, '12345678\n') + def test_no_timeout(self): port = TestWebKitPort() driver = WebKitDriver(port, 0, pixel_tests=True, no_timeout=True) diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py index 691c8456b..60db587e0 100755 --- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py +++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py @@ -109,9 +109,9 @@ def run(port, options, args, regular_output=sys.stderr, buildbot_output=sys.stdo unexpected_result_count = -1 try: manager = Manager(port, options, printer) - manager.print_config() + printer.print_config() - printer.print_update("Collecting tests ...") + printer.write_update("Collecting tests ...") try: manager.collect_tests(args) except IOError, e: @@ -119,12 +119,12 @@ def run(port, options, args, regular_output=sys.stderr, buildbot_output=sys.stdo return -1 raise - printer.print_update("Checking build ...") + printer.write_update("Checking build ...") if not port.check_build(manager.needs_servers()): _log.error("Build check failed") return -1 - printer.print_update("Parsing expectations ...") + printer.write_update("Parsing expectations ...") manager.parse_expectations() unexpected_result_count = manager.run() @@ -420,6 +420,11 @@ def parse_args(args=None): help="Don't re-try any tests that produce unexpected results."), optparse.make_option("--max-locked-shards", type="int", help="Set the maximum number of locked shards"), + # For chromium-android to reduce the cost of restarting the driver. + # FIXME: Remove the option once per-test arg is supported: + # https://bugs.webkit.org/show_bug.cgi?id=91539. + optparse.make_option("--shard-ref-tests", action="store_true", + help="Run ref tests in dedicated shard(s). 
Enabled on Android by default."), ])) option_group_definitions.append(("Miscellaneous Options", [ @@ -482,8 +487,10 @@ def main(argv=None): if '__main__' == __name__: try: - sys.exit(main()) + return_code = main() except BaseException, e: if e.__class__ in (KeyboardInterrupt, TestRunInterruptedException): sys.exit(INTERRUPTED_EXIT_STATUS) sys.exit(EXCEPTIONAL_EXIT_STATUS) + + sys.exit(return_code) diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing.py b/Tools/Scripts/webkitpy/layout_tests/views/printing.py index 2dd909930..1c2fecd7b 100644 --- a/Tools/Scripts/webkitpy/layout_tests/views/printing.py +++ b/Tools/Scripts/webkitpy/layout_tests/views/printing.py @@ -29,10 +29,12 @@ """Package that handles non-debug, non-file output for run-webkit-tests.""" +import math import optparse from webkitpy.tool import grammar from webkitpy.common.net import resultsjsonparser +from webkitpy.layout_tests.models import test_expectations from webkitpy.layout_tests.models.test_expectations import TestExpectations from webkitpy.layout_tests.views.metered_stream import MeteredStream @@ -192,15 +194,313 @@ class Printer(object): def help_printing(self): self._write(HELP_PRINTING) + def print_config(self): + """Prints the configuration for the test run.""" + self._print_config("Using port '%s'" % self._port.name()) + self._print_config("Test configuration: %s" % self._port.test_configuration()) + self._print_config("Placing test results in %s" % self._options.results_directory) + + # FIXME: should these options be in printing_options? + if self._options.new_baseline: + self._print_config("Placing new baselines in %s" % self._port.baseline_path()) + + fs = self._port.host.filesystem + fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()] + self._print_config("Baseline search path: %s -> generic" % " -> ".join(fallback_path)) + + self._print_config("Using %s build" % self._options.configuration) + if self._options.pixel_tests: + self._print_config("Pixel tests enabled") + else: + self._print_config("Pixel tests disabled") + + self._print_config("Regular timeout: %s, slow test timeout: %s" % + (self._options.time_out_ms, self._options.slow_time_out_ms)) + + self._print_config('Command line: ' + ' '.join(self._port.driver_cmd_line())) + self._print_config('') + + def print_expected(self, num_all_test_files, result_summary, tests_with_result_type_callback): + self._print_expected('Found %s.' % grammar.pluralize('test', num_all_test_files)) + self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes", tests_with_result_type_callback) + self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures", tests_with_result_type_callback) + self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky", tests_with_result_type_callback) + self._print_expected_results_of_type(result_summary, test_expectations.SKIP, "skipped", tests_with_result_type_callback) + self._print_expected('') + + if self._options.repeat_each > 1: + self._print_expected('Running each test %d times.' % self._options.repeat_each) + if self._options.iterations > 1: + self._print_expected('Running %d iterations of the tests.' % self._options.iterations) + if self._options.iterations > 1 or self._options.repeat_each > 1: + self._print_expected('') + + def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards): + driver_name = self._port.driver_name() + if num_workers == 1: + self._print_config("Running 1 %s over %s." 
% + (driver_name, grammar.pluralize('shard', num_shards))) + else: + self._print_config("Running %d %ss in parallel over %d shards (%d locked)." % + (num_workers, driver_name, num_shards, num_locked_shards)) + self._print_config('') + + def _print_expected_results_of_type(self, result_summary, + result_type, result_type_str, tests_with_result_type_callback): + """Print the number of the tests in a given result class. + + Args: + result_summary - the object containing all the results to report on + result_type - the particular result type to report in the summary. + result_type_str - a string description of the result_type. + expectations - populated TestExpectations object for stats + """ + tests = tests_with_result_type_callback(result_type) + now = result_summary.tests_by_timeline[test_expectations.NOW] + wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX] + + # We use a fancy format string in order to print the data out in a + # nicely-aligned table. + fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)" + % (self._num_digits(now), self._num_digits(wontfix))) + self._print_expected(fmtstr % + (len(tests), result_type_str, len(tests & now), len(tests & wontfix))) + + def _num_digits(self, num): + """Returns the number of digits needed to represent the length of a + sequence.""" + ndigits = 1 + if len(num): + ndigits = int(math.log10(len(num))) + 1 + return ndigits + + def print_results(self, run_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results): + self._print_timing_statistics(run_time, thread_timings, test_timings, individual_test_timings, result_summary) + self._print_result_summary(result_summary) + + self.print_one_line_summary(result_summary.total - result_summary.expected_skips, result_summary.expected - result_summary.expected_skips, result_summary.unexpected) + + self.print_unexpected_results(unexpected_results) + + def _print_timing_statistics(self, total_time, thread_timings, + directory_test_timings, individual_test_timings, + result_summary): + """Record timing-specific information for the test run. + + Args: + total_time: total elapsed time (in seconds) for the test run + thread_timings: wall clock time each thread ran for + directory_test_timings: timing by directory + individual_test_timings: timing by file + result_summary: summary object for the test run + """ + self.print_timing("Test timing:") + self.print_timing(" %6.2f total testing time" % total_time) + self.print_timing("") + self.print_timing("Thread timing:") + cuml_time = 0 + for t in thread_timings: + self.print_timing(" %10s: %5d tests, %6.2f secs" % + (t['name'], t['num_tests'], t['total_time'])) + cuml_time += t['total_time'] + self.print_timing(" %6.2f cumulative, %6.2f optimal" % + (cuml_time, cuml_time / int(self._options.child_processes))) + self.print_timing("") + + self._print_aggregate_test_statistics(individual_test_timings) + self._print_individual_test_times(individual_test_timings, + result_summary) + self._print_directory_timings(directory_test_timings) + + def _print_aggregate_test_statistics(self, individual_test_timings): + """Prints aggregate statistics (e.g. median, mean, etc.) for all tests. + Args: + individual_test_timings: List of TestResults for all tests. 
+ """ + times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings] + self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", + times_for_dump_render_tree) + + def _print_individual_test_times(self, individual_test_timings, + result_summary): + """Prints the run times for slow, timeout and crash tests. + Args: + individual_test_timings: List of TestStats for all tests. + result_summary: summary object for test run + """ + # Reverse-sort by the time spent in DumpRenderTree. + individual_test_timings.sort(lambda a, b: + cmp(b.test_run_time, a.test_run_time)) + + num_printed = 0 + slow_tests = [] + timeout_or_crash_tests = [] + unexpected_slow_tests = [] + for test_tuple in individual_test_timings: + test_name = test_tuple.test_name + is_timeout_crash_or_slow = False + if test_name in result_summary.slow_tests: + is_timeout_crash_or_slow = True + slow_tests.append(test_tuple) + + if test_name in result_summary.failures: + result = result_summary.results[test_name].type + if (result == test_expectations.TIMEOUT or + result == test_expectations.CRASH): + is_timeout_crash_or_slow = True + timeout_or_crash_tests.append(test_tuple) + + if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG): + num_printed = num_printed + 1 + unexpected_slow_tests.append(test_tuple) + + self.print_timing("") + self._print_test_list_timing("%s slowest tests that are not " + "marked as SLOW and did not timeout/crash:" % NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests) + self.print_timing("") + self._print_test_list_timing("Tests marked as SLOW:", slow_tests) + self.print_timing("") + self._print_test_list_timing("Tests that timed out or crashed:", + timeout_or_crash_tests) + self.print_timing("") + + def _print_test_list_timing(self, title, test_list): + """Print timing info for each test. + + Args: + title: section heading + test_list: tests that fall in this section + """ + if self.disabled('slowest'): + return + + self.print_timing(title) + for test_tuple in test_list: + test_run_time = round(test_tuple.test_run_time, 1) + self.print_timing(" %s took %s seconds" % (test_tuple.test_name, test_run_time)) + + def _print_directory_timings(self, directory_test_timings): + """Print timing info by directory for any directories that + take > 10 seconds to run. + + Args: + directory_test_timing: time info for each directory + """ + timings = [] + for directory in directory_test_timings: + num_tests, time_for_directory = directory_test_timings[directory] + timings.append((round(time_for_directory, 1), directory, + num_tests)) + timings.sort() + + self.print_timing("Time to process slowest subdirectories:") + min_seconds_to_print = 10 + for timing in timings: + if timing[0] > min_seconds_to_print: + self.print_timing( + " %s took %s seconds to run %s tests." % (timing[1], + timing[0], timing[2])) + self.print_timing("") + + def _print_statistics_for_test_timings(self, title, timings): + """Prints the median, mean and standard deviation of the values in + timings. + + Args: + title: Title for these timings. + timings: A list of floats representing times. 
+ """ + self.print_timing(title) + timings.sort() + + num_tests = len(timings) + if not num_tests: + return + percentile90 = timings[int(.9 * num_tests)] + percentile99 = timings[int(.99 * num_tests)] + + if num_tests % 2 == 1: + median = timings[((num_tests - 1) / 2) - 1] + else: + lower = timings[num_tests / 2 - 1] + upper = timings[num_tests / 2] + median = (float(lower + upper)) / 2 + + mean = sum(timings) / num_tests + + for timing in timings: + sum_of_deviations = math.pow(timing - mean, 2) + + std_deviation = math.sqrt(sum_of_deviations / num_tests) + self.print_timing(" Median: %6.3f" % median) + self.print_timing(" Mean: %6.3f" % mean) + self.print_timing(" 90th percentile: %6.3f" % percentile90) + self.print_timing(" 99th percentile: %6.3f" % percentile99) + self.print_timing(" Standard dev: %6.3f" % std_deviation) + self.print_timing("") + + def _print_result_summary(self, result_summary): + """Print a short summary about how many tests passed. + + Args: + result_summary: information to log + """ + failed = result_summary.total_failures + total = result_summary.total - result_summary.expected_skips + passed = total - failed + pct_passed = 0.0 + if total > 0: + pct_passed = float(passed) * 100 / total + + self.print_actual("") + self.print_actual("=> Results: %d/%d tests passed (%.1f%%)" % + (passed, total, pct_passed)) + self.print_actual("") + self._print_result_summary_entry(result_summary, + test_expectations.NOW, "Tests to be fixed") + + self.print_actual("") + self._print_result_summary_entry(result_summary, + test_expectations.WONTFIX, + "Tests that will only be fixed if they crash (WONTFIX)") + self.print_actual("") + + def _print_result_summary_entry(self, result_summary, timeline, + heading): + """Print a summary block of results for a particular timeline of test. + + Args: + result_summary: summary to print results for + timeline: the timeline to print results for (NOT, WONTFIX, etc.) + heading: a textual description of the timeline + """ + total = len(result_summary.tests_by_timeline[timeline]) + not_passing = (total - + len(result_summary.tests_by_expectation[test_expectations.PASS] & + result_summary.tests_by_timeline[timeline])) + self.print_actual("=> %s (%d):" % (heading, not_passing)) + + for result in TestExpectations.EXPECTATION_ORDER: + if result == test_expectations.PASS: + continue + results = (result_summary.tests_by_expectation[result] & + result_summary.tests_by_timeline[timeline]) + desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result] + if not_passing and len(results): + pct = len(results) * 100.0 / not_passing + self.print_actual(" %5d %-24s (%4.1f%%)" % + (len(results), desc[len(results) != 1], pct)) + + def print_actual(self, msg): if self.disabled('actual'): return self._buildbot_stream.write("%s\n" % msg) - def print_config(self, msg): + def _print_config(self, msg): self.write(msg, 'config') - def print_expected(self, msg): + def _print_expected(self, msg): self.write(msg, 'expected') def print_timing(self, msg): @@ -235,6 +535,10 @@ class Printer(object): self._write("%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str)) self._write("") + def print_finished_test(self, result, expected, exp_str, got_str, result_summary, retrying, test_files_list): + self.print_test_result(result, expected, exp_str, got_str) + self.print_progress(result_summary, retrying, test_files_list) + def print_test_result(self, result, expected, exp_str, got_str): """Print the result of the test as determined by --print. 
@@ -396,7 +700,7 @@ class Printer(object): if len(unexpected_results['tests']) and self._options.verbose: self._buildbot_stream.write("%s\n" % ("-" * 78)) - def print_update(self, msg): + def write_update(self, msg): if self.disabled('updates'): return self._meter.write_update(msg) diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py index 1312050e9..f8dd61db7 100644 --- a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py +++ b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py @@ -184,19 +184,30 @@ class Testprinter(unittest.TestCase): # buildbot is expecting. pass + def test_fallback_path_in_config(self): + printer, err, out = self.get_printer(['--print', 'everything']) + # FIXME: it's lame that i have to set these options directly. + printer._options.results_directory = '/tmp' + printer._options.pixel_tests = True + printer._options.new_baseline = True + printer._options.time_out_ms = 6000 + printer._options.slow_time_out_ms = 12000 + printer.print_config() + self.assertTrue('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in err.getvalue()) + def test_print_config(self): - self.do_switch_tests('print_config', 'config', to_buildbot=False) + self.do_switch_tests('_print_config', 'config', to_buildbot=False) def test_print_expected(self): - self.do_switch_tests('print_expected', 'expected', to_buildbot=False) + self.do_switch_tests('_print_expected', 'expected', to_buildbot=False) def test_print_timing(self): self.do_switch_tests('print_timing', 'timing', to_buildbot=False) - def test_print_update(self): + def test_write_update(self): # Note that there shouldn't be a carriage return here; updates() # are meant to be overwritten. - self.do_switch_tests('print_update', 'updates', to_buildbot=False, + self.do_switch_tests('write_update', 'updates', to_buildbot=False, message='hello', exp_err=['hello']) def test_print_one_line_summary(self): @@ -405,25 +416,26 @@ class Testprinter(unittest.TestCase): all pass on the second run). 
""" + test_is_slow = False paths, rs, exp = self.get_result_summary(tests, expectations) if expected: - rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected) - rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected) - rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected) + rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow) + rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow) + rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow) elif passing: - rs.add(self.get_result('passes/text.html'), expected) - rs.add(self.get_result('failures/expected/timeout.html'), expected) - rs.add(self.get_result('failures/expected/crash.html'), expected) + rs.add(self.get_result('passes/text.html'), expected, test_is_slow) + rs.add(self.get_result('failures/expected/timeout.html'), expected, test_is_slow) + rs.add(self.get_result('failures/expected/crash.html'), expected, test_is_slow) else: - rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected) - rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected) - rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected) + rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow) + rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow) + rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow) retry = rs if flaky: paths, retry, exp = self.get_result_summary(tests, expectations) - retry.add(self.get_result('passes/text.html'), True) - retry.add(self.get_result('failures/expected/timeout.html'), True) - retry.add(self.get_result('failures/expected/crash.html'), True) + retry.add(self.get_result('passes/text.html'), True, test_is_slow) + retry.add(self.get_result('failures/expected/timeout.html'), True, test_is_slow) + retry.add(self.get_result('failures/expected/crash.html'), True, test_is_slow) unexpected_results = manager.summarize_results(self._port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False) return unexpected_results diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp.py b/Tools/Scripts/webkitpy/style/checkers/cpp.py index aaad85d5c..ba1153087 100644 --- a/Tools/Scripts/webkitpy/style/checkers/cpp.py +++ b/Tools/Scripts/webkitpy/style/checkers/cpp.py @@ -1161,14 +1161,19 @@ class _FileState(object): self._clean_lines = clean_lines if file_extension in ['m', 'mm']: self._is_objective_c = True + self._is_c = False elif file_extension == 'h': # In the case of header files, it is unknown if the file - # is objective c or not, so set this value to None and then + # is c / objective c or not, so set this value to None and then # if it is requested, use heuristics to guess the value. 
 self._is_objective_c = None
+ self._is_c = None
+ elif file_extension == 'c':
+ self._is_c = True
+ self._is_objective_c = False
 else:
 self._is_objective_c = False
- self._is_c = file_extension == 'c'
+ self._is_c = False

 def set_did_inside_namespace_indent_warning(self):
 self._did_inside_namespace_indent_warning = True
@@ -1188,9 +1193,21 @@
 self._is_objective_c = False
 return self._is_objective_c

+ def is_c(self):
+ if self._is_c is None:
+ for line in self._clean_lines.lines:
+ # If extern "C" is found, then it is a good indication
+ # that we have a C header file.
+ if line.startswith('extern "C"'):
+ self._is_c = True
+ break
+ else:
+ self._is_c = False
+ return self._is_c
+
 def is_c_or_objective_c(self):
 """Return whether the file is C or Objective-C, judging by its extension and, for header files, its contents."""
- return self._is_c or self.is_objective_c()
+ return self.is_c() or self.is_objective_c()

 def check_for_non_standard_constructs(clean_lines, line_number,
diff --git a/Tools/Scripts/webkitpy/test/finder.py b/Tools/Scripts/webkitpy/test/finder.py
index 132072d82..fcbb0e9cf 100644
--- a/Tools/Scripts/webkitpy/test/finder.py
+++ b/Tools/Scripts/webkitpy/test/finder.py
@@ -101,7 +101,7 @@ class Finder(object):
 return tree.to_module(path)
 return None

- def find_names(self, args, skip_integrationtests, find_all):
+ def find_names(self, args, skip_integrationtests, find_all, skip_if_parallel=True):
 suffixes = ['_unittest.py']
 if not skip_integrationtests:
 suffixes.append('_integrationtest.py')
@@ -112,7 +112,7 @@
 names.extend(self._find_names_for_arg(arg, suffixes))
 return names

- return self._default_names(suffixes, find_all)
+ return self._default_names(suffixes, find_all, skip_if_parallel)

 def _find_names_for_arg(self, arg, suffixes):
 realpath = self.filesystem.realpath(arg)
@@ -145,7 +145,7 @@
 return tree.find_modules(suffixes, path)
 return []

- def _default_names(self, suffixes, find_all):
+ def _default_names(self, suffixes, find_all, skip_if_parallel):
 modules = []
 for tree in self.trees:
 modules.extend(tree.find_modules(suffixes))
diff --git a/Tools/Scripts/webkitpy/test/finder_unittest.py b/Tools/Scripts/webkitpy/test/finder_unittest.py
index 09048b159..386c579c7 100644
--- a/Tools/Scripts/webkitpy/test/finder_unittest.py
+++ b/Tools/Scripts/webkitpy/test/finder_unittest.py
@@ -47,18 +47,15 @@ class FinderTest(unittest.TestCase):
 # Here we have to jump through a hoop to make sure test-webkitpy doesn't log
 # any messages from these tests :(.
self.root_logger = logging.getLogger() - self.log_handler = None - for h in self.root_logger.handlers: - if getattr(h, 'name', None) == 'webkitpy.test.printer': - self.log_handler = h - break - if self.log_handler: - self.log_level = self.log_handler.level - self.log_handler.level = logging.CRITICAL + self.log_levels = [] + self.log_handlers = self.root_logger.handlers[:] + for handler in self.log_handlers: + self.log_levels.append(handler.level) + handler.level = logging.CRITICAL def tearDown(self): - if self.log_handler: - self.log_handler.setLevel(self.log_level) + for handler in self.log_handlers: + handler.level = self.log_levels.pop(0) def test_additional_system_paths(self): self.assertEquals(self.finder.additional_paths(['/usr']), diff --git a/Tools/Scripts/webkitpy/test/main.py b/Tools/Scripts/webkitpy/test/main.py index 2048d9e59..abb297b2b 100644 --- a/Tools/Scripts/webkitpy/test/main.py +++ b/Tools/Scripts/webkitpy/test/main.py @@ -63,6 +63,8 @@ class Tester(object): help='do not run the integration tests') parser.add_option('-p', '--pass-through', action='store_true', default=False, help='be debugger friendly by passing captured output through to the system') + parser.add_option('-j', '--child-processes', action='store', type='int', default=1, + help='number of tests to run in parallel') parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. ' 'If no args are given, all the tests will be run.') @@ -75,7 +77,7 @@ class Tester(object): self.finder.clean_trees() - names = self.finder.find_names(args, self._options.skip_integrationtests, self._options.all) + names = self.finder.find_names(args, self._options.skip_integrationtests, self._options.all, self._options.child_processes != 1) if not names: _log.error('No tests to run') return False diff --git a/Tools/Scripts/webkitpy/test/main_unittest.py b/Tools/Scripts/webkitpy/test/main_unittest.py index 2cf6df4a2..61e49a7b9 100644 --- a/Tools/Scripts/webkitpy/test/main_unittest.py +++ b/Tools/Scripts/webkitpy/test/main_unittest.py @@ -41,7 +41,7 @@ class TesterTest(unittest.TestCase): root_logger.handlers = [] tester.printer.stream = errors - tester.finder.find_names = lambda args, skip_integration, run_all: [] + tester.finder.find_names = lambda args, skip_integration, run_all, skip_if_parallel: [] oc = OutputCapture() try: oc.capture_output() diff --git a/Tools/Scripts/webkitpy/test/printer.py b/Tools/Scripts/webkitpy/test/printer.py index 77e28b8d1..042fba13c 100644 --- a/Tools/Scripts/webkitpy/test/printer.py +++ b/Tools/Scripts/webkitpy/test/printer.py @@ -22,10 +22,10 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import logging -import re import StringIO from webkitpy.common.system import outputcapture +from webkitpy.layout_tests.views.metered_stream import MeteredStream _log = logging.getLogger(__name__) @@ -33,12 +33,14 @@ _log = logging.getLogger(__name__) class Printer(object): def __init__(self, stream, options=None): self.stream = stream + self.meter = None self.options = options - self.test_description = re.compile("(\w+) \(([\w.]+)\)") - - def test_name(self, test): - m = self.test_description.match(str(test)) - return "%s.%s" % (m.group(2), m.group(1)) + self.num_tests = 0 + self.num_completed = 0 + self.running_tests = [] + self.completed_tests = [] + if options: + self.configure(options) def configure(self, options): self.options = options @@ -53,6 +55,8 @@ class Printer(object): elif options.verbose == 2: log_level = logging.DEBUG + self.meter = MeteredStream(self.stream, (options.verbose == 2)) + handler = logging.StreamHandler(self.stream) # We constrain the level on the handler rather than on the root # logger itself. This is probably better because the handler is @@ -98,59 +102,65 @@ class Printer(object): if self.options.pass_through: outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream - def print_started_test(self, test_name): + def print_started_test(self, source, test_name): + self.running_tests.append(test_name) + if len(self.running_tests) > 1: + suffix = ' (+%d)' % (len(self.running_tests) - 1) + else: + suffix = '' + if self.options.verbose: - self.stream.write(test_name) + write = self.meter.write_update + else: + write = self.meter.write_throttled_update + + write(self._test_line(self.running_tests[0], suffix)) def print_finished_test(self, result, test_name, test_time, failure, err): - timing = '' - if self.options.timing: - timing = ' %.4fs' % test_time - if self.options.verbose: - if failure: - msg = ' failed' - elif err: - msg = ' erred' + write = self.meter.writeln + if failure: + lines = failure[0][1].splitlines() + [''] + suffix = ' failed:' + elif err: + lines = err[0][1].splitlines() + [''] + suffix = ' erred:' + else: + suffix = ' passed' + lines = [] + if self.options.verbose: + write = self.meter.writeln else: - msg = ' passed' - self.stream.write(msg + timing + '\n') + write = self.meter.write_throttled_update + if self.options.timing: + suffix += ' %.4fs' % test_time + + self.num_completed += 1 + + if test_name == self.running_tests[0]: + self.completed_tests.insert(0, [test_name, suffix, lines]) else: - if failure: - msg = 'F' - elif err: - msg = 'E' + self.completed_tests.append([test_name, suffix, lines]) + self.running_tests.remove(test_name) + + for test_name, msg, lines in self.completed_tests: + if lines: + self.meter.writeln(self._test_line(test_name, msg)) + for line in lines: + self.meter.writeln(' ' + line) else: - msg = '.' 
- self.stream.write(msg) + write(self._test_line(test_name, msg)) + self.completed_tests = [] - def print_result(self, result, run_time): - self.stream.write('\n') - - for (test, err) in result.errors: - self.stream.write("=" * 80 + '\n') - self.stream.write("ERROR: " + self.test_name(test) + '\n') - self.stream.write("-" * 80 + '\n') - for line in err.splitlines(): - self.stream.write(line + '\n') - self.stream.write('\n') - - for (test, failure) in result.failures: - self.stream.write("=" * 80 + '\n') - self.stream.write("FAILURE: " + self.test_name(test) + '\n') - self.stream.write("-" * 80 + '\n') - for line in failure.splitlines(): - self.stream.write(line + '\n') - self.stream.write('\n') - - self.stream.write('-' * 80 + '\n') - self.stream.write('Ran %d test%s in %.3fs\n' % - (result.testsRun, result.testsRun != 1 and "s" or "", run_time)) + def _test_line(self, test_name, suffix): + return '[%d/%d] %s%s' % (self.num_completed, self.num_tests, test_name, suffix) + def print_result(self, result, run_time): + write = self.meter.writeln + write('Ran %d test%s in %.3fs' % (result.testsRun, result.testsRun != 1 and "s" or "", run_time)) if result.wasSuccessful(): - self.stream.write('\nOK\n') + write('\nOK\n') else: - self.stream.write('FAILED (failures=%d, errors=%d)\n' % - (len(result.failures), len(result.errors))) + write('FAILED (failures=%d, errors=%d)\n' % (len(result.failures), len(result.errors))) class _CaptureAndPassThroughStream(object): diff --git a/Tools/Scripts/webkitpy/test/runner.py b/Tools/Scripts/webkitpy/test/runner.py index 9c952075e..fd8af6fe0 100644 --- a/Tools/Scripts/webkitpy/test/runner.py +++ b/Tools/Scripts/webkitpy/test/runner.py @@ -23,18 +23,30 @@ """code to actually run a list of python tests.""" import logging +import re import time import unittest +from webkitpy.common import message_pool _log = logging.getLogger(__name__) +_test_description = re.compile("(\w+) \(([\w.]+)\)") + + +def _test_name(test): + m = _test_description.match(str(test)) + return "%s.%s" % (m.group(2), m.group(1)) + + class Runner(object): def __init__(self, printer, options, loader): self.options = options self.printer = printer self.loader = loader + self.result = unittest.TestResult() + self.worker_factory = lambda caller: _Worker(caller, self.loader) def all_test_names(self, suite): names = [] @@ -42,34 +54,48 @@ class Runner(object): for t in suite._tests: names.extend(self.all_test_names(t)) else: - names.append(self.printer.test_name(suite)) + names.append(_test_name(suite)) return names def run(self, suite): run_start_time = time.time() all_test_names = self.all_test_names(suite) + self.printer.num_tests = len(all_test_names) + + with message_pool.get(self, self.worker_factory, int(self.options.child_processes)) as pool: + pool.run(('test', test_name) for test_name in all_test_names) + + self.printer.print_result(self.result, time.time() - run_start_time) + return self.result + + def handle(self, message_name, source, test_name, delay=None, result=None): + if message_name == 'started_test': + self.printer.print_started_test(source, test_name) + return + + self.result.testsRun += 1 + self.result.errors.extend(result.errors) + self.result.failures.extend(result.failures) + self.printer.print_finished_test(source, test_name, delay, result.failures, result.errors) + + +class _Worker(object): + def __init__(self, caller, loader): + self._caller = caller + self._loader = loader + + def handle(self, message_name, source, test_name): + assert message_name == 'test' result = 
unittest.TestResult() - stop = run_start_time - for test_name in all_test_names: - self.printer.print_started_test(test_name) - num_failures = len(result.failures) - num_errors = len(result.errors) - - start = time.time() - # FIXME: it's kinda lame that we re-load the test suites for each - # test, and this may slow things down, but this makes implementing - # the logging easy and will also allow us to parallelize nicely. - self.loader.loadTestsFromName(test_name, None).run(result) - stop = time.time() - - err = None - failure = None - if len(result.failures) > num_failures: - failure = result.failures[num_failures][1] - elif len(result.errors) > num_errors: - err = result.errors[num_errors][1] - self.printer.print_finished_test(result, test_name, stop - start, failure, err) - - self.printer.print_result(result, stop - run_start_time) - - return result + start = time.time() + self._caller.post('started_test', test_name) + self._loader.loadTestsFromName(test_name, None).run(result) + + # The tests in the TestResult contain file objects and other unpicklable things; we only + # care about the test name, so we rewrite the result to replace the test with the test name. + # FIXME: We need an automated test for this, but I don't know how to write an automated + # test that will fail in this case that doesn't get picked up by test-webkitpy normally :(. + result.failures = [(_test_name(failure[0]), failure[1]) for failure in result.failures] + result.errors = [(_test_name(error[0]), error[1]) for error in result.errors] + + self._caller.post('finished_test', test_name, time.time() - start, result) diff --git a/Tools/Scripts/webkitpy/test/runner_unittest.py b/Tools/Scripts/webkitpy/test/runner_unittest.py index 1cf0146fb..07c5c31ea 100644 --- a/Tools/Scripts/webkitpy/test/runner_unittest.py +++ b/Tools/Scripts/webkitpy/test/runner_unittest.py @@ -20,13 +20,14 @@ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +import logging import re import StringIO import unittest from webkitpy.tool.mocktool import MockOptions from webkitpy.test.printer import Printer -from webkitpy.test.runner import Runner +from webkitpy.test.runner import Runner, _Worker class FakeModuleSuite(object): @@ -69,44 +70,42 @@ class FakeLoader(object): class RunnerTest(unittest.TestCase): - def test_regular(self): - options = MockOptions(verbose=0, timing=False) + def setUp(self): + # Here we have to jump through a hoop to make sure test-webkitpy doesn't log + # any messages from these tests :(. 
+ self.root_logger = logging.getLogger() + self.log_levels = [] + self.log_handlers = self.root_logger.handlers[:] + for handler in self.log_handlers: + self.log_levels.append(handler.level) + handler.level = logging.CRITICAL + + def tearDown(self): + for handler in self.log_handlers: + handler.level = self.log_levels.pop(0) + + def assert_run(self, verbose=0, timing=False, child_processes=1, quiet=False): + options = MockOptions(verbose=verbose, timing=timing, child_processes=child_processes, quiet=quiet, pass_through=False) stream = StringIO.StringIO() loader = FakeLoader(('test1 (Foo)', '.', ''), ('test2 (Foo)', 'F', 'test2\nfailed'), ('test3 (Foo)', 'E', 'test3\nerred')) - result = Runner(Printer(stream, options), options, loader).run(loader.top_suite()) + runner = Runner(Printer(stream, options), options, loader) + result = runner.run(loader.top_suite()) self.assertFalse(result.wasSuccessful()) self.assertEquals(result.testsRun, 3) self.assertEquals(len(result.failures), 1) self.assertEquals(len(result.errors), 1) # FIXME: check the output from the test + def test_regular(self): + self.assert_run() + def test_verbose(self): - options = MockOptions(verbose=1, timing=False) - stream = StringIO.StringIO() - loader = FakeLoader(('test1 (Foo)', '.', ''), - ('test2 (Foo)', 'F', 'test2\nfailed'), - ('test3 (Foo)', 'E', 'test3\nerred')) - result = Runner(Printer(stream, options), options, loader).run(loader.top_suite()) - self.assertFalse(result.wasSuccessful()) - self.assertEquals(result.testsRun, 3) - self.assertEquals(len(result.failures), 1) - self.assertEquals(len(result.errors), 1) - # FIXME: check the output from the test + self.assert_run(verbose=1) def test_timing(self): - options = MockOptions(verbose=0, timing=True) - stream = StringIO.StringIO() - loader = FakeLoader(('test1 (Foo)', '.', ''), - ('test2 (Foo)', 'F', 'test2\nfailed'), - ('test3 (Foo)', 'E', 'test3\nerred')) - result = Runner(Printer(stream, options), options, loader).run(loader.top_suite()) - self.assertFalse(result.wasSuccessful()) - self.assertEquals(result.testsRun, 3) - self.assertEquals(len(result.failures), 1) - self.assertEquals(len(result.errors), 1) - # FIXME: check the output from the test + self.assert_run(timing=True) if __name__ == '__main__': diff --git a/Tools/Scripts/webkitpy/webkitpy.pyproj b/Tools/Scripts/webkitpy/webkitpy.pyproj new file mode 100644 index 000000000..72135a8d4 --- /dev/null +++ b/Tools/Scripts/webkitpy/webkitpy.pyproj @@ -0,0 +1,540 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <PropertyGroup> + <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration> + <SchemaVersion>2.0</SchemaVersion> + <ProjectGuid>{59b0a791-93fe-40f8-a52b-ba19b73e8fa6}</ProjectGuid> + <ProjectHome>.</ProjectHome> + <StartupFile>layout_tests\run_webkit_tests.py</StartupFile> + <SearchPath> + </SearchPath> + <WorkingDirectory>../</WorkingDirectory> + <OutputPath>.</OutputPath> + <Name>webkitpy</Name> + <RootNamespace>webkitpy</RootNamespace> + <IsWindowsApplication>False</IsWindowsApplication> + <LaunchProvider>Standard Python launcher</LaunchProvider> + <CommandLineArguments>--platform=mock --no-pixel-tests --no-retry-failures</CommandLineArguments> + <InterpreterPath /> + <InterpreterArguments /> + </PropertyGroup> + <PropertyGroup Condition=" '$(Configuration)' == 'Debug' "> + <DebugSymbols>true</DebugSymbols> + <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging> + </PropertyGroup> + 
<PropertyGroup Condition=" '$(Configuration)' == 'Release' "> + <DebugSymbols>true</DebugSymbols> + <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging> + </PropertyGroup> + <ItemGroup> + <Compile Include="bindings\main.py" /> + <Compile Include="bindings\__init__.py" /> + <Compile Include="common\checkout\baselineoptimizer.py" /> + <Compile Include="common\checkout\baselineoptimizer_unittest.py" /> + <Compile Include="common\checkout\changelog.py" /> + <Compile Include="common\checkout\changelog_unittest.py" /> + <Compile Include="common\checkout\checkout.py" /> + <Compile Include="common\checkout\checkout_mock.py" /> + <Compile Include="common\checkout\checkout_unittest.py" /> + <Compile Include="common\checkout\commitinfo.py" /> + <Compile Include="common\checkout\commitinfo_unittest.py" /> + <Compile Include="common\checkout\deps.py" /> + <Compile Include="common\checkout\deps_mock.py" /> + <Compile Include="common\checkout\diff_parser.py" /> + <Compile Include="common\checkout\diff_parser_unittest.py" /> + <Compile Include="common\checkout\diff_test_data.py" /> + <Compile Include="common\checkout\scm\commitmessage.py" /> + <Compile Include="common\checkout\scm\detection.py" /> + <Compile Include="common\checkout\scm\detection_unittest.py" /> + <Compile Include="common\checkout\scm\git.py" /> + <Compile Include="common\checkout\scm\scm.py" /> + <Compile Include="common\checkout\scm\scm_mock.py" /> + <Compile Include="common\checkout\scm\scm_unittest.py" /> + <Compile Include="common\checkout\scm\svn.py" /> + <Compile Include="common\checkout\scm\__init__.py" /> + <Compile Include="common\checkout\__init__.py" /> + <Compile Include="common\checksvnconfigfile.py" /> + <Compile Include="common\config\build.py" /> + <Compile Include="common\config\build_unittest.py" /> + <Compile Include="common\config\committers.py" /> + <Compile Include="common\config\committers_unittest.py" /> + <Compile Include="common\config\committervalidator.py" /> + <Compile Include="common\config\committervalidator_unittest.py" /> + <Compile Include="common\config\contributionareas.py" /> + <Compile Include="common\config\contributionareas_unittest.py" /> + <Compile Include="common\config\irc.py" /> + <Compile Include="common\config\ports.py" /> + <Compile Include="common\config\ports_mock.py" /> + <Compile Include="common\config\ports_unittest.py" /> + <Compile Include="common\config\urls.py" /> + <Compile Include="common\config\urls_unittest.py" /> + <Compile Include="common\config\__init__.py" /> + <Compile Include="common\editdistance.py" /> + <Compile Include="common\editdistance_unittest.py" /> + <Compile Include="common\find_files.py" /> + <Compile Include="common\find_files_unittest.py" /> + <Compile Include="common\host.py" /> + <Compile Include="common\host_mock.py" /> + <Compile Include="common\lru_cache.py" /> + <Compile Include="common\lru_cache_unittest.py" /> + <Compile Include="common\memoized.py" /> + <Compile Include="common\memoized_unittest.py" /> + <Compile Include="common\message_pool.py" /> + <Compile Include="common\net\bugzilla\attachment.py" /> + <Compile Include="common\net\bugzilla\bug.py" /> + <Compile Include="common\net\bugzilla\bugzilla.py" /> + <Compile Include="common\net\bugzilla\bugzilla_mock.py" /> + <Compile Include="common\net\bugzilla\bugzilla_unittest.py" /> + <Compile Include="common\net\bugzilla\bug_unittest.py" /> + <Compile Include="common\net\bugzilla\__init__.py" /> + <Compile Include="common\net\buildbot\buildbot.py" /> + <Compile 
Include="common\net\buildbot\buildbot_mock.py" /> + <Compile Include="common\net\buildbot\buildbot_unittest.py" /> + <Compile Include="common\net\buildbot\chromiumbuildbot.py" /> + <Compile Include="common\net\buildbot\__init__.py" /> + <Compile Include="common\net\credentials.py" /> + <Compile Include="common\net\credentials_unittest.py" /> + <Compile Include="common\net\failuremap.py" /> + <Compile Include="common\net\failuremap_unittest.py" /> + <Compile Include="common\net\file_uploader.py" /> + <Compile Include="common\net\htdigestparser.py" /> + <Compile Include="common\net\htdigestparser_unittest.py" /> + <Compile Include="common\net\irc\ircbot.py" /> + <Compile Include="common\net\irc\ircproxy.py" /> + <Compile Include="common\net\irc\ircproxy_unittest.py" /> + <Compile Include="common\net\irc\irc_mock.py" /> + <Compile Include="common\net\irc\__init__.py" /> + <Compile Include="common\net\layouttestresults.py" /> + <Compile Include="common\net\layouttestresults_unittest.py" /> + <Compile Include="common\net\networktransaction.py" /> + <Compile Include="common\net\networktransaction_unittest.py" /> + <Compile Include="common\net\omahaproxy.py" /> + <Compile Include="common\net\omahaproxy_unittest.py" /> + <Compile Include="common\net\regressionwindow.py" /> + <Compile Include="common\net\resultsjsonparser.py" /> + <Compile Include="common\net\resultsjsonparser_unittest.py" /> + <Compile Include="common\net\statusserver.py" /> + <Compile Include="common\net\statusserver_mock.py" /> + <Compile Include="common\net\statusserver_unittest.py" /> + <Compile Include="common\net\unittestresults.py" /> + <Compile Include="common\net\unittestresults_unittest.py" /> + <Compile Include="common\net\web.py" /> + <Compile Include="common\net\web_mock.py" /> + <Compile Include="common\net\__init__.py" /> + <Compile Include="common\newstringio.py" /> + <Compile Include="common\newstringio_unittest.py" /> + <Compile Include="common\prettypatch.py" /> + <Compile Include="common\prettypatch_unittest.py" /> + <Compile Include="common\read_checksum_from_png.py" /> + <Compile Include="common\read_checksum_from_png_unittest.py" /> + <Compile Include="common\system\autoinstall.py" /> + <Compile Include="common\system\crashlogs.py" /> + <Compile Include="common\system\crashlogs_unittest.py" /> + <Compile Include="common\system\deprecated_logging.py" /> + <Compile Include="common\system\deprecated_logging_unittest.py" /> + <Compile Include="common\system\environment.py" /> + <Compile Include="common\system\environment_unittest.py" /> + <Compile Include="common\system\executive.py" /> + <Compile Include="common\system\executive_mock.py" /> + <Compile Include="common\system\executive_unittest.py" /> + <Compile Include="common\system\fileset.py" /> + <Compile Include="common\system\filesystem.py" /> + <Compile Include="common\system\filesystem_mock.py" /> + <Compile Include="common\system\filesystem_mock_unittest.py" /> + <Compile Include="common\system\filesystem_unittest.py" /> + <Compile Include="common\system\file_lock.py" /> + <Compile Include="common\system\file_lock_integrationtest.py" /> + <Compile Include="common\system\logtesting.py" /> + <Compile Include="common\system\logutils.py" /> + <Compile Include="common\system\logutils_unittest.py" /> + <Compile Include="common\system\outputcapture.py" /> + <Compile Include="common\system\outputcapture_unittest.py" /> + <Compile Include="common\system\path.py" /> + <Compile Include="common\system\path_unittest.py" /> + <Compile 
Include="common\system\platforminfo.py" /> + <Compile Include="common\system\platforminfo_mock.py" /> + <Compile Include="common\system\platforminfo_unittest.py" /> + <Compile Include="common\system\stack_utils.py" /> + <Compile Include="common\system\stack_utils_unittest.py" /> + <Compile Include="common\system\systemhost.py" /> + <Compile Include="common\system\systemhost_mock.py" /> + <Compile Include="common\system\urlfetcher.py" /> + <Compile Include="common\system\urlfetcher_mock.py" /> + <Compile Include="common\system\user.py" /> + <Compile Include="common\system\user_mock.py" /> + <Compile Include="common\system\user_unittest.py" /> + <Compile Include="common\system\workspace.py" /> + <Compile Include="common\system\workspace_mock.py" /> + <Compile Include="common\system\workspace_unittest.py" /> + <Compile Include="common\system\zipfileset.py" /> + <Compile Include="common\system\zipfileset_mock.py" /> + <Compile Include="common\system\zipfileset_unittest.py" /> + <Compile Include="common\system\zip_mock.py" /> + <Compile Include="common\system\__init__.py" /> + <Compile Include="common\thread\messagepump.py" /> + <Compile Include="common\thread\messagepump_unittest.py" /> + <Compile Include="common\thread\threadedmessagequeue.py" /> + <Compile Include="common\thread\threadedmessagequeue_unittest.py" /> + <Compile Include="common\thread\__init__.py" /> + <Compile Include="common\version_check.py" /> + <Compile Include="common\watchlist\amountchangedpattern.py" /> + <Compile Include="common\watchlist\amountchangedpattern_unittest.py" /> + <Compile Include="common\watchlist\changedlinepattern.py" /> + <Compile Include="common\watchlist\changedlinepattern_unittest.py" /> + <Compile Include="common\watchlist\filenamepattern.py" /> + <Compile Include="common\watchlist\filenamepattern_unittest.py" /> + <Compile Include="common\watchlist\watchlist.py" /> + <Compile Include="common\watchlist\watchlistloader.py" /> + <Compile Include="common\watchlist\watchlistloader_unittest.py" /> + <Compile Include="common\watchlist\watchlistparser.py" /> + <Compile Include="common\watchlist\watchlistparser_unittest.py" /> + <Compile Include="common\watchlist\watchlistrule.py" /> + <Compile Include="common\watchlist\watchlistrule_unittest.py" /> + <Compile Include="common\watchlist\watchlist_mock.py" /> + <Compile Include="common\watchlist\watchlist_unittest.py" /> + <Compile Include="common\watchlist\__init__.py" /> + <Compile Include="common\webkitunittest.py" /> + <Compile Include="common\__init__.py" /> + <Compile Include="layout_tests\controllers\manager.py" /> + <Compile Include="layout_tests\controllers\manager_unittest.py" /> + <Compile Include="layout_tests\controllers\single_test_runner.py" /> + <Compile Include="layout_tests\controllers\test_expectations_editor.py" /> + <Compile Include="layout_tests\controllers\test_expectations_editor_unittest.py" /> + <Compile Include="layout_tests\controllers\test_result_writer.py" /> + <Compile Include="layout_tests\controllers\test_result_writer_unittest.py" /> + <Compile Include="layout_tests\controllers\worker.py" /> + <Compile Include="layout_tests\controllers\__init__.py" /> + <Compile Include="layout_tests\layout_package\json_layout_results_generator.py" /> + <Compile Include="layout_tests\layout_package\json_results_generator.py" /> + <Compile Include="layout_tests\layout_package\json_results_generator_unittest.py" /> + <Compile Include="layout_tests\layout_package\__init__.py" /> + <Compile Include="layout_tests\models\result_summary.py" /> + 
<Compile Include="layout_tests\models\test_configuration.py" /> + <Compile Include="layout_tests\models\test_configuration_unittest.py" /> + <Compile Include="layout_tests\models\test_expectations.py" /> + <Compile Include="layout_tests\models\test_expectations_unittest.py" /> + <Compile Include="layout_tests\models\test_failures.py" /> + <Compile Include="layout_tests\models\test_failures_unittest.py" /> + <Compile Include="layout_tests\models\test_input.py" /> + <Compile Include="layout_tests\models\test_results.py" /> + <Compile Include="layout_tests\models\test_results_unittest.py" /> + <Compile Include="layout_tests\models\__init__.py" /> + <Compile Include="layout_tests\port\apple.py" /> + <Compile Include="layout_tests\port\base.py" /> + <Compile Include="layout_tests\port\base_unittest.py" /> + <Compile Include="layout_tests\port\builders.py" /> + <Compile Include="layout_tests\port\builders_unittest.py" /> + <Compile Include="layout_tests\port\chromium.py" /> + <Compile Include="layout_tests\port\chromium_android.py" /> + <Compile Include="layout_tests\port\chromium_android_unittest.py" /> + <Compile Include="layout_tests\port\chromium_linux.py" /> + <Compile Include="layout_tests\port\chromium_linux_unittest.py" /> + <Compile Include="layout_tests\port\chromium_mac.py" /> + <Compile Include="layout_tests\port\chromium_mac_unittest.py" /> + <Compile Include="layout_tests\port\chromium_port_testcase.py" /> + <Compile Include="layout_tests\port\chromium_unittest.py" /> + <Compile Include="layout_tests\port\chromium_win.py" /> + <Compile Include="layout_tests\port\chromium_win_unittest.py" /> + <Compile Include="layout_tests\port\config.py" /> + <Compile Include="layout_tests\port\config_mock.py" /> + <Compile Include="layout_tests\port\config_standalone.py" /> + <Compile Include="layout_tests\port\config_unittest.py" /> + <Compile Include="layout_tests\port\driver.py" /> + <Compile Include="layout_tests\port\driver_unittest.py" /> + <Compile Include="layout_tests\port\efl.py" /> + <Compile Include="layout_tests\port\efl_unittest.py" /> + <Compile Include="layout_tests\port\factory.py" /> + <Compile Include="layout_tests\port\factory_unittest.py" /> + <Compile Include="layout_tests\port\gtk.py" /> + <Compile Include="layout_tests\port\gtk_unittest.py" /> + <Compile Include="layout_tests\port\http_lock.py" /> + <Compile Include="layout_tests\port\http_lock_unittest.py" /> + <Compile Include="layout_tests\port\leakdetector.py" /> + <Compile Include="layout_tests\port\leakdetector_unittest.py" /> + <Compile Include="layout_tests\port\mac.py" /> + <Compile Include="layout_tests\port\mac_unittest.py" /> + <Compile Include="layout_tests\port\mock_drt.py" /> + <Compile Include="layout_tests\port\mock_drt_unittest.py" /> + <Compile Include="layout_tests\port\port_testcase.py" /> + <Compile Include="layout_tests\port\pulseaudio_sanitizer.py" /> + <Compile Include="layout_tests\port\qt.py" /> + <Compile Include="layout_tests\port\qt_unittest.py" /> + <Compile Include="layout_tests\port\server_process.py" /> + <Compile Include="layout_tests\port\server_process_unittest.py" /> + <Compile Include="layout_tests\port\test.py" /> + <Compile Include="layout_tests\port\webkit.py" /> + <Compile Include="layout_tests\port\webkit_unittest.py" /> + <Compile Include="layout_tests\port\win.py" /> + <Compile Include="layout_tests\port\win_unittest.py" /> + <Compile Include="layout_tests\port\xvfbdriver.py" /> + <Compile Include="layout_tests\port\__init__.py" /> + <Compile 
Include="layout_tests\reftests\extract_reference_link.py" /> + <Compile Include="layout_tests\reftests\extract_reference_link_unittest.py" /> + <Compile Include="layout_tests\reftests\__init__.py" /> + <Compile Include="layout_tests\run_webkit_tests.py" /> + <Compile Include="layout_tests\run_webkit_tests_integrationtest.py" /> + <Compile Include="layout_tests\servers\apache_http_server.py" /> + <Compile Include="layout_tests\servers\apache_http_server_unittest.py" /> + <Compile Include="layout_tests\servers\http_server.py" /> + <Compile Include="layout_tests\servers\http_server_base.py" /> + <Compile Include="layout_tests\servers\http_server_integrationtest.py" /> + <Compile Include="layout_tests\servers\http_server_unittest.py" /> + <Compile Include="layout_tests\servers\websocket_server.py" /> + <Compile Include="layout_tests\servers\__init__.py" /> + <Compile Include="layout_tests\views\metered_stream.py" /> + <Compile Include="layout_tests\views\metered_stream_unittest.py" /> + <Compile Include="layout_tests\views\printing.py" /> + <Compile Include="layout_tests\views\printing_unittest.py" /> + <Compile Include="layout_tests\views\__init__.py" /> + <Compile Include="layout_tests\__init__.py" /> + <Compile Include="performance_tests\perftest.py" /> + <Compile Include="performance_tests\perftestsrunner.py" /> + <Compile Include="performance_tests\perftestsrunner_unittest.py" /> + <Compile Include="performance_tests\perftest_unittest.py" /> + <Compile Include="performance_tests\__init__.py" /> + <Compile Include="style\checker.py" /> + <Compile Include="style\checkers\changelog.py" /> + <Compile Include="style\checkers\changelog_unittest.py" /> + <Compile Include="style\checkers\common.py" /> + <Compile Include="style\checkers\common_unittest.py" /> + <Compile Include="style\checkers\cpp.py" /> + <Compile Include="style\checkers\cpp_unittest.py" /> + <Compile Include="style\checkers\jsonchecker.py" /> + <Compile Include="style\checkers\jsonchecker_unittest.py" /> + <Compile Include="style\checkers\png.py" /> + <Compile Include="style\checkers\png_unittest.py" /> + <Compile Include="style\checkers\python.py" /> + <Compile Include="style\checkers\python_unittest.py" /> + <Compile Include="style\checkers\python_unittest_input.py" /> + <Compile Include="style\checkers\test_expectations.py" /> + <Compile Include="style\checkers\test_expectations_unittest.py" /> + <Compile Include="style\checkers\text.py" /> + <Compile Include="style\checkers\text_unittest.py" /> + <Compile Include="style\checkers\watchlist.py" /> + <Compile Include="style\checkers\watchlist_unittest.py" /> + <Compile Include="style\checkers\xcodeproj.py" /> + <Compile Include="style\checkers\xcodeproj_unittest.py" /> + <Compile Include="style\checkers\xml.py" /> + <Compile Include="style\checkers\xml_unittest.py" /> + <Compile Include="style\checkers\__init__.py" /> + <Compile Include="style\checker_unittest.py" /> + <Compile Include="style\error_handlers.py" /> + <Compile Include="style\error_handlers_unittest.py" /> + <Compile Include="style\filereader.py" /> + <Compile Include="style\filereader_unittest.py" /> + <Compile Include="style\filter.py" /> + <Compile Include="style\filter_unittest.py" /> + <Compile Include="style\main.py" /> + <Compile Include="style\main_unittest.py" /> + <Compile Include="style\optparser.py" /> + <Compile Include="style\optparser_unittest.py" /> + <Compile Include="style\patchreader.py" /> + <Compile Include="style\patchreader_unittest.py" /> + <Compile Include="style\__init__.py" /> + <Compile 
Include="test\finder.py" /> + <Compile Include="test\finder_unittest.py" /> + <Compile Include="test\main.py" /> + <Compile Include="test\main_unittest.py" /> + <Compile Include="test\printer.py" /> + <Compile Include="test\runner.py" /> + <Compile Include="test\runner_unittest.py" /> + <Compile Include="test\skip.py" /> + <Compile Include="test\skip_unittest.py" /> + <Compile Include="test\__init__.py" /> + <Compile Include="thirdparty\BeautifulSoup.py" /> + <Compile Include="thirdparty\mock.py" /> + <Compile Include="thirdparty\mod_pywebsocket\common.py" /> + <Compile Include="thirdparty\mod_pywebsocket\dispatch.py" /> + <Compile Include="thirdparty\mod_pywebsocket\extensions.py" /> + <Compile Include="thirdparty\mod_pywebsocket\handshake\draft75.py" /> + <Compile Include="thirdparty\mod_pywebsocket\handshake\hybi.py" /> + <Compile Include="thirdparty\mod_pywebsocket\handshake\hybi00.py" /> + <Compile Include="thirdparty\mod_pywebsocket\handshake\_base.py" /> + <Compile Include="thirdparty\mod_pywebsocket\handshake\__init__.py" /> + <Compile Include="thirdparty\mod_pywebsocket\headerparserhandler.py" /> + <Compile Include="thirdparty\mod_pywebsocket\http_header_util.py" /> + <Compile Include="thirdparty\mod_pywebsocket\memorizingfile.py" /> + <Compile Include="thirdparty\mod_pywebsocket\msgutil.py" /> + <Compile Include="thirdparty\mod_pywebsocket\standalone.py" /> + <Compile Include="thirdparty\mod_pywebsocket\stream.py" /> + <Compile Include="thirdparty\mod_pywebsocket\util.py" /> + <Compile Include="thirdparty\mod_pywebsocket\_stream_base.py" /> + <Compile Include="thirdparty\mod_pywebsocket\_stream_hixie75.py" /> + <Compile Include="thirdparty\mod_pywebsocket\_stream_hybi.py" /> + <Compile Include="thirdparty\mod_pywebsocket\__init__.py" /> + <Compile Include="thirdparty\ordered_dict.py" /> + <Compile Include="thirdparty\__init__.py" /> + <Compile Include="thirdparty\__init___unittest.py" /> + <Compile Include="tool\bot\botinfo.py" /> + <Compile Include="tool\bot\botinfo_unittest.py" /> + <Compile Include="tool\bot\commitqueuetask.py" /> + <Compile Include="tool\bot\commitqueuetask_unittest.py" /> + <Compile Include="tool\bot\earlywarningsystemtask.py" /> + <Compile Include="tool\bot\expectedfailures.py" /> + <Compile Include="tool\bot\expectedfailures_unittest.py" /> + <Compile Include="tool\bot\feeders.py" /> + <Compile Include="tool\bot\feeders_unittest.py" /> + <Compile Include="tool\bot\flakytestreporter.py" /> + <Compile Include="tool\bot\flakytestreporter_unittest.py" /> + <Compile Include="tool\bot\irc_command.py" /> + <Compile Include="tool\bot\irc_command_unittest.py" /> + <Compile Include="tool\bot\layouttestresultsreader.py" /> + <Compile Include="tool\bot\layouttestresultsreader_unittest.py" /> + <Compile Include="tool\bot\patchanalysistask.py" /> + <Compile Include="tool\bot\queueengine.py" /> + <Compile Include="tool\bot\queueengine_unittest.py" /> + <Compile Include="tool\bot\sheriff.py" /> + <Compile Include="tool\bot\sheriffircbot.py" /> + <Compile Include="tool\bot\sheriffircbot_unittest.py" /> + <Compile Include="tool\bot\sheriff_unittest.py" /> + <Compile Include="tool\bot\stylequeuetask.py" /> + <Compile Include="tool\bot\__init__.py" /> + <Compile Include="tool\commands\abstractlocalservercommand.py" /> + <Compile Include="tool\commands\abstractsequencedcommand.py" /> + <Compile Include="tool\commands\adduserstogroups.py" /> + <Compile Include="tool\commands\analyzechangelog.py" /> + <Compile Include="tool\commands\analyzechangelog_unittest.py" /> + <Compile 
Include="tool\commands\applywatchlistlocal.py" /> + <Compile Include="tool\commands\applywatchlistlocal_unittest.py" /> + <Compile Include="tool\commands\bugfortest.py" /> + <Compile Include="tool\commands\bugsearch.py" /> + <Compile Include="tool\commands\chromechannels.py" /> + <Compile Include="tool\commands\chromechannels_unittest.py" /> + <Compile Include="tool\commands\commandtest.py" /> + <Compile Include="tool\commands\download.py" /> + <Compile Include="tool\commands\download_unittest.py" /> + <Compile Include="tool\commands\earlywarningsystem.py" /> + <Compile Include="tool\commands\earlywarningsystem_unittest.py" /> + <Compile Include="tool\commands\expectations.py" /> + <Compile Include="tool\commands\findusers.py" /> + <Compile Include="tool\commands\gardenomatic.py" /> + <Compile Include="tool\commands\openbugs.py" /> + <Compile Include="tool\commands\openbugs_unittest.py" /> + <Compile Include="tool\commands\prettydiff.py" /> + <Compile Include="tool\commands\queries.py" /> + <Compile Include="tool\commands\queries_unittest.py" /> + <Compile Include="tool\commands\queues.py" /> + <Compile Include="tool\commands\queuestest.py" /> + <Compile Include="tool\commands\queues_unittest.py" /> + <Compile Include="tool\commands\rebaseline.py" /> + <Compile Include="tool\commands\rebaselineserver.py" /> + <Compile Include="tool\commands\rebaseline_unittest.py" /> + <Compile Include="tool\commands\roll.py" /> + <Compile Include="tool\commands\roll_unittest.py" /> + <Compile Include="tool\commands\sheriffbot.py" /> + <Compile Include="tool\commands\sheriffbot_unittest.py" /> + <Compile Include="tool\commands\stepsequence.py" /> + <Compile Include="tool\commands\suggestnominations.py" /> + <Compile Include="tool\commands\suggestnominations_unittest.py" /> + <Compile Include="tool\commands\upload.py" /> + <Compile Include="tool\commands\upload_unittest.py" /> + <Compile Include="tool\commands\__init__.py" /> + <Compile Include="tool\comments.py" /> + <Compile Include="tool\grammar.py" /> + <Compile Include="tool\grammar_unittest.py" /> + <Compile Include="tool\main.py" /> + <Compile Include="tool\mocktool.py" /> + <Compile Include="tool\mocktool_unittest.py" /> + <Compile Include="tool\multicommandtool.py" /> + <Compile Include="tool\multicommandtool_unittest.py" /> + <Compile Include="tool\servers\gardeningserver.py" /> + <Compile Include="tool\servers\gardeningserver_unittest.py" /> + <Compile Include="tool\servers\rebaselineserver.py" /> + <Compile Include="tool\servers\rebaselineserver_unittest.py" /> + <Compile Include="tool\servers\reflectionhandler.py" /> + <Compile Include="tool\servers\__init__.py" /> + <Compile Include="tool\steps\abstractstep.py" /> + <Compile Include="tool\steps\addsvnmimetypeforpng.py" /> + <Compile Include="tool\steps\addsvnmimetypeforpng_unittest.py" /> + <Compile Include="tool\steps\applypatch.py" /> + <Compile Include="tool\steps\applypatchwithlocalcommit.py" /> + <Compile Include="tool\steps\applywatchlist.py" /> + <Compile Include="tool\steps\applywatchlist_unittest.py" /> + <Compile Include="tool\steps\attachtobug.py" /> + <Compile Include="tool\steps\build.py" /> + <Compile Include="tool\steps\checkstyle.py" /> + <Compile Include="tool\steps\cleanworkingdirectory.py" /> + <Compile Include="tool\steps\cleanworkingdirectorywithlocalcommits.py" /> + <Compile Include="tool\steps\cleanworkingdirectory_unittest.py" /> + <Compile Include="tool\steps\closebug.py" /> + <Compile Include="tool\steps\closebugforlanddiff.py" /> + <Compile 
Include="tool\steps\closebugforlanddiff_unittest.py" /> + <Compile Include="tool\steps\closepatch.py" /> + <Compile Include="tool\steps\commit.py" /> + <Compile Include="tool\steps\commit_unittest.py" /> + <Compile Include="tool\steps\confirmdiff.py" /> + <Compile Include="tool\steps\createbug.py" /> + <Compile Include="tool\steps\editchangelog.py" /> + <Compile Include="tool\steps\ensurebugisopenandassigned.py" /> + <Compile Include="tool\steps\ensurelocalcommitifneeded.py" /> + <Compile Include="tool\steps\metastep.py" /> + <Compile Include="tool\steps\obsoletepatches.py" /> + <Compile Include="tool\steps\options.py" /> + <Compile Include="tool\steps\postdiff.py" /> + <Compile Include="tool\steps\postdiffforcommit.py" /> + <Compile Include="tool\steps\postdiffforrevert.py" /> + <Compile Include="tool\steps\preparechangelog.py" /> + <Compile Include="tool\steps\preparechangelogfordepsroll.py" /> + <Compile Include="tool\steps\preparechangelogforrevert.py" /> + <Compile Include="tool\steps\preparechangelogforrevert_unittest.py" /> + <Compile Include="tool\steps\preparechangelog_unittest.py" /> + <Compile Include="tool\steps\promptforbugortitle.py" /> + <Compile Include="tool\steps\reopenbugafterrollout.py" /> + <Compile Include="tool\steps\revertrevision.py" /> + <Compile Include="tool\steps\runtests.py" /> + <Compile Include="tool\steps\runtests_unittest.py" /> + <Compile Include="tool\steps\steps_unittest.py" /> + <Compile Include="tool\steps\suggestreviewers.py" /> + <Compile Include="tool\steps\suggestreviewers_unittest.py" /> + <Compile Include="tool\steps\update.py" /> + <Compile Include="tool\steps\updatechangelogswithreviewer.py" /> + <Compile Include="tool\steps\updatechangelogswithreview_unittest.py" /> + <Compile Include="tool\steps\updatechromiumdeps.py" /> + <Compile Include="tool\steps\update_unittest.py" /> + <Compile Include="tool\steps\validatechangelogs.py" /> + <Compile Include="tool\steps\validatechangelogs_unittest.py" /> + <Compile Include="tool\steps\validatereviewer.py" /> + <Compile Include="tool\steps\__init__.py" /> + <Compile Include="tool\__init__.py" /> + <Compile Include="to_be_moved\update_webgl_conformance_tests.py" /> + <Compile Include="to_be_moved\update_webgl_conformance_tests_unittest.py" /> + <Compile Include="to_be_moved\__init__.py" /> + <Compile Include="__init__.py" /> + </ItemGroup> + <ItemGroup> + <Folder Include="bindings\" /> + <Folder Include="common\" /> + <Folder Include="common\checkout\" /> + <Folder Include="common\checkout\scm\" /> + <Folder Include="common\config\" /> + <Folder Include="common\net\" /> + <Folder Include="common\net\bugzilla\" /> + <Folder Include="common\net\buildbot\" /> + <Folder Include="common\net\irc\" /> + <Folder Include="common\system\" /> + <Folder Include="common\thread\" /> + <Folder Include="common\watchlist\" /> + <Folder Include="layout_tests\" /> + <Folder Include="layout_tests\controllers\" /> + <Folder Include="layout_tests\layout_package\" /> + <Folder Include="layout_tests\models\" /> + <Folder Include="layout_tests\port\" /> + <Folder Include="layout_tests\reftests\" /> + <Folder Include="layout_tests\servers\" /> + <Folder Include="layout_tests\views\" /> + <Folder Include="performance_tests\" /> + <Folder Include="style\" /> + <Folder Include="style\checkers\" /> + <Folder Include="test\" /> + <Folder Include="thirdparty\" /> + <Folder Include="thirdparty\mod_pywebsocket\" /> + <Folder Include="thirdparty\mod_pywebsocket\handshake\" /> + <Folder Include="tool\" /> + <Folder Include="tool\bot\" /> 
+ <Folder Include="tool\commands\" /> + <Folder Include="tool\servers\" /> + <Folder Include="tool\steps\" /> + <Folder Include="to_be_moved\" /> + </ItemGroup> + <Import Project="$(MSBuildToolsPath)\Microsoft.Common.targets" /> +</Project>
\ No newline at end of file
diff --git a/Tools/Scripts/webkitpy/webkitpy.sln b/Tools/Scripts/webkitpy/webkitpy.sln
new file mode 100644
index 000000000..7648387b8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/webkitpy.sln
@@ -0,0 +1,18 @@
+
+Microsoft Visual Studio Solution File, Format Version 11.00
+# Visual Studio 2010
+Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "webkitpy", "webkitpy.pyproj", "{59B0A791-93FE-40F8-A52B-BA19B73E8FA6}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {59B0A791-93FE-40F8-A52B-BA19B73E8FA6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {59B0A791-93FE-40F8-A52B-BA19B73E8FA6}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
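The parallel runner added in webkitpy/test/runner.py above is driven by a small message protocol: Runner posts one ('test', name) message per test into the pool behind the new -j/--child-processes option, and each _Worker answers with 'started_test' and 'finished_test' messages. A toy serial model of that round trip (ToyCaller, ToyWorker, and run_serially are invented names for illustration; the real message_pool forks worker processes and pickles every message, which is why _Worker rewrites each TestResult into plain name/traceback pairs):

    import time

    class ToyWorker(object):
        """Stands in for runner._Worker: runs one test per 'test' message."""
        def __init__(self, caller):
            self._caller = caller

        def handle(self, message_name, source, test_name):
            assert message_name == 'test'
            start = time.time()
            self._caller.post('started_test', test_name)
            # A real worker loads and runs the named test here; an empty
            # failure list stands in for the rewritten TestResult.
            self._caller.post('finished_test', test_name, time.time() - start, [])

    class ToyCaller(object):
        """Stands in for Runner: receives the messages workers post back."""
        def post(self, message_name, *args):
            # The real pool records which worker sent the message as `source`.
            self.handle(message_name, 'worker/0', *args)

        def handle(self, message_name, source, *args):
            print('%s: %s %s' % (source, message_name, args))

    def run_serially(test_names):
        # The real pool distributes these messages across child processes.
        caller = ToyCaller()
        worker = ToyWorker(caller)
        for name in test_names:
            worker.handle('test', 'manager', name)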