author     Simon Hausmann <simon.hausmann@nokia.com>    2012-05-07 11:21:11 +0200
committer  Simon Hausmann <simon.hausmann@nokia.com>    2012-05-07 11:21:11 +0200
commit     2cf6c8816a73e0132bd8fa3b509d62d7c51b6e47 (patch)
tree       988e8c5b116dd0466244ae2fe5af8ee9be926d76 /Tools/Scripts
parent     dd91e772430dc294e3bf478c119ef8d43c0a3358 (diff)
download   qtwebkit-2cf6c8816a73e0132bd8fa3b509d62d7c51b6e47.tar.gz
Imported WebKit commit 7e538425aa020340619e927792f3d895061fb54b (http://svn.webkit.org/repository/webkit/trunk@116286)
Diffstat (limited to 'Tools/Scripts')
-rw-r--r--  Tools/Scripts/VCSUtils.pm | 17
-rwxr-xr-x  Tools/Scripts/build-jsc | 52
-rwxr-xr-x  Tools/Scripts/build-webkit | 379
-rwxr-xr-x  Tools/Scripts/check-for-inappropriate-objc-class-names | 3
-rwxr-xr-x  Tools/Scripts/configure-github-as-upstream | 35
-rwxr-xr-x  Tools/Scripts/copy-webkitlibraries-to-product-directory | 69
-rwxr-xr-x  Tools/Scripts/git-add-reviewer | 2
-rwxr-xr-x  Tools/Scripts/new-run-webkit-tests | 12
-rwxr-xr-x  Tools/Scripts/old-run-webkit-tests | 28
-rwxr-xr-x  Tools/Scripts/prepare-ChangeLog | 1
-rwxr-xr-x  Tools/Scripts/run-fast-jsc | 4
-rwxr-xr-x  Tools/Scripts/run-gtk-tests | 428
-rwxr-xr-x  Tools/Scripts/run-javascriptcore-tests | 18
-rwxr-xr-x  Tools/Scripts/run-launcher | 8
-rwxr-xr-x  Tools/Scripts/run-perf-tests | 4
-rwxr-xr-x  Tools/Scripts/run-qtwebkit-tests | 52
-rwxr-xr-x  Tools/Scripts/run-webkit-tests | 8
-rwxr-xr-x  Tools/Scripts/sync-master-with-upstream | 46
-rwxr-xr-x  Tools/Scripts/test-webkitpy | 21
-rwxr-xr-x  Tools/Scripts/update-webkit-chromium | 11
-rwxr-xr-x  Tools/Scripts/update-webkit-libs-jhbuild | 58
-rwxr-xr-x  Tools/Scripts/update-webkitefl-libs | 23
-rwxr-xr-x  Tools/Scripts/update-webkitgtk-libs | 18
-rwxr-xr-x  Tools/Scripts/webkit-build-directory | 17
-rwxr-xr-x  Tools/Scripts/webkit-tools-completion.sh | 18
-rwxr-xr-x  Tools/Scripts/webkitdirs.pm | 186
-rw-r--r--  Tools/Scripts/webkitperl/FeatureList.pm | 359
-rw-r--r--  Tools/Scripts/webkitperl/features.pm | 21
-rw-r--r--  Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py | 19
-rw-r--r--  Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py | 19
-rw-r--r--  Tools/Scripts/webkitpy/common/checkout/changelog.py | 18
-rw-r--r--  Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/common/config/committers.py | 52
-rw-r--r--  Tools/Scripts/webkitpy/common/config/ports.py | 6
-rw-r--r--  Tools/Scripts/webkitpy/common/config/ports_mock.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/common/config/urls.py | 32
-rw-r--r--  Tools/Scripts/webkitpy/common/config/urls_unittest.py | 54
-rwxr-xr-x  Tools/Scripts/webkitpy/common/config/watchlist | 63
-rw-r--r--  Tools/Scripts/webkitpy/common/find_files_unittest.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py | 16
-rw-r--r--  Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py | 27
-rw-r--r--  Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py | 25
-rw-r--r--  Tools/Scripts/webkitpy/common/net/file_uploader.py | 22
-rw-r--r--  Tools/Scripts/webkitpy/common/net/irc/ircbot.py | 21
-rw-r--r--  Tools/Scripts/webkitpy/common/net/layouttestresults.py | 6
-rw-r--r--  Tools/Scripts/webkitpy/common/net/networktransaction.py | 9
-rw-r--r--  Tools/Scripts/webkitpy/common/net/statusserver.py | 4
-rw-r--r--  Tools/Scripts/webkitpy/common/net/unittestresults.py | 50
-rw-r--r--  Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py | 98
-rw-r--r--  Tools/Scripts/webkitpy/common/system/crashlogs.py | 57
-rw-r--r--  Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py | 18
-rw-r--r--  Tools/Scripts/webkitpy/common/system/executive.py | 15
-rw-r--r--  Tools/Scripts/webkitpy/common/system/executive_mock.py | 23
-rw-r--r--  Tools/Scripts/webkitpy/common/system/file_lock.py | 16
-rw-r--r--  Tools/Scripts/webkitpy/common/system/filesystem.py | 5
-rw-r--r--  Tools/Scripts/webkitpy/common/system/filesystem_mock.py | 9
-rw-r--r--  Tools/Scripts/webkitpy/common/system/platforminfo.py | 7
-rw-r--r--  Tools/Scripts/webkitpy/common/system/platforminfo_mock.py | 8
-rw-r--r--  Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py | 31
-rw-r--r--  Tools/Scripts/webkitpy/common/system/systemhost_mock.py | 6
-rw-r--r--  Tools/Scripts/webkitpy/common/system/user.py | 16
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager.py | 76
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py | 45
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker.py | 24
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker_unittest.py | 26
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py | 25
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor_unittest.py | 183
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py | 48
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/worker.py | 88
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/worker_unittest.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py | 10
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py | 129
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py | 250
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py | 118
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py | 114
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_failures.py | 65
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py | 4
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_input.py | 20
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/__init__.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/apple.py | 3
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/base.py | 97
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py | 9
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/builders.py | 11
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/chromium.py | 137
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py | 76
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py | 8
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py | 16
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py | 5
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py | 103
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/config_mock.py | 24
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/driver.py | 17
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/efl.py | 26
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/factory.py | 24
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/gtk.py | 100
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py | 39
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/http_lock.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py | 4
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/mac.py | 105
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py | 112
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py | 72
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py | 72
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py | 35
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py | 85
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/qt.py | 14
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/server_process.py | 176
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py | 73
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/test.py | 38
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/webkit.py | 124
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py | 78
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/win.py | 16
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py | 8
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py | 76
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py | 59
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py | 159
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py | 6
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/servers/http_server.py | 3
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py | 153
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py | 164
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/views/printing.py | 90
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py | 108
-rw-r--r--  Tools/Scripts/webkitpy/performance_tests/perftest.py | 200
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py | 146
-rw-r--r--  Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py | 148
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py | 180
-rw-r--r--  Tools/Scripts/webkitpy/style/checker.py | 23
-rwxr-xr-x  Tools/Scripts/webkitpy/style/checker_unittest.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/style/checkers/cpp.py | 12
-rw-r--r--  Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py | 49
-rwxr-xr-x  Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/style/checkers/png.py | 97
-rw-r--r--  Tools/Scripts/webkitpy/style/checkers/png_unittest.py | 118
-rw-r--r--  Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/style/checkers/xml_unittest.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/style/error_handlers.py | 8
-rw-r--r--  Tools/Scripts/webkitpy/style/patchreader.py | 19
-rw-r--r--  Tools/Scripts/webkitpy/style/patchreader_unittest.py | 11
-rw-r--r--  Tools/Scripts/webkitpy/test/main.py | 155
-rw-r--r--  Tools/Scripts/webkitpy/test/runner.py | 132
-rw-r--r--  Tools/Scripts/webkitpy/test/runner_unittest.py | 112
-rw-r--r--  Tools/Scripts/webkitpy/test/test_finder.py | 184
-rw-r--r--  Tools/Scripts/webkitpy/test/test_finder_unittest.py | 134
-rw-r--r--  Tools/Scripts/webkitpy/thirdparty/__init__.py | 10
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py | 10
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py | 57
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/irc_command.py | 14
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py | 29
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py | 34
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py | 24
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/sheriff.py | 5
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py | 6
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py | 6
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/download.py | 23
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/download_unittest.py | 30
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py | 4
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/queries.py | 183
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/queries_unittest.py | 108
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/queues.py | 10
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/queues_unittest.py | 9
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/rebaseline.py | 106
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py | 85
-rw-r--r--  Tools/Scripts/webkitpy/tool/mocktool.py | 7
-rw-r--r--  Tools/Scripts/webkitpy/tool/servers/gardeningserver.py | 4
-rw-r--r--  Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py | 31
-rw-r--r--  Tools/Scripts/webkitpy/tool/steps/commit.py | 7
-rw-r--r--  Tools/Scripts/webkitpy/tool/steps/commit_unittest.py | 5
-rw-r--r--  Tools/Scripts/webkitpy/tool/steps/createbug.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/tool/steps/runtests.py | 15
-rw-r--r--  Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py | 6
180 files changed, 6114 insertions, 2884 deletions
diff --git a/Tools/Scripts/VCSUtils.pm b/Tools/Scripts/VCSUtils.pm
index e6858d955..9835c17d4 100644
--- a/Tools/Scripts/VCSUtils.pm
+++ b/Tools/Scripts/VCSUtils.pm
@@ -120,6 +120,9 @@ sub exitStatus($)
if ($^O eq "MSWin32") {
return $returnvalue >> 8;
}
+ if (!WIFEXITED($returnvalue)) {
+ return 254;
+ }
return WEXITSTATUS($returnvalue);
}
@@ -434,6 +437,16 @@ sub possiblyColored($$)
}
}
+sub adjustPathForRecentRenamings($)
+{
+ my ($fullPath) = @_;
+
+ $fullPath =~ s|WebCore/webaudio|WebCore/Modules/webaudio|g;
+ $fullPath =~ s|JavaScriptCore/wtf|WTF/wtf|g;
+
+ return $fullPath;
+}
+
sub canonicalizePath($)
{
my ($file) = @_;
@@ -624,7 +637,7 @@ sub parseGitDiffHeader($$)
# The first and second paths can differ in the case of copies
# and renames. We use the second file path because it is the
# destination path.
- $indexPath = $4;
+ $indexPath = adjustPathForRecentRenamings($4);
# Use $POSTMATCH to preserve the end-of-line character.
$_ = "Index: $indexPath$POSTMATCH"; # Convert to SVN format.
} else {
@@ -740,7 +753,7 @@ sub parseSvnDiffHeader($$)
my $indexPath;
if (/$svnDiffStartRegEx/) {
- $indexPath = $1;
+ $indexPath = adjustPathForRecentRenamings($1);
} else {
die("First line of SVN diff does not begin with \"Index \": \"$_\"");
}
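For context on the exitStatus() change above: WEXITSTATUS() is only meaningful when WIFEXITED() reports a normal exit, so the patch returns 254 as a sentinel for abnormal termination (e.g. death by a signal). A minimal Python sketch of the same decoding, assuming a raw POSIX wait status such as the one returned by os.system():

    import os

    def exit_status(raw_status):
        # Perl's $? on MSWin32 keeps the exit code in the high byte; mirrored here for parity.
        if os.name == "nt":
            return raw_status >> 8
        # WEXITSTATUS is only valid after a normal exit; return the patch's 254 sentinel otherwise.
        if not os.WIFEXITED(raw_status):
            return 254
        return os.WEXITSTATUS(raw_status)

    print(exit_status(os.system("true")))  # prints 0 on POSIX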
diff --git a/Tools/Scripts/build-jsc b/Tools/Scripts/build-jsc
index 2da9ee9da..418f22b61 100755
--- a/Tools/Scripts/build-jsc
+++ b/Tools/Scripts/build-jsc
@@ -61,26 +61,40 @@ chdirWebKit();
my @options = XcodeOptions();
my @coverageSupportOptions = ($coverageSupport) ? XcodeCoverageSupportOptions() : ();
-chdir "Source/JavaScriptCore" or die "Can't find JavaScriptCore directory to build from";
-my $result;
-if (isAppleMacWebKit()) {
- $result = system "sh", "-c", 'xcodebuild -project JavaScriptCore.xcodeproj "$@" | grep -v setenv && exit ${PIPESTATUS[0]}', "xcodebuild", @options, @ARGV, @coverageSupportOptions;
-} elsif (isAppleWinWebKit()) {
- $result = buildVisualStudioProject("JavaScriptCore.vcproj/JavaScriptCore.sln");
-} elsif (isGtk()) {
- checkForArgumentAndRemoveFromARGV("--gtk");
- $result = buildGtkProject("JavaScriptCore", 0, @ARGV);
-} elsif (isQt()) {
- # Remove duplicated --qt options to avoid passing them to qmake
+if (isQt()) {
checkForArgumentAndRemoveFromARGV("--qt");
my @projects = ("WTF", "JavaScriptCore");
- $result = buildQMakeProjects(\@projects, 0, @ARGV);
-} elsif (isWx()) {
- # Builds everything in one-shot. No need to build anything here.
- $result = 0;
+ # Pick up the --no-webkit2 option from BUILD_WEBKIT_ARGS if it is needed
+ push @ARGV, split(/ /, $ENV{'BUILD_WEBKIT_ARGS'}) if ($ENV{'BUILD_WEBKIT_ARGS'});
+ push @ARGV, "--qmakearg=CONFIG+=no_webkit2" if checkForArgumentAndRemoveFromARGV("--no-webkit2");
+ my $result = buildQMakeProjects(\@projects, 0, @ARGV);
+ exit exitStatus($result);
} elsif (cmakeBasedPortName()) {
- buildCMakeProjectOrExit(0, cmakeBasedPortName(), undef, undef, ("-DONLY_BUILD_JAVASCRIPTCORE=1", cmakeBasedPortArguments())); # This call never returns.
-} else {
- die "Building not defined for this platform!\n";
+ buildCMakeProjectOrExit(0, cmakeBasedPortName(), undef, "jsc", cmakeBasedPortArguments()); # This call only returns if nothing went wrong
+ exit exitStatus(0);
}
-exit exitStatus($result);
+
+sub buildMyProject
+{
+ my ($projectDirectory, $projectName) = @_;
+ my $result;
+ chdir $projectDirectory or die "Can't find $projectName directory to build from";
+ if (isAppleMacWebKit()) {
+ $result = system "sh", "-c", ('xcodebuild -project ' . $projectName . '.xcodeproj "$@" | grep -v setenv && exit ${PIPESTATUS[0]}'), "xcodebuild", @options, @ARGV, @coverageSupportOptions;
+ } elsif (isAppleWinWebKit()) {
+ $result = buildVisualStudioProject("$projectName.vcproj/$projectName.sln");
+ } elsif (isGtk()) {
+ checkForArgumentAndRemoveFromARGV("--gtk");
+ $result = buildGtkProject($projectName, 0, @ARGV);
+ } elsif (isWx()) {
+ # Builds everything in one-shot. No need to build anything here.
+ $result = 0;
+ } else {
+ die "Building not defined for this platform!\n";
+ }
+ exit exitStatus($result) if exitStatus($result);
+ chdirWebKit();
+}
+
+buildMyProject("Source/WTF", "WTF");
+buildMyProject("Source/JavaScriptCore", "JavaScriptCore");
diff --git a/Tools/Scripts/build-webkit b/Tools/Scripts/build-webkit
index 7c8cac8e6..cba264b83 100755
--- a/Tools/Scripts/build-webkit
+++ b/Tools/Scripts/build-webkit
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
-# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved.
+# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Apple Inc. All rights reserved.
# Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2010 moiji-mobile.com All rights reserved.
# Copyright (C) 2011 Research In Motion Limited. All rights reserved.
@@ -39,7 +39,7 @@ use FindBin;
use Getopt::Long qw(:config pass_through);
use lib $FindBin::Bin;
use webkitdirs;
-use webkitperl::features;
+use webkitperl::FeatureList qw(getFeatureOptionList);
use POSIX;
sub cMakeArgsFromFeatures();
@@ -62,309 +62,10 @@ my $makeArgs = "";
my $cmakeArgs;
my $onlyWebKitProject = 0;
my $noWebKit2 = 0;
+my $coverageSupport = 0;
my $startTime = time();
-my (
- $requestAnimationFrameSupport,
- $threeDCanvasSupport,
- $threeDRenderingSupport,
- $accelerated2dCanvasSupport,
- $animationAPISupport,
- $blobSupport,
- $channelMessagingSupport,
- $clientBasedGeolocationSupport,
- $coverageSupport,
- $cssFiltersSupport,
- $cssGridLayoutSupport,
- $cssShadersSupport,
- $sqlDatabaseSupport,
- $datalistSupport,
- $dataTransferItemsSupport,
- $detailsSupport,
- $deviceOrientationSupport,
- $directoryUploadSupport,
- $downloadAttributeSupport,
- $fileSystemSupport,
- $filtersSupport,
- $ftpDirSupport,
- $fullscreenAPISupport,
- $gamepadSupport,
- $geolocationSupport,
- $iconDatabaseSupport,
- $imageResizerSupport,
- $indexedDatabaseSupport,
- $inputColorSupport,
- $inputSpeechSupport,
- $scriptedSpeechSupport,
- $inputTypeDateSupport,
- $inputTypeDatetimeSupport,
- $inputTypeDatetimelocalSupport,
- $inputTypeMonthSupport,
- $inputTypeTimeSupport,
- $inputTypeWeekSupport,
- $javaScriptDebuggerSupport,
- $legacyNotificationsSupport,
- $linkPrefetchSupport,
- $mathmlSupport,
- $mediaSourceSupport,
- $mediaStatisticsSupport,
- $mediaStreamSupport,
- $meterTagSupport,
- $mhtmlSupport,
- $microdataSupport,
- $mutationObserversSupport,
- $netscapePluginSupport,
- $notificationsSupport,
- $orientationEventsSupport,
- $pageVisibilityApiSupport,
- $progressTagSupport,
- $quotaSupport,
- $registerProtocolHandlerSupport,
- $shadowDomSupport,
- $sharedWorkersSupport,
- $styleScopedSupport,
- $svgSupport,
- $svgDOMObjCBindingsSupport,
- $svgFontsSupport,
- $systemMallocSupport,
- $tiledBackingStoreSupport,
- $touchEventsSupport,
- $touchIconLoadingSupport,
- $vibrationSupport,
- $videoSupport,
- $videoTrackSupport,
- $webAudioSupport,
- $webInspectorSupport,
- $webSocketsSupport,
- $webTimingSupport,
- $workersSupport,
- $xsltSupport,
- $wtfURL,
-);
-
-my @features = (
- { option => "request-animation-frame", desc => "Toggle requestAnimationFrame support",
- define => "ENABLE_REQUEST_ANIMATION_FRAME", default => (isAppleMacWebKit() || isGtk() || isBlackBerry()), value => \$requestAnimationFrameSupport },
-
- { option => "download-attribute", desc => "Toggle download attribute support",
- define => "ENABLE_DOWNLOAD_ATTRIBUTE", default => isBlackBerry(), value =>\$downloadAttributeSupport },
-
- { option => "3d-canvas", desc => "Toggle 3D canvas (WebGL) support",
- define => "ENABLE_WEBGL", default => (isAppleMacWebKit() && !isLeopard()), value => \$threeDCanvasSupport },
-
- { option => "3d-rendering", desc => "Toggle 3D rendering support",
- define => "ENABLE_3D_RENDERING", default => (isAppleMacWebKit() || isQt()), value => \$threeDRenderingSupport },
-
- { option => "accelerated-2d-canvas", desc => "Toggle accelerated 2D canvas support",
- define => "ENABLE_ACCELERATED_2D_CANVAS", default => 0, value => \$accelerated2dCanvasSupport },
-
- { option => "animation-api", desc => "Toggle animation API support",
- define => "ENABLE_ANIMATION_API", default => isBlackBerry(), value => \$animationAPISupport },
-
- { option => "blob", desc => "Toggle Blob support",
- define => "ENABLE_BLOB", default => (isAppleMacWebKit() || isGtk() || isChromium() || isBlackBerry()), value => \$blobSupport },
-
- { option => "channel-messaging", desc => "Toggle MessageChannel and MessagePort support",
- define => "ENABLE_CHANNEL_MESSAGING", default => 1, value => \$channelMessagingSupport },
-
- { option => "client-based-geolocation", desc => "Toggle client-based Geolocation support",
- define => "ENABLE_CLIENT_BASED_GEOLOCATION", default => (isAppleWebKit() || isGtk() || isBlackBerry()), value => \$clientBasedGeolocationSupport },
-
- { option => "css-filters", desc => "Toggle CSS Filters support",
- define => "ENABLE_CSS_FILTERS", default => isAppleWebKit(), value => \$cssFiltersSupport },
-
- { option => "css-grid-layout", desc => "Toggle CSS Grid Layout support",
- define => "ENABLE_CSS_GRID_LAYOUT", default => 0, value => \$cssGridLayoutSupport },
-
- { option => "css-shaders", desc => "Toggle CSS Shaders (within CSS Filters) support",
- define => "ENABLE_CSS_SHADERS", default => isAppleWebKit(), value => \$cssShadersSupport },
-
- { option => "coverage", desc => "Toggle code coverage support",
- define => "", default => 0, value => \$coverageSupport },
-
- { option => "sql-database", desc => "Toggle SQL Database Support",
- define => "ENABLE_SQL_DATABASE", default => 1, value => \$sqlDatabaseSupport },
-
- { option => "datalist", desc => "Toggle HTML5 datalist support",
- define => "ENABLE_DATALIST", default => 1, value => \$datalistSupport },
-
- { option => "data-transfer-items", desc => "Toggle HTML5 data transfer items support",
- define => "ENABLE_DATA_TRANSFER_ITEMS", default => 0, value => \$dataTransferItemsSupport },
-
- { option => "details", desc => "Toggle HTML5 details support",
- define => "ENABLE_DETAILS", default => 1, value => \$detailsSupport },
-
- { option => "device-orientation", desc => "Toggle DeviceOrientation support",
- define => "ENABLE_DEVICE_ORIENTATION", default => isBlackBerry(), value => \$deviceOrientationSupport },
-
- { option => "directory-upload", desc => "Toogle Directory upload support",
- define => "ENABLE_DIRECTORY_UPLOAD", default => 0, value => \$directoryUploadSupport },
-
- { option => "file-system", desc => "Toggle FileSystem support",
- define => "ENABLE_FILE_SYSTEM", default => 0, value => \$fileSystemSupport },
-
- { option => "filters", desc => "Toggle SVG Filters support",
- define => "ENABLE_FILTERS", default => (isAppleWebKit() || isGtk() || isQt() || isEfl() || isBlackBerry()), value => \$filtersSupport },
-
- { option => "ftpdir", desc => "Toggle FTP directory support",
- define => "ENABLE_FTPDIR", default => !isWinCE(), value => \$ftpDirSupport },
-
- { option => "fullscreen-api", desc => "Toggle Fullscreen API support",
- define => "ENABLE_FULLSCREEN_API", default => (isAppleMacWebKit() || isGtk()), value => \$fullscreenAPISupport },
-
- { option => "gamepad", desc => "Toggle Gamepad support",
- define => "ENABLE_GAMEPAD", default => 0, value => \$gamepadSupport },
-
- { option => "geolocation", desc => "Toggle Geolocation support",
- define => "ENABLE_GEOLOCATION", default => (isAppleWebKit() || isGtk() || isBlackBerry()), value => \$geolocationSupport },
-
- { option => "icon-database", desc => "Toggle Icon database support",
- define => "ENABLE_ICONDATABASE", default => 1, value => \$iconDatabaseSupport },
-
- { option => "indexed-database", desc => "Toggle Indexed Database API support",
- define => "ENABLE_INDEXED_DATABASE", default => 0, value => \$indexedDatabaseSupport },
-
- { option => "input-color", desc => "Color Input support",
- define => "ENABLE_INPUT_COLOR", default => isBlackBerry(), value => \$inputColorSupport },
-
- { option => "input-speech", desc => "Speech Input API support",
- define => "ENABLE_INPUT_SPEECH", default => 0, value => \$inputSpeechSupport },
-
- { option => "scripted-speech", desc => "Scripted Speech API support",
- define => "ENABLE_SCRIPTED_SPEECH", default => 0, value => \$scriptedSpeechSupport },
-
- { option => "input-type-date", desc => "Toggle date type <input> support",
- define => "ENABLE_INPUT_TYPE_DATE", default => 0, value => \$inputTypeDateSupport },
-
- { option => "input-type-datetime", desc => "Toggle datetime type <input> support",
- define => "ENABLE_INPUT_TYPE_DATETIME", default => 0, value => \$inputTypeDatetimeSupport },
-
- { option => "input-type-datetimelocal", desc => "Toggle datetime-local type <input> support",
- define => "ENABLE_INPUT_TYPE_DATETIMELOCAL", default => 0, value => \$inputTypeDatetimelocalSupport },
-
- { option => "input-type-month", desc => "Toggle month type <input> support",
- define => "ENABLE_INPUT_TYPE_MONTH", default => 0, value => \$inputTypeMonthSupport },
-
- { option => "input-type-time", desc => "Toggle time type <input> support",
- define => "ENABLE_INPUT_TYPE_TIME", default => 0, value => \$inputTypeTimeSupport },
-
- { option => "input-type-week", desc => "Toggle week type <input> support",
- define => "ENABLE_INPUT_TYPE_WEEK", default => 0, value => \$inputTypeWeekSupport },
-
- { option => "inspector", desc => "Toggle Web Inspector support",
- define => "ENABLE_INSPECTOR", default => !isWinCE(), value => \$webInspectorSupport },
-
- { option => "javascript-debugger", desc => "Toggle JavaScript Debugger/Profiler support",
- define => "ENABLE_JAVASCRIPT_DEBUGGER", default => 1, value => \$javaScriptDebuggerSupport },
-
- { option => "legacy-notifications", desc => "Toggle Legacy Desktop Notifications Support",
- define => "ENABLE_LEGACY_NOTIFICATIONS", default => isBlackBerry(), value => \$legacyNotificationsSupport },
-
- { option => "link-prefetch", desc => "Toggle pre fetching support",
- define => "ENABLE_LINK_PREFETCH", default => 0, value => \$linkPrefetchSupport },
-
- { option => "mathml", desc => "Toggle MathML support",
- define => "ENABLE_MATHML", default => 1, value => \$mathmlSupport },
-
- { option => "media-source", desc => "Toggle Media Source support",
- define => "ENABLE_MEDIA_SOURCE", default => 0, value => \$mediaSourceSupport },
-
- { option => "media-statistics", desc => "Toggle Media Statistics support",
- define => "ENABLE_MEDIA_STATISTICS", default => 0, value => \$mediaStatisticsSupport },
-
- { option => "media-stream", desc => "Toggle Media Stream API support (implies Blob support, currently Chromium and GTK only)",
- define => "ENABLE_MEDIA_STREAM", default => (isChromium() || isGtk()), value => \$mediaStreamSupport },
-
- { option => "meter-tag", desc => "Meter Tag support",
- define => "ENABLE_METER_TAG", default => !isAppleWinWebKit(), value => \$meterTagSupport },
-
- { option => "mhtml", desc => "Toggle MHTML support",
- define => "ENABLE_MHTML", default => 0, value => \$mhtmlSupport },
-
- { option => "microdata", desc => "Toggle Microdata support",
- define => "ENABLE_MICRODATA", default => 0, value => \$microdataSupport },
-
- { option => "mutation-observers", desc => "Toggle DOM mutation observer support",
- define => "ENABLE_MUTATION_OBSERVERS", default => 1, value => \$mutationObserversSupport },
-
- { option => "netscape-plugin", desc => "Netscape Plugin support",
- define => "ENABLE_NETSCAPE_PLUGIN_API", default => !isEfl(), value => \$netscapePluginSupport },
-
- { option => "notifications", desc => "Toggle Desktop Notifications Support",
- define => "ENABLE_NOTIFICATIONS", default => isBlackBerry(), value => \$notificationsSupport },
-
- { option => "orientation-events", desc => "Toggle Orientation Events support",
- define => "ENABLE_ORIENTATION_EVENTS", default => isBlackBerry(), value => \$orientationEventsSupport },
-
- { option => "page-visibility-api", desc => "Page Visibility API support",
- define => "ENABLE_PAGE_VISIBILITY_API", default => isEfl(), value => \$pageVisibilityApiSupport },
-
- { option => "progress-tag", desc => "Progress Tag support",
- define => "ENABLE_PROGRESS_TAG", default => 1, value => \$progressTagSupport },
-
- { option => "quota", desc => "Toggle Quota support",
- define => "ENABLE_QUOTA", default => 0, value => \$quotaSupport },
-
- { option => "register-protocol-handler", desc => "Register Protocol Handler support",
- define => "ENABLE_REGISTER_PROTOCOL_HANDLER", default => 0, value => \$registerProtocolHandlerSupport },
-
- { option => "system-malloc", desc => "Toggle system allocator instead of TCmalloc",
- define => "USE_SYSTEM_MALLOC", default => 0, value => \$systemMallocSupport },
-
- { option => "shadow-dom", desc => "Toggle Shadow DOM support",
- define => "ENABLE_SHADOW_DOM", default => 0, value => \$shadowDomSupport },
-
- { option => "shared-workers", desc => "Toggle SharedWorkers support",
- define => "ENABLE_SHARED_WORKERS", default => (isAppleWebKit() || isGtk() || isBlackBerry() || isEfl()), value => \$sharedWorkersSupport },
-
- { option => "style-scoped", desc => "Toggle <style scoped> support",
- define => "ENABLE_STYLE_SCOPED", default => 0, value => \$styleScopedSupport },
-
- { option => "svg", desc => "Toggle SVG support",
- define => "ENABLE_SVG", default => 1, value => \$svgSupport },
-
- { option => "svg-dom-objc-bindings", desc => "Toggle SVG DOM Objective-C bindings support (implies SVG support)",
- define => "ENABLE_SVG_DOM_OBJC_BINDINGS", default => isAppleMacWebKit(), value => \$svgDOMObjCBindingsSupport },
-
- { option => "svg-fonts", desc => "Toggle SVG fonts support (imples SVG support)",
- define => "ENABLE_SVG_FONTS", default => 1, value => \$svgFontsSupport },
-
- { option => "tiled-backing-store", desc => "Toggle Tiled Backing Store support",
- define => "WTF_USE_TILED_BACKING_STORE", default => isQt(), value => \$tiledBackingStoreSupport },
-
- { option => "touch-events", desc => "Toggle Touch Events support",
- define => "ENABLE_TOUCH_EVENTS", default => (isQt() || isBlackBerry()), value => \$touchEventsSupport },
-
- { option => "touch-icon-loading", desc => "Toggle Touch Icon Loading Support",
- define => "ENABLE_TOUCH_ICON_LOADING", default => 0, value => \$touchIconLoadingSupport },
-
- { option => "vibration", desc => "Toggle Vibration API support",
- define => "ENABLE_VIBRATION", default => isEfl(), value => \$vibrationSupport },
-
- { option => "video", desc => "Toggle Video support",
- define => "ENABLE_VIDEO", default => (isAppleWebKit() || isGtk() || isBlackBerry() || isEfl()), value => \$videoSupport },
-
- { option => "video-track", desc => "Toggle Video Track support",
- define => "ENABLE_VIDEO_TRACK", default => (isAppleWebKit() || isGtk()), value => \$videoTrackSupport },
-
- { option => "web-audio", desc => "Toggle Web Audio support",
- define => "ENABLE_WEB_AUDIO", default => 0, value=> \$webAudioSupport },
-
- { option => "web-sockets", desc => "Toggle Web Sockets support",
- define => "ENABLE_WEB_SOCKETS", default => 1, value=> \$webSocketsSupport },
-
- { option => "web-timing", desc => "Toggle Web Timing support",
- define => "ENABLE_WEB_TIMING", default => 0, value=> \$webTimingSupport },
-
- { option => "workers", desc => "Toggle Web Workers support",
- define => "ENABLE_WORKERS", default => (isAppleWebKit() || isGtk() || isBlackBerry() || isEfl()), value => \$workersSupport },
-
- { option => "wtfurl", desc => "Toogle the use of WTFURL for URL parsing",
- define => "WTF_USE_WTFURL", default => 0, value => \$wtfURL },
-
- { option => "xslt", desc => "Toggle XSLT support",
- define => "ENABLE_XSLT", default => 1, value => \$xsltSupport },
-);
+my @features = getFeatureOptionList();
# Update defaults from Qt's project file
if (isQt()) {
@@ -405,10 +106,6 @@ foreach (@features) {
${$_->{value}} = ($minimal ? 0 : $_->{default});
}
-$svgSupport = $svgSupport || $svgDOMObjCBindingsSupport || $svgFontsSupport;
-
-$blobSupport = $blobSupport || $mediaStreamSupport;
-
my $programName = basename($0);
my $usage = <<EOF;
Usage: $programName [options] [options to pass to build system]
@@ -416,7 +113,7 @@ Usage: $programName [options] [options to pass to build system]
--clean Cleanup the build directory
--debug Compile in debug mode
--gyp Use GYP-generated project files
- --dsym Change debugging format to dwarf-with-dsym (Mac only)
+ --coverage Enable Code Coverage support (Mac only)
--blackberry Build the BlackBerry port on Mac/Linux
--chromium Build the Chromium port on Mac/Win/Linux
@@ -458,6 +155,7 @@ my %options = (
'v8' => \$v8,
'only-webkit' => \$onlyWebKitProject,
'no-webkit2' => \$noWebKit2,
+ 'coverage' => \$coverageSupport,
);
# Build usage text and options list from features
@@ -496,7 +194,7 @@ sub unlinkZeroFiles()
my @projects = ("Source/JavaScriptCore", "Source/WebCore", "Source/WebKit");
# Build WTF as a separate static library on ports which support it.
-splice @projects, 0, 0, "Source/WTF" if isAppleMacWebKit();
+splice @projects, 0, 0, "Source/WTF" if isAppleMacWebKit() or isAppleWinWebKit();
for my $dir (@projects) {
if (! -d $dir) {
@@ -536,10 +234,8 @@ if (isGtk()) {
}
foreach (@features) {
- if ($_->{option} ne "coverage" && $_->{option} ne "wtfurl") {
- my $option = option($_->{define}, ${$_->{value}}, $_->{default});
- push @options, $option unless $option eq "";
- }
+ my $option = option($_->{define}, ${$_->{value}}, $_->{default});
+ push @options, $option unless $option eq "";
}
# ANGLE must come before WebCore
@@ -549,37 +245,7 @@ if (isGtk()) {
push @projects, ("Source/WebKit2", "Tools/MiniBrowser") if osXVersion()->{"minor"} >= 6 and !$noWebKit2;
# Copy library and header from WebKitLibraries to a findable place in the product directory.
- my @librariesToCopy = (
- "libWebKitSystemInterfaceLeopard.a",
- "libWebKitSystemInterfaceSnowLeopard.a",
- "libWebKitSystemInterfaceLion.a",
- "libWebCoreSQLite3.a",
- );
- foreach my $libName (@librariesToCopy) {
- my $srcLib = "WebKitLibraries/" . $libName;
- my $lib = "$productDir/" . $libName;
- if (!-e $lib || -M $lib > -M $srcLib) {
- print "Updating $lib\n";
- system "ditto", $srcLib, $lib;
- system "ranlib", $lib;
- }
- }
-
- # FIXME: This code should be abstracted to not be copy/paste.
- my $srcHeader = "WebKitLibraries/WebKitSystemInterface.h";
- my $header = "$productDir/usr/local/include/WebKitSystemInterface.h";
- if (!-e $header || -M $header > -M $srcHeader) {
- print "Updating $header\n";
- system "mkdir", "-p", "$productDir/usr/local/include";
- system "ditto", $srcHeader, $header;
- }
-
- my $srcHeaderDir = "WebKitLibraries/WebCoreSQLite3";
- my $headerDir = "$productDir/WebCoreSQLite3";
- if (!-e $headerDir || -M $headerDir > -M $srcHeaderDir) {
- print "Updating $headerDir\n";
- system "ditto", $srcHeaderDir, $headerDir;
- }
+ (system("perl Tools/Scripts/copy-webkitlibraries-to-product-directory") == 0) or die;
} elsif (isWinCairo()) {
(system("perl Tools/Scripts/update-webkit-wincairo-libs") == 0) or die;
} elsif (isAppleWinWebKit()) {
@@ -593,7 +259,7 @@ if (isGtk()) {
push @options, "--qmakearg=CONFIG+=no_webkit2" if $noWebKit2;
if (checkForArgumentAndRemoveFromARGV("-2")) {
- print "Note: WebKit2 is now built by default, you don't need to passs -2. Disable using --no-webkit2\n";
+ print "Note: WebKit2 is now built by default, you don't need to pass -2. Disable using --no-webkit2\n";
}
@options = (@ARGV, @options);
@@ -602,12 +268,11 @@ if (isGtk()) {
push @options, "DEFINES+=$_->{define}=${$_->{value}}" if $_->{define} && ${$_->{value}} != $_->{default};
}
- if ($minimal) {
- push @options, "CONFIG+=minimal";
- }
-
if ($v8) {
- push @options, "CONFIG+=v8";
+ print "Building WebKit2 with v8 is not supported currently. Disabling WebKit2.\n";
+ # FIXME: Deal with this in defaults_pre, once Qt has support for getting at the
+ # command line arguments at that stage.
+ push @options, "CONFIG+=v8 CONFIG+=no_webkit2";
}
}
@@ -616,9 +281,6 @@ if (isGtk()) {
# build options, etc.
@projects = ("Source/WebKit") if $onlyWebKitProject;
-# Force re-link of existing libraries if different than expected
-removeLibraryDependingOnFeature("WebCore", "SVG", $svgSupport);
-
if (isInspectorFrontend()) {
exit exitStatus(copyInspectorFrontendFiles());
}
@@ -678,7 +340,7 @@ if (isBlackBerry()) {
}
$makeArgs .= ($makeArgs ? " " : "") . "-j" . $numberOfJobs if $makeArgs !~ /-j\s*\d+/;
$prefixPath = $ENV{"STAGE_DIR"} unless $prefixPath;
- buildCMakeProjectOrExit($clean, "BlackBerry", $prefixPath, $makeArgs, (cmakeBasedPortArguments(), cMakeArgsFromFeatures()));
+ buildCMakeProjectOrExit($clean, "BlackBerry", $prefixPath, $makeArgs, (cmakeBasedPortArguments(), cMakeArgsFromFeatures()), $cmakeArgs);
}
if (isQt()) {
@@ -700,14 +362,13 @@ for my $dir (@projects) {
my $project = basename($dir);
if (isGtk()) {
- if (!$noWebKit2) {
- unshift(@options, "--enable-webkit2");
+ if ($noWebKit2) {
+ unshift(@options, "--disable-webkit2");
}
$result = buildGtkProject($project, $clean, @options);
} elsif (isAppleMacWebKit()) {
my @local_options = @options;
push @local_options, XcodeCoverageSupportOptions() if $coverageSupport && $project ne "ANGLE";
- push @local_options, "OTHER_CFLAGS= \$(OTHER_CFLAGS) -DWTF_USE_WTFURL=1" if $wtfURL;
my $useGYPProject = $useGYP && ($project =~ "WebCore|JavaScriptCore");
my $projectPath = $useGYPProject ? "gyp/$project" : $project;
$result = buildXCodeProject($projectPath, $clean, @local_options, @ARGV);
@@ -742,6 +403,10 @@ exit if $clean;
# Don't report congrats message if build was interrupted by the user.
exit if ($result & 127) == SIGINT;
+# Explicitly chdir back to where exit will take us anyway, since the following "launcher"
+# message is relative to that directory.
+chdir $originalWorkingDirectory;
+
# Write out congratulations message.
writeCongrats();
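The main cleanup in build-webkit above is moving the roughly 300-line feature table into webkitperl::FeatureList, where each feature is a record with an option name, description, ENABLE_* define, per-port default, and value slot; both the usage text and the DEFINES passed to the build are then derived from the records. A small Python sketch of that table-driven pattern (the two sample features are illustrative only):

    # Each feature record drives both option parsing and define generation.
    features = [
        {"option": "svg", "desc": "Toggle SVG support",
         "define": "ENABLE_SVG", "default": True},
        {"option": "web-audio", "desc": "Toggle Web Audio support",
         "define": "ENABLE_WEB_AUDIO", "default": False},
    ]

    def defines_for(values):
        # Emit a define only when the chosen value differs from the port default,
        # matching the "push @options, $option unless $option eq ''" loop in the patch.
        return ["%s=%d" % (f["define"], values[f["option"]])
                for f in features if values[f["option"]] != f["default"]]

    print(defines_for({"svg": True, "web-audio": True}))  # ['ENABLE_WEB_AUDIO=1']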
diff --git a/Tools/Scripts/check-for-inappropriate-objc-class-names b/Tools/Scripts/check-for-inappropriate-objc-class-names
index c93159f5f..5ee8410b0 100755
--- a/Tools/Scripts/check-for-inappropriate-objc-class-names
+++ b/Tools/Scripts/check-for-inappropriate-objc-class-names
@@ -38,6 +38,9 @@ sub touch($);
my @allowedPrefixes = @ARGV;
+# Xcode will automatically link ObjC binaries against libarclite in some cases, which defines a class called __ARCLite__.
+push(@allowedPrefixes, "__ARCLite");
+
die "No allowed prefixes passed on the command line" if !@allowedPrefixes;
my $arch = $ENV{'CURRENT_ARCH'};
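The script's job is to verify that every Objective-C class name in the built binary carries an allowed prefix; the hunk above whitelists __ARCLite__ because Xcode can silently link libarclite. A toy Python version of the check, assuming the class names have already been extracted (e.g. from nm output):

    def inappropriate_class_names(class_names, allowed_prefixes):
        # __ARCLite__ comes from libarclite, which Xcode may link automatically.
        allowed = list(allowed_prefixes) + ["__ARCLite"]
        return [name for name in class_names
                if not any(name.startswith(prefix) for prefix in allowed)]

    print(inappropriate_class_names(["WebView", "__ARCLite__", "MyClass"], ["Web"]))
    # ['MyClass']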
diff --git a/Tools/Scripts/configure-github-as-upstream b/Tools/Scripts/configure-github-as-upstream
new file mode 100755
index 000000000..44336d789
--- /dev/null
+++ b/Tools/Scripts/configure-github-as-upstream
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright 2012 Google, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY GOOGLE INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script is intended to support the GitHub workflow described here:
+# https://trac.webkit.org/wiki/UsingGitHub
+#
+# This script adds WebKit's "root" repository on GitHub as a remote named
+# "upstream". You can use sync-master-with-upstream to keep your master
+# branch in sync with WebKit's "root" repository.
+
+import subprocess
+
+exit(subprocess.call(["git", "remote", "add", "upstream", "git://github.com/WebKit/webkit.git"]))
diff --git a/Tools/Scripts/copy-webkitlibraries-to-product-directory b/Tools/Scripts/copy-webkitlibraries-to-product-directory
new file mode 100755
index 000000000..e81a0c413
--- /dev/null
+++ b/Tools/Scripts/copy-webkitlibraries-to-product-directory
@@ -0,0 +1,69 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2005, 2008, 2010, 2011, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use strict;
+use FindBin;
+use lib $FindBin::Bin;
+use webkitdirs;
+
+my $productDir = $ENV{BUILT_PRODUCTS_DIR};
+$productDir = productDir() if !$productDir;
+
+chdirWebKit();
+
+my @librariesToCopy = (
+ "libWebKitSystemInterfaceLeopard.a",
+ "libWebKitSystemInterfaceSnowLeopard.a",
+ "libWebKitSystemInterfaceLion.a",
+ "libWebCoreSQLite3.a",
+);
+
+my $ranlib = `xcrun -find ranlib`;
+chomp $ranlib;
+foreach my $libName (@librariesToCopy) {
+ my $srcLib = "WebKitLibraries/" . $libName;
+ my $lib = "$productDir/" . $libName;
+ if (!-e $lib || -M $lib > -M $srcLib) {
+ print "Updating $lib\n";
+ system "ditto", $srcLib, $lib;
+ system $ranlib, $lib;
+ }
+}
+
+# FIXME: This code should be abstracted to not be copy/paste.
+my $srcHeader = "WebKitLibraries/WebKitSystemInterface.h";
+my $header = "$productDir/usr/local/include/WebKitSystemInterface.h";
+if (!-e $header || -M $header > -M $srcHeader) {
+ print "Updating $header\n";
+ system "mkdir", "-p", "$productDir/usr/local/include";
+ system "ditto", $srcHeader, $header;
+}
+
+my $srcHeaderDir = "WebKitLibraries/WebCoreSQLite3";
+my $headerDir = "$productDir/WebCoreSQLite3";
+if (!-e $headerDir || -M $headerDir > -M $srcHeaderDir) {
+ print "Updating $headerDir\n";
+ system "ditto", $srcHeaderDir, $headerDir;
+}
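The copy loops rely on Perl's -M file test (age of the file in days), so `!-e $lib || -M $lib > -M $srcLib` reads as "destination missing or older than the source". The same freshness check in Python, for comparison (a sketch, not part of the patch):

    import os
    import shutil

    def copy_if_stale(src, dest):
        # Equivalent of `!-e $lib || -M $lib > -M $srcLib`: a larger -M value
        # means an older file, i.e. a smaller modification timestamp.
        if not os.path.exists(dest) or os.path.getmtime(dest) < os.path.getmtime(src):
            print("Updating %s" % dest)
            shutil.copy2(src, dest)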
diff --git a/Tools/Scripts/git-add-reviewer b/Tools/Scripts/git-add-reviewer
index 4b977a87a..16bb06be3 100755
--- a/Tools/Scripts/git-add-reviewer
+++ b/Tools/Scripts/git-add-reviewer
@@ -346,7 +346,7 @@ sub writeCommitMessageToFile($)
my ($file) = @_;
open FILE, ">", $file or return fail("Couldn't open $file.");
- open MESSAGE, "-|", qw(git rev-list --max-count=1 --pretty=format:%s%n%n%b HEAD) or return fail("Error running git rev-list.");
+ open MESSAGE, "-|", qw(git rev-list --max-count=1 --pretty=format:%B HEAD) or return fail("Error running git rev-list.");
my $commitLine = <MESSAGE>;
foreach my $line (<MESSAGE>) {
print FILE $line;
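The format switch from %s%n%n%b (subject, blank line, body) to %B asks git for the raw, unwrapped commit message, so the rewritten message round-trips exactly. To inspect the output on any repository (note that rev-list still prepends a "commit <sha>" header line, which the Perl code discards via $commitLine):

    import subprocess

    # %B is git's "raw body" placeholder: subject and body exactly as committed.
    message = subprocess.check_output(
        ["git", "rev-list", "--max-count=1", "--pretty=format:%B", "HEAD"])
    print(message.decode())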
diff --git a/Tools/Scripts/new-run-webkit-tests b/Tools/Scripts/new-run-webkit-tests
index 8f7cd043e..1c3d3b159 100755
--- a/Tools/Scripts/new-run-webkit-tests
+++ b/Tools/Scripts/new-run-webkit-tests
@@ -44,14 +44,18 @@ if __name__ == '__main__':
env = os.environ
if env.has_key('PYTHONPATH'):
if script_dir not in env['PYTHONPATH']:
- env_separator = ':'
- if sys.platform == 'win32':
- env_separator = ';'
- env['PYTHONPATH'] = env['PYTHONPATH'] + env_separator + script_dir
+ env['PYTHONPATH'] = env['PYTHONPATH'] + os.pathsep + script_dir
else:
env['PYTHONPATH'] = script_dir
module_path = os.path.join(script_dir, 'webkitpy', 'layout_tests', 'run_webkit_tests.py')
cmd = [sys.executable, module_path] + sys.argv[1:]
+
+ # Wrap the NRWT process in the jhbuild environment so that DRT or WKTR
+ # doesn't need to, and so the process id reported by subprocess.Popen
+ # is the test runner's rather than jhbuild's.
+ if '--gtk' in sys.argv[1:]:
+ cmd.insert(1, os.path.join(script_dir, '..', 'gtk', 'run-with-jhbuild'))
+
proc = subprocess.Popen(cmd, env=env)
try:
proc.wait()
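The PYTHONPATH fix above replaces a hand-rolled ':' vs ';' branch with os.pathsep, which already holds the platform's separator. The resulting idiom, in isolation:

    import os

    def append_to_pythonpath(env, directory):
        # os.pathsep is ':' on POSIX and ';' on Windows, so no sys.platform check is needed.
        if "PYTHONPATH" in env:
            if directory not in env["PYTHONPATH"]:
                env["PYTHONPATH"] += os.pathsep + directory
        else:
            env["PYTHONPATH"] = directory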
diff --git a/Tools/Scripts/old-run-webkit-tests b/Tools/Scripts/old-run-webkit-tests
index 9670b77a0..fdaee927c 100755
--- a/Tools/Scripts/old-run-webkit-tests
+++ b/Tools/Scripts/old-run-webkit-tests
@@ -219,10 +219,7 @@ my @macPlatforms = ("mac-leopard", "mac-snowleopard", "mac-lion", "mac");
my @winPlatforms = ("win-xp", "win-vista", "win-7sp0", "win");
if (isAppleMacWebKit()) {
- if (isLeopard()) {
- $platform = "mac-leopard";
- $tolerance = 0.1;
- } elsif (isSnowLeopard()) {
+ if (isSnowLeopard()) {
$platform = "mac-snowleopard";
$tolerance = 0.1;
} elsif (isLion()) {
@@ -1245,20 +1242,6 @@ sub countAndPrintLeaks($$$)
"ScanFromString", # <http://code.google.com/p/angleproject/issues/detail?id=249> leak in ANGLE
);
- if (isLeopard()) {
- # Leak list for the version of Leopard used on the build bot.
- push @callStacksToExclude, (
- "CFHTTPMessageAppendBytes", # leak in CFNetwork, rdar://problem/5435912
- "sendDidReceiveDataCallback", # leak in CFNetwork, rdar://problem/5441619
- "_CFHTTPReadStreamReadMark", # leak in CFNetwork, rdar://problem/5441468
- "httpProtocolStart", # leak in CFNetwork, rdar://problem/5468837
- "_CFURLConnectionSendCallbacks", # leak in CFNetwork, rdar://problem/5441600
- "DispatchQTMsg", # leak in QuickTime, PPC only, rdar://problem/5667132
- "QTMovieContentView createVisualContext", # leak in QuickTime, PPC only, rdar://problem/5667132
- "_CopyArchitecturesForJVMVersion", # leak in Java, rdar://problem/5910823
- );
- }
-
if (isSnowLeopard()) {
push @callStacksToExclude, (
"readMakerNoteProps", # <rdar://problem/7156432> leak in ImageIO
@@ -1799,14 +1782,7 @@ sub captureSavedCrashLog($$)
if (isCygwin()) {
$glob = File::Spec->catfile($testResultsDirectory, $windowsCrashLogFilePrefix . "*.txt");
} elsif (isAppleMacWebKit()) {
- my $crashLogDirectoryName;
- if (isLeopard()) {
- $crashLogDirectoryName = "CrashReporter";
- } else {
- $crashLogDirectoryName = "DiagnosticReports";
- }
-
- $glob = File::Spec->catfile("~", "Library", "Logs", $crashLogDirectoryName, ($webProcessCrashed ? "WebProcess" : $dumpToolName) . "_*.crash");
+ $glob = File::Spec->catfile("~", "Library", "Logs", "DiagnosticReports", ($webProcessCrashed ? "WebProcess" : $dumpToolName) . "_*.crash");
# Even though the dump tool has exited, CrashReporter might still be running. We need to
wait for it to exit to ensure it has saved its crash log to disk. For simplicity, we'll
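With Leopard support gone, crash logs are always read from ~/Library/Logs/DiagnosticReports rather than the old CrashReporter directory. The glob built above corresponds to something like the following (DumpRenderTree as the dump tool name is illustrative):

    import glob
    import os

    pattern = os.path.expanduser(
        "~/Library/Logs/DiagnosticReports/DumpRenderTree_*.crash")
    print(glob.glob(pattern))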
diff --git a/Tools/Scripts/prepare-ChangeLog b/Tools/Scripts/prepare-ChangeLog
index a10b92f9b..8a48e567e 100755
--- a/Tools/Scripts/prepare-ChangeLog
+++ b/Tools/Scripts/prepare-ChangeLog
@@ -1759,6 +1759,7 @@ sub generateFileList(\%$$$)
if isAddedStatus($status)
&& $file =~ /\.([a-zA-Z]+)$/
&& SupportedTestExtensions->{lc($1)}
+ && $file !~ /-expected(-mismatch)?\.html$/
&& !scalar(grep(/^resources$/i, @components))
&& !scalar(grep(/^script-tests$/i, @components));
}
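The added prepare-ChangeLog condition keeps reference-test expectation files out of the generated list: any new file ending in -expected.html or -expected-mismatch.html is skipped. The same filter expressed as a quick regex check:

    import re

    EXPECTED_RE = re.compile(r"-expected(-mismatch)?\.html$")

    for name in ("fast/a.html", "fast/a-expected.html", "fast/a-expected-mismatch.html"):
        print(name, "skip" if EXPECTED_RE.search(name) else "keep")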
diff --git a/Tools/Scripts/run-fast-jsc b/Tools/Scripts/run-fast-jsc
index 1fda92316..8e004c447 100755
--- a/Tools/Scripts/run-fast-jsc
+++ b/Tools/Scripts/run-fast-jsc
@@ -112,7 +112,7 @@ do
let numTestsRun=$numTestsRun+1
$jscCmd $preScript $jsTest $postScript 2>$actualErr > $actualOut
JSC_RES=$?
-
+
if [ $JSC_RES -eq 0 ]
then
diff $actualOut $expectedOut > $diffOut
@@ -130,6 +130,8 @@ do
then
testCrashed=1
echo "CRASHED"
+ else
+ echo "ERROR: $JSC_RES"
fi
fi
diff --git a/Tools/Scripts/run-gtk-tests b/Tools/Scripts/run-gtk-tests
index 8d70e0708..3f9067108 100755
--- a/Tools/Scripts/run-gtk-tests
+++ b/Tools/Scripts/run-gtk-tests
@@ -20,42 +20,125 @@
import subprocess
import os
import sys
-import time
+import optparse
+import re
+from signal import alarm, signal, SIGALRM, SIGKILL
from gi.repository import Gio, GLib
-TIMEOUT=180 # seconds
+class SkippedTest:
+ def __init__(self, test, reason, bug=None, test_cases=[]):
+ self.test = test
+ self.reason = reason
+ self.bug = bug
+ self.test_cases = test_cases
+
+ def __str__(self):
+ skipped_test_str = "%s" % self.test
+ if self.test_cases:
+ skipped_test_str += " [%s]" % ", ".join(self.test_cases)
+ skipped_test_str += ": %s " % self.reason
+ if self.bug is not None:
+ skipped_test_str += "(https://bugs.webkit.org/show_bug.cgi?id=%d)" % self.bug
+ return skipped_test_str
+
+class TestTimeout(Exception):
+ pass
class TestRunner:
- TEST_DIRS = [ "unittests", "WebKit2APITests" ]
- # FIXME: https://bugs.webkit.org/show_bug.cgi?id=74717
- SKIPPED = [ "unittests/testdownload", "unittests/testwebview", "unittests/testwebresource",
- # WebKit2APITests/TestDownloads is consistently timing
- # out on the 32bit release and 64bit debug bots.
- # https://bugs.webkit.org/show_bug.cgi?id=76910
- "WebKit2APITests/TestDownloads" ]
-
- def __init__(self):
+ TEST_DIRS = [ "unittests", "WebKit2APITests", "TestWebKitAPI/WTF", "TestWebKitAPI/WebKit2" ]
+
+ SKIPPED = [
+ SkippedTest("unittests/testdownload",
+ "Test fails in GTK Linux 64-bit Release bot",
+ 82329,
+ ["/webkit/download/not-found"]),
+ SkippedTest("unittests/testwebview",
+ "Test times out in GTK Linux 64-bit Release bot",
+ 82328,
+ ["/webkit/webview/icon-uri"]),
+ SkippedTest("unittests/testwebresource",
+ "Test fails in GTK Linux 64-bit Release bot",
+ 82330,
+ ["/webkit/webresource/sub_resource_loading"]),
+ SkippedTest("unittests/testwebinspector",
+ "Test is flaky in GTK Linux 32-bit Release bot",
+ 82869,
+ ["/webkit/webinspector/close-and-inspect"]),
+ SkippedTest("WebKit2APITests/TestWebKitWebView",
+ "Test is flaky in GTK Linux 32-bit Release bot",
+ 82866,
+ ["/webkit2/WebKitWebView/mouse-target"]),
+ SkippedTest("WebKit2APITests/TestResources",
+ "Test is flaky in GTK Linux 32-bit Release bot",
+ 82868,
+ ["/webkit2/WebKitWebView/resources"]),
+ SkippedTest("TestWebKitAPI/WebKit2/TestWKConnection",
+ "Test times out",
+ 84959),
+ SkippedTest("TestWebKitAPI/WebKit2/TestRestoreSessionStateContainingFormData",
+ "Session State is not implemented in GTK+ port",
+ 84960),
+ SkippedTest("TestWebKitAPI/WebKit2/TestSpacebarScrolling",
+ "Test fails",
+ 84961),
+ SkippedTest("TestWebKitAPI/WebKit2/TestNewFirstVisuallyNonEmptyLayoutFrames",
+ "Test fails",
+ 85037),
+ SkippedTest("TestWebKitAPI/WebKit2/TestMouseMoveAfterCrash",
+ "Test is flaky",
+ 85066)
+ ]
+
+ def __init__(self, options, tests=[]):
# FIXME: webkit-build-directory --configuration always returns
# Release because we never call set-webkit-configuration.
#build_directory_script = os.path.join(os.path.dirname(__file__), "webkit-build-directory")
#build_directory = self._executive.run_command([build_directory_script, "--configuration"]).rstrip()
- def is_valid_build_directory(build_dir):
- return os.path.exists(os.path.join(build_dir, ".libs"))
+ self._options = options
+ self._gtk_tools_directory = os.path.join(self._get_top_level_directory(), "Tools", "gtk")
+ self._programs_path = os.path.join(self._get_build_directory(), "Programs")
+ self._tests = self._get_tests(tests)
+ self._skipped_tests = TestRunner.SKIPPED
+
+ # These SPI daemons need to be active for the accessibility tests to work.
+ self._spi_registryd = None
+ self._spi_bus_launcher = None
+
+ # run-gtk-tests may be run during make distcheck, which doesn't include jhbuild.
+ self._jhbuild_path = os.path.join(self._gtk_tools_directory, "run-with-jhbuild")
+ if not os.path.exists(self._jhbuild_path):
+ self._jhbuild_path = None
+
+ def _get_top_level_directory(self):
+ return os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))
+
+ def _get_build_directory(self):
+ top_level = self._get_top_level_directory()
+ if self._options.release:
+ return os.path.join(top_level, 'WebKitBuild', 'Release')
+ if self._options.debug:
+ return os.path.join(top_level, 'WebKitBuild', 'Debug')
- script_dir = os.path.dirname(__file__)
- top_level = os.path.normpath(os.path.join(script_dir, "..", ".."))
build_directory = os.path.join(top_level, 'WebKitBuild', 'Release')
- if not is_valid_build_directory(build_directory):
- build_directory = os.path.join(top_level, 'WebKitBuild', 'Debug')
-
- self._a11y_registryd = None
- self._timed_out = False
- self._gtk_tools_directory = os.path.join(top_level, "Tools", "gtk")
- self._programs_path = os.path.join(build_directory, "Programs")
- self._tests = []
+ if os.path.exists(os.path.join(build_directory, '.libs')):
+ return build_directory
+ build_directory = os.path.join(top_level, 'WebKitBuild', 'Debug')
+ if os.path.exists(os.path.join(build_directory, '.libs')):
+ return build_directory
+ build_directory = os.path.join(top_level, '_build')
+ if os.path.exists(os.path.join(build_directory, '.libs')):
+ return build_directory
+
+ return os.path.join(top_level, 'WebKitBuild')
+
+ def _get_tests(self, tests):
+ if tests:
+ return tests
+
+ tests = []
for test_dir in self.TEST_DIRS:
absolute_test_dir = os.path.join(self._programs_path, test_dir)
if not os.path.isdir(absolute_test_dir):
@@ -63,126 +146,251 @@ class TestRunner:
for test_file in os.listdir(absolute_test_dir):
if not test_file.lower().startswith("test"):
continue
- test_relative_path = os.path.join(test_dir, test_file)
- if test_relative_path in self.SKIPPED:
- sys.stdout.write("Skipping test %s\n" % (test_relative_path))
- sys.stdout.flush()
- continue
-
- test_path = os.path.join(self._programs_path, test_relative_path)
+ test_path = os.path.join(self._programs_path, test_dir, test_file)
if os.path.isfile(test_path) and os.access(test_path, os.X_OK):
- self._tests.append(test_path)
+ tests.append(test_path)
+ return tests
+
+ def _create_process(self, command, stdout=None, stderr=None, env=os.environ):
+ if self._jhbuild_path:
+ command.insert(0, self._jhbuild_path)
+ return subprocess.Popen(command, stdout=stdout, stderr=stderr, env=env)
- def _lookup_atspi2_binary(self, jhbuild_path, filename):
- process = subprocess.Popen([jhbuild_path ,'pkg-config', '--variable=exec_prefix', 'atspi-2'], stdout=subprocess.PIPE)
+ def _lookup_atspi2_binary(self, filename):
+ process = self._create_process(['pkg-config', '--variable=exec_prefix', 'atspi-2'], stdout=subprocess.PIPE)
stdout = process.communicate()[0]
exec_prefix = stdout.rstrip('\r\n')
- paths_to_check = [ 'libexec',
- 'lib/at-spi2-core',
- 'lib32/at-spi2-core',
- 'lib64/at-spi2-core' ]
- for path in paths_to_check:
+ for path in [ 'libexec', 'lib/at-spi2-core', 'lib32/at-spi2-core', 'lib64/at-spi2-core' ]:
filepath = os.path.join(exec_prefix, path, filename)
if os.path.isfile(filepath):
return filepath
return None
- def _run_command_when_dbus_service_appears(self, service_name, handler):
- def on_name_appeared(*args):
- handler()
+ def _start_accessibility_daemons(self):
+ if not self._jhbuild_path:
+ return False
- def on_name_vanished(*args):
- pass
+ spi_bus_launcher_path = self._lookup_atspi2_binary('at-spi-bus-launcher')
+ spi_registryd_path = self._lookup_atspi2_binary('at-spi2-registryd')
+ if not spi_bus_launcher_path or not spi_registryd_path:
+ return False
- Gio.bus_watch_name(Gio.BusType.SESSION, service_name,
- Gio.BusNameWatcherFlags.NONE, on_name_appeared, on_name_vanished)
+ try:
+ self._spi_bus_launcher = self._create_process([spi_bus_launcher_path], env=self._test_env)
+ except:
+ sys.stderr.write("Failed to launch the accessibility bus\n")
+ sys.stderr.flush()
+ return False
+ # We need to wait until the SPI bus is launched before trying to start the SPI
+ # registry, so we spin a main loop until the bus name appears on DBus.
+ loop = GLib.MainLoop()
+ Gio.bus_watch_name(Gio.BusType.SESSION, 'org.a11y.Bus', Gio.BusNameWatcherFlags.NONE,
+ lambda *args: loop.quit(), None)
+ loop.run()
- def _check_if_tests_have_timed_out(self):
- if time.time() - self._start_time <= TIMEOUT:
+ try:
+ self._spi_registryd = self._create_process([spi_registryd_path], env=self._test_env)
+ except:
+ sys.stderr.write("Failed to launch the accessibility registry\n")
+ sys.stderr.flush()
return False
- sys.stdout.write("Tests timed out after %d seconds\n" % TIMEOUT)
- sys.stdout.flush()
- self._timed_out = True
+
return True
- def _ensure_accessibility_daemon_is_running(self, jhbuild_path, test_env):
- a11y_registryd_path = self._lookup_atspi2_binary(jhbuild_path, 'at-spi2-registryd')
- if a11y_registryd_path:
- try:
- self._a11y_registryd = subprocess.Popen([a11y_registryd_path], env=test_env)
- except:
- sys.stderr.write("Failed to run the accessibility registry\n")
- sys.stderr.flush()
- self._a11y_registryd = None
-
- def run(self):
- if not self._tests:
- sys.stderr.write("ERROR: tests not found in %s.\n" % (self._programs_path))
+ def _setup_testing_environment(self):
+ self._test_env = os.environ
+ self._test_env["DISPLAY"] = self._options.display
+ self._test_env["WEBKIT_INSPECTOR_PATH"] = os.path.abspath(os.path.join(self._programs_path, 'resources', 'inspector'))
+ self._test_env['GSETTINGS_BACKEND'] = 'memory'
+ self._test_env["TEST_WEBKIT_API_WEBKIT2_RESOURCES_PATH"] = os.path.join(self._get_top_level_directory(), "Tools", "TestWebKitAPI", "Tests", "WebKit2")
+ self._test_env["TEST_WEBKIT_API_WEBKIT2_INJECTED_BUNDLE_PATH"] = os.path.abspath(os.path.join(self._get_build_directory(), "Libraries"))
+ self._test_env["WEBKIT_EXEC_PATH"] = self._programs_path
+
+ try:
+ self._xvfb = self._create_process(["Xvfb", self._options.display, "-screen", "0", "800x600x24", "-nolisten", "tcp"],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except Exception as e:
+            sys.stderr.write("Failed to run Xvfb: %s\n" % e)
sys.stderr.flush()
- return 1
+ return False
- test_env = os.environ
- test_env["DISPLAY"] = ":55"
- test_env["WEBKIT_INSPECTOR_PATH"] = os.path.abspath(os.path.join(self._programs_path, 'resources', 'inspector'))
- test_env['GSETTINGS_BACKEND'] = 'memory'
+ # If we cannot start the accessibility daemons, we can just skip the accessibility tests.
+ if not self._start_accessibility_daemons():
+ print "Could not start accessibility bus, so skipping TestWebKitAccessibility"
+ self._skipped_tests.append(SkippedTest("WebKit2APITests/TestWebKitAccessibility",
+ "Could not start accessibility bus"))
+ return True
- failed_tests = []
+ def _tear_down_testing_environment(self):
+ if self._spi_registryd:
+ self._spi_registryd.terminate()
+ if self._spi_bus_launcher:
+ self._spi_bus_launcher.terminate()
+        self._xvfb.kill()
+
+ def _find_skipped_test(self, test):
+ for skipped in self._skipped_tests:
+ if test.endswith(skipped.test):
+ return skipped
+ return None
- jhbuild_path = os.path.join(self._gtk_tools_directory, "run-with-jhbuild")
+ def _test_cases_to_skip(self, test):
+ if self._options.skipped_action != 'skip':
+ return []
+
+ skipped = self._find_skipped_test(test)
+ if skipped is not None:
+ return skipped.test_cases
+ return []
+
+ def _should_run_test(self, test):
+        # Skipped tests are ignored; run all tests.
+ if self._options.skipped_action == 'ignore':
+ return True
+
+ skipped = self._find_skipped_test(test)
+        # By default skipped tests are skipped; run them only when there are specific failing test cases.
+ if self._options.skipped_action == 'skip':
+ return skipped is None or skipped.test_cases
+
+ # Run only skipped tests.
+ return skipped is not None
+
+ def _get_child_pid_from_test_output(self, output):
+ if not output:
+ return -1
+ match = re.search(r'\(pid=(?P<child_pid>[0-9]+)\)', output)
+ if not match:
+ return -1
+ return int(match.group('child_pid'))
+
+ def _kill_process(self, pid):
+ try:
+ os.kill(pid, SIGKILL)
+ except OSError:
+ # Process already died.
+ pass
+
+ def _run_test_command(self, command, timeout=-1):
+ def alarm_handler(signum, frame):
+ raise TestTimeout
- # Make sure the accessibility bus is launched.
- a11y_bus_launcher_path = self._lookup_atspi2_binary(jhbuild_path, 'at-spi-bus-launcher')
- assert(a11y_bus_launcher_path)
+ p = self._create_process(command, stdout=subprocess.PIPE, env=self._test_env)
+ if timeout > 0:
+ signal(SIGALRM, alarm_handler)
+ alarm(timeout)
+
+ stdout = ""
try:
- a11y_bus_launcher = subprocess.Popen([a11y_bus_launcher_path], env=test_env)
- except:
- sys.stderr.write("Failed to launch the accessibility bus\n")
+ stdout = p.communicate()[0]
+ if timeout > 0:
+ alarm(0)
+ sys.stdout.write(stdout)
+ sys.stdout.flush()
+ except TestTimeout:
+ self._kill_process(p.pid)
+ child_pid = self._get_child_pid_from_test_output(stdout)
+ if child_pid > 0:
+ self._kill_process(child_pid)
+ raise
+
+ return not p.returncode
+
+ def _run_test_glib(self, test):
+ tester_command = ['gtester']
+ if self._options.verbose:
+ tester_command.append('--verbose')
+ for test_case in self._test_cases_to_skip(test):
+ tester_command.extend(['-s', test_case])
+ tester_command.append(test)
+
+ return self._run_test_command(tester_command, self._options.timeout)
+
+ def _run_test_google(self, test):
+ tester_command = [test, "--gtest_throw_on_failure"]
+        skipped_test_cases = self._test_cases_to_skip(test)
+        if skipped_test_cases:
+            tester_command.append("--gtest_filter=-%s" % ":".join(skipped_test_cases))
+
+ return self._run_test_command(tester_command, self._options.timeout)
+
+ def _run_test(self, test):
+ if "unittests" in test or "WebKit2APITests" in test:
+ return self._run_test_glib(test)
+
+ if "TestWebKitAPI" in test:
+ return self._run_test_google(test)
+
+ return False
+
+ def run_tests(self):
+ if not self._tests:
+ sys.stderr.write("ERROR: tests not found in %s.\n" % (self._programs_path))
sys.stderr.flush()
return 1
- loop = GLib.MainLoop()
- self._start_time = time.time()
+ if not self._setup_testing_environment():
+ return 1
- def run_tests():
- self._ensure_accessibility_daemon_is_running(jhbuild_path, test_env)
+ # Remove skipped tests now instead of when we find them, because
+ # some tests might be skipped while setting up the test environment.
+ self._tests = [test for test in self._tests if self._should_run_test(test)]
+ failed_tests = []
+ timed_out_tests = []
+ try:
for test in self._tests:
- process = subprocess.Popen([jhbuild_path ,'gtester', test], env=test_env)
- if process.wait():
- failed_tests.append(test)
-
- if self._check_if_tests_have_timed_out():
- break
-
- if self._a11y_registryd:
- self._a11y_registryd.terminate()
+ success = True
+ try:
+ success = self._run_test(test)
+ except TestTimeout:
+ sys.stdout.write("TEST: %s: TIMEOUT\n" % test)
+ sys.stdout.flush()
+ timed_out_tests.append(test)
- a11y_bus_launcher.terminate()
+ if not success:
+ failed_tests.append(test)
+ finally:
+ self._tear_down_testing_environment()
- if failed_tests:
- names = [os.path.basename(t) for t in failed_tests]
- sys.stdout.write("Tests failed: %s\n" % ", ".join(names))
- sys.stdout.flush()
+ if failed_tests:
+ names = [test.replace(self._programs_path, '', 1) for test in failed_tests]
+ sys.stdout.write("Tests failed: %s\n" % ", ".join(names))
+ sys.stdout.flush()
- loop.quit()
+ if timed_out_tests:
+ names = [test.replace(self._programs_path, '', 1) for test in timed_out_tests]
+ sys.stdout.write("Tests that timed out: %s\n" % ", ".join(names))
+ sys.stdout.flush()
- self._run_command_when_dbus_service_appears("org.a11y.Bus", run_tests)
- loop.run()
+ if self._skipped_tests and self._options.skipped_action == 'skip':
+ sys.stdout.write("Tests skipped:\n%s\n" % "\n".join([str(skipped) for skipped in self._skipped_tests]))
+ sys.stdout.flush()
- return len(failed_tests) or int(self._timed_out)
+ return len(failed_tests)
if __name__ == "__main__":
- try:
- xvfb = subprocess.Popen(["Xvfb", ":55", "-screen", "0", "800x600x24", "-nolisten", "tcp"],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except:
- sys.stderr.write("Failed to run Xvfb\n")
- sys.stderr.flush()
- sys.exit(1)
-
- try:
- sys.exit(TestRunner().run())
- finally:
- xvfb.kill()
+ option_parser = optparse.OptionParser(usage='usage: %prog [options] [test...]')
+ option_parser.add_option('-r', '--release',
+ action='store_true', dest='release',
+ help='Run in Release')
+ option_parser.add_option('-d', '--debug',
+ action='store_true', dest='debug',
+ help='Run in Debug')
+ option_parser.add_option('-v', '--verbose',
+ action='store_true', dest='verbose',
+ help='Run gtester in verbose mode')
+ option_parser.add_option('--display', action='store', dest='display', default=':55',
+                             help='Display to run Xvfb on')
+ option_parser.add_option('--skipped', action='store', dest='skipped_action',
+ choices=['skip', 'ignore', 'only'], default='skip',
+ metavar='skip|ignore|only',
+ help='Specifies how to treat the skipped tests')
+ option_parser.add_option('-t', '--timeout',
+ action='store', type='int', dest='timeout', default=10,
+ help='Time in seconds until a test times out')
+ options, args = option_parser.parse_args()
+
+ sys.exit(TestRunner(options, args).run_tests())
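
For reference, the SIGALRM-based timeout that _run_test_command implements above reduces to the following minimal sketch (Python, POSIX-only, since signal.alarm is unavailable on Windows; run_with_timeout and its arguments are illustrative names, not part of the script):

    import signal
    import subprocess

    class TestTimeout(Exception):
        pass

    def run_with_timeout(command, timeout):
        # Arrange for SIGALRM to interrupt the wait if the child runs too long.
        def alarm_handler(signum, frame):
            raise TestTimeout

        process = subprocess.Popen(command, stdout=subprocess.PIPE)
        signal.signal(signal.SIGALRM, alarm_handler)
        signal.alarm(timeout)
        try:
            stdout = process.communicate()[0]
            signal.alarm(0)  # Cancel the pending alarm once the child exits.
            return process.returncode, stdout
        except TestTimeout:
            process.kill()  # The child is still running; reap it forcefully.
            raise

The script above additionally parses a child pid out of the test output and kills it too, because killing the harness alone can leave the test's own child process behind.
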
diff --git a/Tools/Scripts/run-javascriptcore-tests b/Tools/Scripts/run-javascriptcore-tests
index db5537f90..ca58ab3ec 100755
--- a/Tools/Scripts/run-javascriptcore-tests
+++ b/Tools/Scripts/run-javascriptcore-tests
@@ -124,7 +124,7 @@ sub testapiPath($)
#run api tests
if (isAppleMacWebKit() || isAppleWinWebKit()) {
chdirWebKit();
- chdir($productDir) or die;
+ chdir($productDir) or die "Failed to switch directory to '$productDir'\n";
my $path = testapiPath($productDir);
# Use an "indirect object" so that system() won't get confused if the path
# contains spaces (see perldoc -f exec).
@@ -135,14 +135,22 @@ if (isAppleMacWebKit() || isAppleWinWebKit()) {
# Find JavaScriptCore directory
chdirWebKit();
chdir("Source/JavaScriptCore");
-chdir "tests/mozilla" or die;
+chdir "tests/mozilla" or die "Failed to switch directory to 'tests/mozilla'\n";
printf "Running: jsDriver.pl -e squirrelfish -s %s -f actual.html %s\n", jscPath($productDir), join(" ", @jsArgs);
-my $result = system "perl", "jsDriver.pl", "-e", "squirrelfish", "-s", jscPath($productDir), "-f", "actual.html", @jsArgs;
+my @jsDriverCmd = ("perl", "jsDriver.pl", "-e", "squirrelfish", "-s", jscPath($productDir), "-f", "actual.html", @jsArgs);
+if (isGtk() || isEfl()) {
+ my $jhbuildPrefix = sourceDir() . "/Tools/";
+ $jhbuildPrefix .= isEfl() ? "efl" : "";
+ $jhbuildPrefix .= isGtk() ? "gtk" : "";
+ $jhbuildPrefix .= "/run-with-jhbuild";
+ unshift(@jsDriverCmd, $jhbuildPrefix);
+}
+my $result = system(@jsDriverCmd);
exit exitStatus($result) if $result;
my %failures;
-open EXPECTED, "expected.html" or die;
+open EXPECTED, "expected.html" or die "Failed to open 'expected.html'\n";
while (<EXPECTED>) {
last if /failures reported\.$/;
}
@@ -154,7 +162,7 @@ close EXPECTED;
my %newFailures;
-open ACTUAL, "actual.html" or die;
+open ACTUAL, "actual.html" or die "Failed to open 'actual.html'\n";
while (<ACTUAL>) {
last if /failures reported\.$/;
}
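
The jhbuild prefixing added here mirrors _create_process in run-gtk-tests above: the per-port run-with-jhbuild launcher is simply prepended to the command line so the child runs inside the jhbuild-managed environment. A minimal sketch of the pattern (Python; the wrapper path is illustrative):

    import subprocess

    def run_in_jhbuild(command, wrapper=None):
        # Prepend the jhbuild launcher, if one is configured, before spawning.
        if wrapper:
            command = [wrapper] + command
        return subprocess.call(command)

    # e.g. run_in_jhbuild(["gtester", test], "Tools/gtk/run-with-jhbuild")
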
diff --git a/Tools/Scripts/run-launcher b/Tools/Scripts/run-launcher
index c2a47dd1f..2ff0924b0 100755
--- a/Tools/Scripts/run-launcher
+++ b/Tools/Scripts/run-launcher
@@ -65,14 +65,16 @@ if (isQt()) {
if (isGtk()) {
if (isWK2()) {
- $launcherPath = catdir($launcherPath, "Programs", "MiniBrowser");
+ unshift(@ARGV, catdir($launcherPath, "Programs", "MiniBrowser"));
} else {
- $launcherPath = catdir($launcherPath, "Programs", "GtkLauncher");
+ unshift(@ARGV, catdir($launcherPath, "Programs", "GtkLauncher"));
}
+ $launcherPath = catdir(sourceDir(), "Tools", "gtk", "run-with-jhbuild");
}
if (isEfl()) {
- $launcherPath = catdir($launcherPath, "bin", "EWebLauncher");
+ unshift(@ARGV, catdir($launcherPath, "bin", "EWebLauncher"));
+ $launcherPath = catdir(sourceDir(), "Tools", "efl", "run-with-jhbuild");
}
if (isWx()) {
diff --git a/Tools/Scripts/run-perf-tests b/Tools/Scripts/run-perf-tests
index 6ac02f4c9..95e04a0d3 100755
--- a/Tools/Scripts/run-perf-tests
+++ b/Tools/Scripts/run-perf-tests
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -34,8 +34,6 @@ import sys
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
-_log = logging.getLogger(__name__)
-
if '__main__' == __name__:
logging.basicConfig(level=logging.INFO, format="%(message)s")
sys.exit(PerfTestsRunner().run())
diff --git a/Tools/Scripts/run-qtwebkit-tests b/Tools/Scripts/run-qtwebkit-tests
index 14c201375..288bd57d6 100755
--- a/Tools/Scripts/run-qtwebkit-tests
+++ b/Tools/Scripts/run-qtwebkit-tests
@@ -271,25 +271,46 @@ class Main(Log):
if self._options.open_results:
Popen(self._options.browser + " " + self._options.output_file, stdout=None, stderr=None, shell=True)
+    def check_crash_occurrences(self, results):
+        """ Checks for crashed tests and sums up all test results. """
+ totals = [0,0,0]
+ crash_count = 0
+ txt = []
+        # Collect the results into one container, checking for crashes.
+ for result in results:
+ found = None
+ if result.output():
+ txt.append(result.output())
+ found = re.search(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", result.output())
+
+ if found:
+ totals = reduce(lambda x, y: (int(x[0]) + int(y[0]), int(x[1]) + int(y[1]), int(x[2]) + int(y[2])), (totals, found.groups()))
+ else:
+ txt.append('CRASHED: %s' % result.test_file_name())
+ crash_count += 1
+ self.warn("Missing sub-summary: %s" % result.test_file_name())
+
+        txt = '\n\n'.join(txt)
+
+ totals = list(totals)
+ totals.append(crash_count)
+ totals = map(str, totals)
+ return txt, totals
+
def convert_to_stdout(self, results):
""" Converts results, that they could be nicely presented in the stdout. """
- # Join all results into one piece.
- txt = "\n\n".join(map(lambda w: w.output(), results))
- # Find total count of failed, skipped and passed tests.
- totals = re.findall(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", txt)
- totals = reduce(lambda x, y: (int(x[0]) + int(y[0]), int(x[1]) + int(y[1]), int(x[2]) + int(y[2])), totals)
- totals = map(str, totals)
- totals = totals[0] + " passed, " + totals[1] + " failed, " + totals[2] + " skipped"
- # Add a summary.
- txt += '\n\n\n' + '*' * 70
+        txt, totals = self.check_crash_occurrences(results)
+
+ totals = "%s passed, %s failed, %s skipped, %s crashed" % (totals[0], totals[1], totals[2], totals[3])
+
+ txt += '\n' + '*' * 70
txt += "\n**" + ("TOTALS: " + totals).center(66) + '**'
txt += '\n' + '*' * 70 + '\n'
return txt
def convert_to_html(self, results):
""" Converts results, that they could showed as a html page. """
- # Join results into one piece.
- txt = "\n\n".join(map(lambda w: w.output(), results))
+        txt, totals = self.check_crash_occurrences(results)
txt = txt.replace('&', '&amp;').replace('<', "&lt;").replace('>', "&gt;")
# Add a color and a style.
txt = re.sub(r"([* ]+(Finished)[ a-z_A-Z0-9]+[*]+)",
@@ -322,17 +343,14 @@ class Main(Log):
txt = re.sub(r"\n(RESULT)((.)+)",
lambda w: "</case>\n<case class='good'><br><status class='benchmark'>" + w.group(1) + r"</status>" + w.group(2),
txt)
- txt = re.sub(r"\n(QFATAL)((.)+)",
+ txt = re.sub(r"\n(QFATAL|CRASHED)((.)+)",
lambda w: "</case>\n<case class='bad'><br><status class='crash'>" + w.group(1) + r"</status>" + w.group(2),
txt)
txt = re.sub(r"\n(Totals:)([0-9', a-z]*)",
lambda w: "</case>\n<case class='good'><br><b>" + w.group(1) + r"</b>" + w.group(2) + "</case>",
txt)
- # Find total count of failed, skipped and passed tests.
- totals = re.findall(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", txt)
- totals = reduce(lambda x, y: (int(x[0]) + int(y[0]), int(x[1]) + int(y[1]), int(x[2]) + int(y[2])), totals)
- totals = map(str, totals)
- totals = totals[0] + " passed, " + totals[1] + " failed, " + totals[2] + " skipped."
+ # Find total count of failed, skipped, passed and crashed tests.
+ totals = "%s passed, %s failed, %s skipped, %s crashed." % (totals[0], totals[1], totals[2], totals[3])
# Create a header of the html source.
txt = """
<html>
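
The crash accounting in check_crash_occurrences folds each test's "N passed, N failed, N skipped" line into a running total with reduce; an equivalent, slightly more explicit form of that step (Python 2, where reduce is a builtin; the sample outputs are illustrative):

    import re

    totals = [0, 0, 0]
    outputs = ["5 passed, 1 failed, 0 skipped", "2 passed, 0 failed, 3 skipped"]
    for output in outputs:
        found = re.search(r"([0-9]+) passed, ([0-9]+) failed, ([0-9]+) skipped", output)
        if found:
            # Add this test's (passed, failed, skipped) counts to the totals.
            totals = [x + int(y) for x, y in zip(totals, found.groups())]
    # totals is now [7, 1, 3]; tests whose output lacks the summary
    # line are counted as crashes instead.
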
diff --git a/Tools/Scripts/run-webkit-tests b/Tools/Scripts/run-webkit-tests
index 24fcfd02f..b7e1eaa7e 100755
--- a/Tools/Scripts/run-webkit-tests
+++ b/Tools/Scripts/run-webkit-tests
@@ -58,7 +58,7 @@ sub runningOnBuildBot()
sub useNewRunWebKitTests()
{
# NRWT Windows support still needs work: https://bugs.webkit.org/show_bug.cgi?id=38756
- return 0 if isWindows() or isCygwin();
+ return 0 if (isWindows() or isCygwin()) and !isChromium();
# NRWT does not support qt-arm: https://bugs.webkit.org/show_bug.cgi?id=64086
return 0 if isQt() and isARM();
# All other platforms should use NRWT by default.
@@ -79,9 +79,11 @@ sub platformIsReadyForParallelTesting()
return !isQt();
}
+my $script = "perl";
my $harnessName = "old-run-webkit-tests";
if (useNewRunWebKitTests()) {
+ $script = "python";
$harnessName = "new-run-webkit-tests";
if (!grep(/--child-processes/, @ARGV) and !platformIsReadyForParallelTesting()) {
@@ -115,4 +117,6 @@ if (isQt()) {
}
my $harnessPath = File::Spec->catfile(relativeScriptsDir(), $harnessName);
-exec $harnessPath ($harnessPath, @ARGV) or die "Failed to execute $harnessPath";
+unshift(@ARGV, $harnessPath);
+unshift(@ARGV, $script);
+system(@ARGV) == 0 or die "Failed to execute $harnessPath";
diff --git a/Tools/Scripts/sync-master-with-upstream b/Tools/Scripts/sync-master-with-upstream
new file mode 100755
index 000000000..d2e666c2d
--- /dev/null
+++ b/Tools/Scripts/sync-master-with-upstream
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright 2012 Google, Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY GOOGLE INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script is intended to support the GitHub workflow described here:
+# https://trac.webkit.org/wiki/UsingGitHub
+#
+# This script fetches the latest changes from upstream, and pushes those
+# changes to the master branch in origin (e.g., your GitHub fork of WebKit).
+#
+# Running this script periodically will keep your fork of WebKit on GitHub in
+# sync with the "root" WebKit repository in upstream, assuming you've run
+# configure-github-as-upstream.
+
+import subprocess
+
+def run(args, error_message=None):
+ if subprocess.call(args) != 0:
+ if error_message:
+ print error_message
+ exit(1)
+
+run(["git", "fetch", "upstream"], "Have you run configure-github-as-upstream to configure an upstream repository?")
+run(["git", "push", "origin", "upstream/master:master"])
+print "\nConsider running 'git merge origin' to update your local branches."
diff --git a/Tools/Scripts/test-webkitpy b/Tools/Scripts/test-webkitpy
index 4298feae2..857c19940 100755
--- a/Tools/Scripts/test-webkitpy
+++ b/Tools/Scripts/test-webkitpy
@@ -44,11 +44,9 @@ _log = logging.getLogger("test-webkitpy")
if __name__ == "__main__":
webkit_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
- # FIXME: We should probably test each package separately to avoid naming conflicts.
- dirs = [
- os.path.join(webkit_root, 'Tools', 'Scripts'),
- os.path.join(webkit_root, 'Source', 'WebKit2', 'Scripts'),
- ]
+ tester = main.Tester()
+ tester.add_tree(os.path.join(webkit_root, 'Tools', 'Scripts'), 'webkitpy')
+ tester.add_tree(os.path.join(webkit_root, 'Source', 'WebKit2', 'Scripts'), 'webkit2')
# FIXME: Do we need to be able to test QueueStatusServer on Windows as well?
appengine_sdk_path = '/usr/local/google_appengine'
@@ -59,17 +57,8 @@ if __name__ == "__main__":
from google.appengine.dist import use_library
use_library('django', '1.2')
dev_appserver.fix_sys_path()
- dirs.append(os.path.join(webkit_root, 'Tools', 'QueueStatusServer'))
+ tester.add_tree(os.path.join(webkit_root, 'Tools', 'QueueStatusServer'))
else:
_log.info('Skipping QueueStatusServer tests; the Google AppEngine Python SDK is not installed.')
- main.Tester.clean_packages(dirs)
-
- tester = main.Tester()
- options, args = tester.parse_args(sys.argv)
- tester.configure(options)
-
- # Make sure PYTHONPATH is set up correctly so that all of the imports will work.
- sys.path = [d for d in dirs if d not in sys.path] + sys.path
-
- sys.exit(not tester.run(dirs, args))
+ sys.exit(not tester.run())
diff --git a/Tools/Scripts/update-webkit-chromium b/Tools/Scripts/update-webkit-chromium
index 8406b1d01..550830e1c 100755
--- a/Tools/Scripts/update-webkit-chromium
+++ b/Tools/Scripts/update-webkit-chromium
@@ -36,6 +36,7 @@ use webkitdirs;
determineIsChromiumAndroid();
+chdirWebKit();
chdir("Source/WebKit/chromium") or die $!;
# Find gclient or install it.
@@ -61,17 +62,17 @@ if (! -e ".gclient") {
# When building WebKit's Chromium port for Android, we need the Android NDK as
# it will allow us to cross-compile all sources to the target architecture.
if (isChromiumAndroid()) {
- if (! -e "android-ndk-r7") {
- print "Installing the Android NDK, version 7...\n";
+ if (! -e "android-ndk-r7b") {
+ print "Installing the Android NDK, version 7b...\n";
my $host_os = isLinux() ? "linux" : "darwin";
- my $result = system("curl", "-o", "android-ndk-r7.tar.bz2", "http://dl.google.com/android/ndk/android-ndk-r7-" . $host_os . "-x86.tar.bz2");
+ my $result = system("curl", "-o", "android-ndk-r7b.tar.bz2", "http://dl.google.com/android/ndk/android-ndk-r7b-" . $host_os . "-x86.tar.bz2");
die "Couldn't download the Android NDK." if $result;
- $result = system("tar", "jx", "-f", "android-ndk-r7.tar.bz2");
+ $result = system("tar", "jx", "-f", "android-ndk-r7b.tar.bz2");
die "Couldn't extract the Android NDK." if $result;
}
- $ENV{ANDROID_NDK_ROOT} = sourceDir() . "/Source/WebKit/chromium/android-ndk-r7";
+ $ENV{ANDROID_NDK_ROOT} = sourceDir() . "/Source/WebKit/chromium/android-ndk-r7b";
$ENV{WEBKIT_ANDROID_BUILD} = 1;
}
diff --git a/Tools/Scripts/update-webkit-libs-jhbuild b/Tools/Scripts/update-webkit-libs-jhbuild
new file mode 100755
index 000000000..48343fb30
--- /dev/null
+++ b/Tools/Scripts/update-webkit-libs-jhbuild
@@ -0,0 +1,58 @@
+#!/usr/bin/perl -w
+# Copyright (C) 2011 Igalia S.L.
+# Copyright (C) 2012 Intel Corporation
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+use FindBin;
+use lib $FindBin::Bin;
+use webkitdirs;
+use Getopt::Long;
+use File::Basename;
+
+my $platformEfl = 0;
+my $platformGtk = 0;
+
+my $getOptionsResult = GetOptions(
+ 'efl' => \$platformEfl,
+ 'gtk' => \$platformGtk
+ );
+
+my $platform = "";
+if (!$getOptionsResult) {
+ die "No platform specified for " . basename($0) .". Use --gtk or --efl.\n";
+} else {
+ if ($platformEfl) {
+ $platform = "efl";
+ }
+ if ($platformGtk) {
+ $platform = "gtk";
+ }
+}
+
+sub runJhbuild
+{
+ my $command = shift;
+ my @jhbuildArgs = ("./jhbuild-wrapper", "--".$platform, $command);
+    push(@jhbuildArgs, @ARGV); # Non-option arguments are left in @ARGV by GetOptions.
+ system(@jhbuildArgs) == 0 or die "Running jhbuild-wrapper " . $command . " failed.\n";
+}
+
+delete $ENV{AR_FLAGS} if exists $ENV{AR_FLAGS};
+
+chdir(relativeScriptsDir() . "/../jhbuild") or die $!;
+
+my %prettyPlatform = ( "efl" => "EFL", "gtk" => "GTK+" );
+print "Updating " . $prettyPlatform{$platform} . " port dependencies using jhbuild...\n";
+runJhbuild("build");
diff --git a/Tools/Scripts/update-webkitefl-libs b/Tools/Scripts/update-webkitefl-libs
new file mode 100755
index 000000000..cbdb55ca7
--- /dev/null
+++ b/Tools/Scripts/update-webkitefl-libs
@@ -0,0 +1,23 @@
+#!/usr/bin/perl -w
+# Copyright (C) 2012 Intel Corporation
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+use FindBin;
+use lib $FindBin::Bin;
+use webkitdirs;
+
+my $scriptsDir = relativeScriptsDir();
+system("perl", "$scriptsDir/update-webkit-libs-jhbuild", "--efl", sys.argv) == 0 or die $!;
diff --git a/Tools/Scripts/update-webkitgtk-libs b/Tools/Scripts/update-webkitgtk-libs
index 8033c62eb..792cc28ab 100755
--- a/Tools/Scripts/update-webkitgtk-libs
+++ b/Tools/Scripts/update-webkitgtk-libs
@@ -1,5 +1,6 @@
#!/usr/bin/perl -w
# Copyright (C) 2011 Igalia S.L.
+# Copyright (C) 2012 Intel Corporation
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
@@ -19,18 +20,5 @@ use FindBin;
use lib $FindBin::Bin;
use webkitdirs;
-delete $ENV{AR_FLAGS} if exists $ENV{AR_FLAGS};
-
-chdir(relativeScriptsDir() . "/../gtk") or die $!;
-
-my @ensureJhbuildArgs = ("./run-with-jhbuild", "echo", "Ensured jhbuild setup.");
-if (system(@ensureJhbuildArgs) != 0) {
- die "Failed to ensure jhbuild installed.\n";
-}
-
-print "Updating GTK+ port dependencies using jhbuild...\n";
-my @jhbuildArgs = ("../../WebKitBuild/Dependencies/Root/bin/jhbuild", "--no-interact", "-f", "jhbuildrc");
-push(@jhbuildArgs, @ARGV[1..-1]);
-if (system(@jhbuildArgs) != 0) {
- die "Running jhbuild failed.\n"
-}
+my $scriptsDir = relativeScriptsDir();
+system("perl", "$scriptsDir/update-webkit-libs-jhbuild", "--gtk", @ARGV) == 0 or die $!;
diff --git a/Tools/Scripts/webkit-build-directory b/Tools/Scripts/webkit-build-directory
index dab1d6622..d5085b594 100755
--- a/Tools/Scripts/webkit-build-directory
+++ b/Tools/Scripts/webkit-build-directory
@@ -42,15 +42,26 @@ my $showTopLevelDirectory = 0;
my $programName = basename($0);
my $usage = <<EOF;
Usage: $programName [options]
- --configuration Show the build directory for a specific configuration (e.g. Debug, Release. Defaults to the active configuration set by set-webkit-configuration)
- -h|--help Show this help message
- --top-level Show the top-level build directory
+ --configuration Show the build directory for a specific configuration (e.g. Debug, Release. Defaults to the active configuration set by set-webkit-configuration)
+ -h|--help Show this help message
+ --top-level Show the top-level build directory
+
+ --blackberry Find the build directory for the BlackBerry port on Mac/Linux
+ --chromium Find the build directory for the Chromium port on Mac/Win/Linux
+ --chromium-android Find the build directory for the Chromium port on Android
+ --efl Find the build directory for the EFL port
+ --gtk Find the build directory for the GTK+ port
+ --qt Find the build directory for the Qt port
+ --wincairo Find the build directory for using Cairo (rather than CoreGraphics) on Windows
+ --wince Find the build directory for the WinCE port
Either --configuration or --top-level is required.
EOF
setConfiguration(); # Figure out from the command line if we're --debug or --release or the default.
+# FIXME: Check if extra flags are valid or not.
+Getopt::Long::Configure('pass_through'); # Let --blackberry, etc... be handled by webkitdirs
my $getOptionsResult = GetOptions(
'configuration' => \$showConfigurationDirectory,
'top-level' => \$showTopLevelDirectory,
diff --git a/Tools/Scripts/webkit-tools-completion.sh b/Tools/Scripts/webkit-tools-completion.sh
index 74fb73146..76578e159 100755
--- a/Tools/Scripts/webkit-tools-completion.sh
+++ b/Tools/Scripts/webkit-tools-completion.sh
@@ -37,6 +37,20 @@ __webkit-patch_generate_reply()
COMPREPLY=( $(compgen -W "$1" -- "${COMP_WORDS[COMP_CWORD]}") )
}
+__webkit-patch_upload_cc_generate_reply()
+{
+ # Completion is done on tokens and our comma-separated list is one single token, so we have to do completion on the whole list each time.
+ # Return a \n separated list for each possible bugzilla email completion of the substring following the last comma.
+# Redirect stderr to /dev/null to prevent noise in the shell if this ever breaks somehow.
+ COMPREPLY=( $(PYTHONPATH=$(dirname "${BASH_SOURCE[0]}") python -c "
+import sys,re
+from webkitpy.common.config.committers import CommitterList
+m = re.match('((.*,)*)(.*)', sys.argv[1])
+untilLastComma = m.group(1)
+afterLastComma = m.group(3)
+print('\n'.join([untilLastComma + c.bugzilla_email() + ',' for c in CommitterList().contributors() if c.bugzilla_email().startswith(afterLastComma)]))" "${COMP_WORDS[COMP_CWORD]}" 2>/dev/null ) )
+}
+
_webkit-patch_complete()
{
local command current_command="${COMP_WORDS[1]}"
@@ -78,6 +92,10 @@ _webkit-patch_complete()
return
;;
upload)
+ if [[ ${COMP_WORDS[COMP_CWORD-1]} == "--cc" || ${COMP_WORDS[COMP_CWORD-1]} == "=" && ${COMP_WORDS[COMP_CWORD-2]} == "--cc" ]]; then
+ __webkit-patch_upload_cc_generate_reply
+ return
+ fi
__webkit-patch_generate_reply "--description --no-obsolete --no-review --request-commit --cc -m --open-bug"
return
;;
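
The embedded Python in __webkit-patch_upload_cc_generate_reply hinges on splitting the current --cc token at its last comma, completing only the trailing fragment while keeping the already-typed addresses intact. The regular expression behaves like this (values are illustrative):

    import re

    current_token = "aroben@apple.com,haus"
    m = re.match(r'((.*,)*)(.*)', current_token)
    until_last_comma = m.group(1)  # "aroben@apple.com," -- kept verbatim
    after_last_comma = m.group(3)  # "haus" -- the fragment being completed
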
diff --git a/Tools/Scripts/webkitdirs.pm b/Tools/Scripts/webkitdirs.pm
index 85e2f86f3..c471a9470 100755
--- a/Tools/Scripts/webkitdirs.pm
+++ b/Tools/Scripts/webkitdirs.pm
@@ -1,4 +1,4 @@
-# Copyright (C) 2005, 2006, 2007, 2010, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2005, 2006, 2007, 2010, 2011, 2012 Apple Inc. All rights reserved.
# Copyright (C) 2009 Google Inc. All rights reserved.
# Copyright (C) 2011 Research In Motion Limited. All rights reserved.
#
@@ -56,6 +56,7 @@ BEGIN {
&cmakeBasedPortName
&currentSVNRevision
&debugSafari
+ &nmPath
&passedConfiguration
&printHelpAndExitForRunAndDebugWebKitAppIfNeeded
&productDir
@@ -81,6 +82,7 @@ my $configurationForVisualStudio;
my $configurationProductDir;
my $sourceDir;
my $currentSVNRevision;
+my $nmPath;
my $osXVersion;
my $generateDsym;
my $isQt;
@@ -311,13 +313,9 @@ sub determineArchitecture
if ($architecture) {
chomp $architecture;
} else {
- if (isLeopard()) {
- $architecture = `arch`;
- } else {
- my $supports64Bit = `sysctl -n hw.optional.x86_64`;
- chomp $supports64Bit;
- $architecture = $supports64Bit ? 'x86_64' : `arch`;
- }
+ my $supports64Bit = `sysctl -n hw.optional.x86_64`;
+ chomp $supports64Bit;
+ $architecture = $supports64Bit ? 'x86_64' : `arch`;
chomp $architecture;
}
}
@@ -930,6 +928,7 @@ sub blackberryCMakeArguments()
if ($cpu eq "a9") {
$cpu = $arch . "v7le";
push @cmakeExtraOptions, '-DTARGETING_PLAYBOOK=1';
+ push @cmakeExtraOptions, '-DENABLE_GLES2=1';
}
my $stageDir = $ENV{"STAGE_DIR"};
@@ -956,6 +955,7 @@ sub blackberryCMakeArguments()
push @cmakeExtraOptions, "-DCMAKE_SKIP_RPATH='ON'" if isDarwin();
push @cmakeExtraOptions, "-DENABLE_DRT=1" if $ENV{"ENABLE_DRT"};
+ push @cmakeExtraOptions, "-DENABLE_GLES2=1" if $ENV{"ENABLE_GLES2"};
my @includeSystemDirectories;
push @includeSystemDirectories, File::Spec->catdir($stageInc, "grskia", "skia");
@@ -965,6 +965,7 @@ sub blackberryCMakeArguments()
push @includeSystemDirectories, $stageInc;
push @includeSystemDirectories, File::Spec->catdir($stageInc, "browser", "platform");
push @includeSystemDirectories, File::Spec->catdir($stageInc, "browser", "qsk");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "ots");
my @cxxFlags;
push @cxxFlags, "-Wl,-rpath-link,$stageLib";
@@ -979,7 +980,6 @@ sub blackberryCMakeArguments()
}
my @cmakeArgs;
- push @cmakeArgs, "-DPUBLIC_BUILD=0";
push @cmakeArgs, '-DCMAKE_SYSTEM_NAME="QNX"';
push @cmakeArgs, "-DCMAKE_SYSTEM_PROCESSOR=\"$cpuDir\"";
push @cmakeArgs, '-DCMAKE_SYSTEM_VERSION="1"';
@@ -1152,7 +1152,7 @@ sub determineIsChromiumNinja()
my $hasUpToDateNinjabuild = 0;
if (-e "out/$config/build.ninja") {
- my $statNinja = stat("out/$config/build.ninja");
+ my $statNinja = stat("out/$config/build.ninja")->mtime;
my $statXcode = 0;
if (-e 'Source/WebKit/chromium/WebKit.xcodeproj') {
@@ -1161,7 +1161,7 @@ sub determineIsChromiumNinja()
my $statMake = 0;
if (-e 'Makefile.chromium') {
- $statXcode = stat('Makefile.chromium')->mtime;
+ $statMake = stat('Makefile.chromium')->mtime;
}
$hasUpToDateNinjabuild = $statNinja > $statXcode && $statNinja > $statMake;
@@ -1308,6 +1308,23 @@ sub isPerianInstalled()
return 0;
}
+sub determineNmPath()
+{
+ return if $nmPath;
+
+ if (isAppleMacWebKit()) {
+ $nmPath = `xcrun -find nm`;
+ chomp $nmPath;
+ }
+ $nmPath = "nm" if !$nmPath;
+}
+
+sub nmPath()
+{
+ determineNmPath();
+ return $nmPath;
+}
+
sub determineOSXVersion()
{
return if $osXVersion;
@@ -1333,11 +1350,6 @@ sub osXVersion()
return $osXVersion;
}
-sub isLeopard()
-{
- return isDarwin() && osXVersion()->{"minor"} == 5;
-}
-
sub isSnowLeopard()
{
return isDarwin() && osXVersion()->{"minor"} == 6;
@@ -1792,14 +1804,6 @@ sub runAutogenForAutotoolsProjectIfNecessary($@)
print "Calling autogen.sh in " . $dir . "\n\n";
print "Installation prefix directory: $prefix\n" if(defined($prefix));
- # Save md5sum for jhbuild-related files.
- foreach my $file (qw(jhbuildrc jhbuild.modules)) {
- my $path = join('/', $sourceDir, 'Tools', 'gtk', $file);
- open(SUM, ">$file.md5sum");
- print SUM getMD5HashForFile($path);
- close(SUM);
- }
-
# Only for WebKit, write the autogen.sh arguments to a file so that we can detect
# when they change and automatically re-run it.
if ($project eq 'WebKit') {
@@ -1828,6 +1832,35 @@ sub runAutogenForAutotoolsProjectIfNecessary($@)
}
}
+sub getJhbuildPath()
+{
+ return join('/', baseProductDir(), "Dependencies");
+}
+
+sub jhbuildConfigurationChanged()
+{
+ foreach my $file (qw(jhbuildrc.md5sum jhbuild.modules.md5sum)) {
+ my $path = join('/', getJhbuildPath(), $file);
+ if (! -e $path) {
+ return 1;
+ }
+
+ # Get the md5 sum of the file we're testing.
+ $file =~ m/(.+)\.md5sum/;
+ my $actualFile = join('/', $sourceDir, 'Tools', 'gtk', $1);
+ my $currentSum = getMD5HashForFile($actualFile);
+
+ # Get our previous record.
+ open(PREVIOUS_MD5, $path);
+ chomp(my $previousSum = <PREVIOUS_MD5>);
+ close(PREVIOUS_MD5);
+
+ if ($previousSum ne $currentSum) {
+ return 1;
+ }
+ }
+}
+
sub mustReRunAutogen($@)
{
my ($sourceDir, $filename, @currentArguments) = @_;
@@ -1851,27 +1884,6 @@ sub mustReRunAutogen($@)
return 1;
}
- # Now check jhbuild configuration for changes.
- foreach my $file (qw(jhbuildrc.md5sum jhbuild.modules.md5sum)) {
- if (! -e $file) {
- return 1;
- }
-
- # Get the md5 sum of the file we're testing.
- $file =~ m/(.+)\.md5sum/;
- my $actualFile = join('/', $sourceDir, 'Tools', 'gtk', $1);
- my $currentSum = getMD5HashForFile($actualFile);
-
- # Get our previous record.
- open(PREVIOUS_MD5, $file);
- chomp(my $previousSum = <PREVIOUS_MD5>);
- close(PREVIOUS_MD5);
-
- if ($previousSum ne $currentSum) {
- return 1;
- }
- }
-
return 0;
}
@@ -1898,11 +1910,6 @@ sub buildAutotoolsProject($@)
return 0;
}
- # We might need to update jhbuild dependencies.
- if (checkForArgumentAndRemoveFromArrayRef("--update-gtk", \@buildParams)) {
- system("perl", "$sourceDir/Tools/Scripts/update-webkitgtk-libs") == 0 or die $!;
- }
-
my @buildArgs = ();
my $makeArgs = $ENV{"WebKitMakeArguments"} || "";
for my $i (0 .. $#buildParams) {
@@ -1925,6 +1932,8 @@ sub buildAutotoolsProject($@)
# WebKit is the default target, so we don't need to specify anything.
if ($project eq "JavaScriptCore") {
$makeArgs .= " jsc";
+ } elsif ($project eq "WTF") {
+ $makeArgs .= " libWTF.la";
}
$prefix = $ENV{"WebKitInstallationPrefix"} if !defined($prefix);
@@ -1938,6 +1947,44 @@ sub buildAutotoolsProject($@)
push @buildArgs, "--disable-debug";
}
+ # We might need to update jhbuild dependencies.
+ my $needUpdate = 0;
+ if (jhbuildConfigurationChanged()) {
+ # If the configuration changed, dependencies may have been removed.
+ # Since we lack a granular way of uninstalling those we wipe out the
+ # jhbuild root and start from scratch.
+ my $jhbuildPath = getJhbuildPath();
+    if (system("rm -rf $jhbuildPath/Root") != 0) {
+ die "Cleaning jhbuild root failed!";
+ }
+
+    if (system("perl $sourceDir/Tools/jhbuild/jhbuild-wrapper --gtk clean") != 0) {
+ die "Cleaning jhbuild modules failed!";
+ }
+
+ $needUpdate = 1;
+ }
+
+ if (checkForArgumentAndRemoveFromArrayRef("--update-gtk", \@buildArgs)) {
+ $needUpdate = 1;
+ }
+
+ if ($needUpdate) {
+ # Force autogen to run, to catch the possibly updated libraries.
+ system("rm -f previous-autogen-arguments.txt");
+
+ system("perl", "$sourceDir/Tools/Scripts/update-webkitgtk-libs") == 0 or die $!;
+ }
+
+ # Save md5sum for jhbuild-related files.
+ foreach my $file (qw(jhbuildrc jhbuild.modules)) {
+ my $source = join('/', $sourceDir, "Tools", "gtk", $file);
+ my $destination = join('/', getJhbuildPath(), $file);
+ open(SUM, ">$destination" . ".md5sum");
+ print SUM getMD5HashForFile($source);
+ close(SUM);
+ }
+
# If GNUmakefile exists, don't run autogen.sh unless its arguments
# have changed. The makefile should be smart enough to track autotools
# dependencies and re-run autogen.sh when build files change.
@@ -1953,9 +2000,7 @@ sub buildAutotoolsProject($@)
if ($project eq 'WebKit' && !isCrossCompilation()) {
my @docGenerationOptions = ($runWithJhbuild, "$gtkScriptsPath/generate-gtkdoc", "--skip-html");
- if ($debug) {
- push(@docGenerationOptions, "--debug");
- }
+ push(@docGenerationOptions, productDir());
if (system(@docGenerationOptions)) {
die "\n gtkdoc did not build without warnings\n";
@@ -1965,6 +2010,14 @@ sub buildAutotoolsProject($@)
return 0;
}
+sub jhbuildWrapperPrefixIfNeeded()
+{
+ if (isEfl()) {
+ return File::Spec->catfile(sourceDir(), "Tools", "efl", "run-with-jhbuild");
+ }
+ return "";
+}
+
sub generateBuildSystemFromCMakeProject
{
my ($port, $prefixPath, @cmakeArgs, $additionalCMakeArgs) = @_;
@@ -1990,7 +2043,8 @@ sub generateBuildSystemFromCMakeProject
# We call system("cmake @args") instead of system("cmake", @args) so that @args is
# parsed for shell metacharacters.
- my $returnCode = system("cmake @args");
+ my $wrapper = jhbuildWrapperPrefixIfNeeded() . " ";
+ my $returnCode = system($wrapper . "cmake @args");
chdir($originalWorkingDirectory);
return $returnCode;
@@ -2009,7 +2063,8 @@ sub buildCMakeGeneratedProject($)
# We call system("cmake @args") instead of system("cmake", @args) so that @args is
# parsed for shell metacharacters. In particular, $makeArgs may contain such metacharacters.
- return system("cmake @args");
+ my $wrapper = jhbuildWrapperPrefixIfNeeded() . " ";
+ return system($wrapper . "cmake @args");
}
sub cleanCMakeGeneratedProject()
@@ -2033,6 +2088,7 @@ sub buildCMakeProjectOrExit($$$$@)
exit($returnCode) if $returnCode;
$returnCode = exitStatus(buildCMakeGeneratedProject($makeArgs));
exit($returnCode) if $returnCode;
+ return 0;
}
sub cmakeBasedPortArguments()
@@ -2274,8 +2330,8 @@ sub buildGtkProject
{
my ($project, $clean, @buildArgs) = @_;
- if ($project ne "WebKit" and $project ne "JavaScriptCore") {
- die "Unsupported project: $project. Supported projects: WebKit, JavaScriptCore\n";
+ if ($project ne "WebKit" and $project ne "JavaScriptCore" and $project ne "WTF") {
+ die "Unsupported project: $project. Supported projects: WebKit, JavaScriptCore, WTF\n";
}
return buildAutotoolsProject($project, $clean, @buildArgs);
@@ -2294,19 +2350,7 @@ sub buildChromiumMakefile($$@)
$makeArgs = $1 if /^--makeargs=(.*)/i;
}
$makeArgs = "-j$numCpus" if not $makeArgs;
- my $command = "";
-
- # Building the WebKit Chromium port for Android requires us to cross-
- # compile, which will be set up by Chromium's envsetup.sh. The script itself
- # will verify that the installed NDK is indeed available.
- if (isChromiumAndroid()) {
- $command .= "bash -c \"source " . sourceDir() . "/Source/WebKit/chromium/build/android/envsetup.sh && ";
- $ENV{ANDROID_NDK_ROOT} = sourceDir() . "/Source/WebKit/chromium/android-ndk-r7";
- $ENV{WEBKIT_ANDROID_BUILD} = 1;
- }
-
- $command .= "make -fMakefile.chromium $makeArgs BUILDTYPE=$config $target";
- $command .= "\"" if isChromiumAndroid();
+    my $command = "make -fMakefile.chromium $makeArgs BUILDTYPE=$config $target";
print "$command\n";
return system $command;
@@ -2435,7 +2479,7 @@ EOF
sub argumentsForRunAndDebugMacWebKitApp()
{
my @args = @ARGV;
- push @args, ("-ApplePersistenceIgnoreState", "YES") if isLion() && checkForArgumentAndRemoveFromArrayRef("--no-saved-state", \@args);
+ push @args, ("-ApplePersistenceIgnoreState", "YES") if !isSnowLeopard() && checkForArgumentAndRemoveFromArrayRef("--no-saved-state", \@args);
return @args;
}
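
jhbuildConfigurationChanged and the stamp-writing loop added above implement a digest-stamp scheme: record the md5 of each tracked jhbuild file next to the build products, and treat a missing or mismatched stamp as a configuration change. The same idea as a minimal Python sketch (file names are placeholders):

    import hashlib
    import os

    def file_md5(path):
        with open(path, 'rb') as f:
            return hashlib.md5(f.read()).hexdigest()

    def configuration_changed(tracked_file, stamp_file):
        # A missing stamp means we have never built with this configuration.
        if not os.path.exists(stamp_file):
            return True
        with open(stamp_file) as f:
            previous = f.read().strip()
        return previous != file_md5(tracked_file)

    def write_stamp(tracked_file, stamp_file):
        with open(stamp_file, 'w') as f:
            f.write(file_md5(tracked_file))
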
diff --git a/Tools/Scripts/webkitperl/FeatureList.pm b/Tools/Scripts/webkitperl/FeatureList.pm
new file mode 100644
index 000000000..9a286807a
--- /dev/null
+++ b/Tools/Scripts/webkitperl/FeatureList.pm
@@ -0,0 +1,359 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# A module to contain all the enable/disable feature option code.
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib $FindBin::Bin;
+use webkitdirs;
+
+BEGIN {
+ use Exporter ();
+ our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);
+ $VERSION = 1.00;
+ @ISA = qw(Exporter);
+ @EXPORT = qw(&getFeatureOptionList);
+ %EXPORT_TAGS = ( );
+ @EXPORT_OK = ();
+}
+
+my (
+ $threeDRenderingSupport,
+ $accelerated2DCanvasSupport,
+ $animationAPISupport,
+ $batteryStatusSupport,
+ $blobSupport,
+ $channelMessagingSupport,
+ $cssFiltersSupport,
+ $cssGridLayoutSupport,
+ $cssShadersSupport,
+ $dataTransferItemsSupport,
+ $datalistSupport,
+ $detailsSupport,
+ $deviceOrientationSupport,
+ $directoryUploadSupport,
+ $downloadAttributeSupport,
+ $fileSystemSupport,
+ $filtersSupport,
+ $ftpDirSupport,
+ $fullscreenAPISupport,
+ $gamepadSupport,
+ $geolocationSupport,
+ $highDPICanvasSupport,
+ $icondatabaseSupport,
+ $imageResizerSupport,
+ $indexedDatabaseSupport,
+ $inputSpeechSupport,
+ $inputTypeColorSupport,
+ $inputTypeDateSupport,
+ $inputTypeDatetimeSupport,
+ $inputTypeDatetimelocalSupport,
+ $inputTypeMonthSupport,
+ $inputTypeTimeSupport,
+ $inputTypeWeekSupport,
+ $inspectorSupport,
+ $javascriptDebuggerSupport,
+ $legacyNotificationsSupport,
+ $legacyWebKitBlobBuilderSupport,
+ $linkPrefetchSupport,
+ $linkPrerenderSupport,
+ $mathmlSupport,
+ $mediaSourceSupport,
+ $mediaStatisticsSupport,
+ $mediaStreamSupport,
+ $meterTagSupport,
+ $mhtmlSupport,
+ $microdataSupport,
+ $mutationObserversSupport,
+ $netscapePluginAPISupport,
+ $networkInfoSupport,
+ $notificationsSupport,
+ $orientationEventsSupport,
+ $pageVisibilityAPISupport,
+ $progressTagSupport,
+ $quotaSupport,
+ $registerProtocolHandlerSupport,
+ $requestAnimationFrameSupport,
+ $scriptedSpeechSupport,
+ $shadowDOMSupport,
+ $sharedWorkersSupport,
+ $sqlDatabaseSupport,
+ $styleScopedSupport,
+ $svgDOMObjCBindingsSupport,
+ $svgFontsSupport,
+ $svgSupport,
+ $systemMallocSupport,
+ $tiledBackingStoreSupport,
+ $touchEventsSupport,
+ $touchIconLoadingSupport,
+ $vibrationSupport,
+ $videoSupport,
+ $videoTrackSupport,
+ $webglSupport,
+ $webAudioSupport,
+ $webSocketsSupport,
+ $webTimingSupport,
+ $workersSupport,
+ $xsltSupport,
+);
+
+my @features = (
+ { option => "3d-rendering", desc => "Toggle 3D Rendering support",
+ define => "ENABLE_3D_RENDERING", default => (isAppleMacWebKit() || isQt()), value => \$threeDRenderingSupport },
+
+ { option => "accelerated-2d-canvas", desc => "Toggle Accelerated 2D Canvas support",
+ define => "ENABLE_ACCELERATED_2D_CANVAS", default => 0, value => \$accelerated2DCanvasSupport },
+
+ { option => "animation-api", desc => "Toggle Animation API support",
+ define => "ENABLE_ANIMATION_API", default => isBlackBerry(), value => \$animationAPISupport },
+
+ { option => "battery-status", desc => "Toggle Battery Status support",
+ define => "ENABLE_BATTERY_STATUS", default => (isEfl() || isBlackBerry()), value => \$batteryStatusSupport },
+
+ { option => "blob", desc => "Toggle Blob support",
+ define => "ENABLE_BLOB", default => (isAppleMacWebKit() || isGtk() || isChromium() || isBlackBerry()), value => \$blobSupport },
+
+ { option => "channel-messaging", desc => "Toggle Channel Messaging support",
+ define => "ENABLE_CHANNEL_MESSAGING", default => 1, value => \$channelMessagingSupport },
+
+ { option => "css-filters", desc => "Toggle CSS Filters support",
+ define => "ENABLE_CSS_FILTERS", default => isAppleWebKit(), value => \$cssFiltersSupport },
+
+ { option => "css-grid-layout", desc => "Toggle CSS Grid Layout support",
+ define => "ENABLE_CSS_GRID_LAYOUT", default => 0, value => \$cssGridLayoutSupport },
+
+ { option => "css-shaders", desc => "Toggle CSS Shaders support",
+ define => "ENABLE_CSS_SHADERS", default => 0, value => \$cssShadersSupport },
+
+ { option => "datalist", desc => "Toggle Datalist support",
+ define => "ENABLE_DATALIST", default => 0, value => \$datalistSupport },
+
+ { option => "data-transfer-items", desc => "Toggle Data Transfer Items support",
+ define => "ENABLE_DATA_TRANSFER_ITEMS", default => 0, value => \$dataTransferItemsSupport },
+
+ { option => "details", desc => "Toggle Details support",
+ define => "ENABLE_DETAILS", default => 1, value => \$detailsSupport },
+
+ { option => "device-orientation", desc => "Toggle Device Orientation support",
+ define => "ENABLE_DEVICE_ORIENTATION", default => isBlackBerry(), value => \$deviceOrientationSupport },
+
+    { option => "directory-upload", desc => "Toggle Directory Upload support",
+ define => "ENABLE_DIRECTORY_UPLOAD", default => 0, value => \$directoryUploadSupport },
+
+ { option => "download-attribute", desc => "Toggle Download Attribute support",
+ define => "ENABLE_DOWNLOAD_ATTRIBUTE", default => isBlackBerry(), value => \$downloadAttributeSupport },
+
+ { option => "file-system", desc => "Toggle File System support",
+ define => "ENABLE_FILE_SYSTEM", default => isBlackBerry(), value => \$fileSystemSupport },
+
+ { option => "filters", desc => "Toggle Filters support",
+ define => "ENABLE_FILTERS", default => (isAppleWebKit() || isGtk() || isQt() || isEfl() || isBlackBerry()), value => \$filtersSupport },
+
+ { option => "ftpdir", desc => "Toggle FTP Directory support",
+ define => "ENABLE_FTPDIR", default => !isWinCE(), value => \$ftpDirSupport },
+
+ { option => "fullscreen-api", desc => "Toggle Fullscreen API support",
+ define => "ENABLE_FULLSCREEN_API", default => (isAppleMacWebKit() || isGtk() || isBlackBerry() || isQt()), value => \$fullscreenAPISupport },
+
+ { option => "gamepad", desc => "Toggle Gamepad support",
+ define => "ENABLE_GAMEPAD", default => 0, value => \$gamepadSupport },
+
+ { option => "geolocation", desc => "Toggle Geolocation support",
+ define => "ENABLE_GEOLOCATION", default => (isAppleWebKit() || isGtk() || isBlackBerry()), value => \$geolocationSupport },
+
+ { option => "high-dpi-canvas", desc => "Toggle High DPI Canvas support",
+ define => "ENABLE_HIGH_DPI_CANVAS", default => (isAppleWebKit()), value => \$highDPICanvasSupport },
+
+ { option => "icon-database", desc => "Toggle Icondatabase support",
+ define => "ENABLE_ICONDATABASE", default => 1, value => \$icondatabaseSupport },
+
+ { option => "indexed-database", desc => "Toggle Indexed Database support",
+ define => "ENABLE_INDEXED_DATABASE", default => 0, value => \$indexedDatabaseSupport },
+
+ { option => "input-speech", desc => "Toggle Input Speech support",
+ define => "ENABLE_INPUT_SPEECH", default => 0, value => \$inputSpeechSupport },
+
+ { option => "input-type-color", desc => "Toggle Input Type Color support",
+ define => "ENABLE_INPUT_TYPE_COLOR", default => (isBlackBerry() || isEfl()), value => \$inputTypeColorSupport },
+
+ { option => "input-type-date", desc => "Toggle Input Type Date support",
+ define => "ENABLE_INPUT_TYPE_DATE", default => 0, value => \$inputTypeDateSupport },
+
+ { option => "input-type-datetime", desc => "Toggle Input Type Datetime support",
+ define => "ENABLE_INPUT_TYPE_DATETIME", default => 0, value => \$inputTypeDatetimeSupport },
+
+ { option => "input-type-datetimelocal", desc => "Toggle Input Type Datetimelocal support",
+ define => "ENABLE_INPUT_TYPE_DATETIMELOCAL", default => 0, value => \$inputTypeDatetimelocalSupport },
+
+ { option => "input-type-month", desc => "Toggle Input Type Month support",
+ define => "ENABLE_INPUT_TYPE_MONTH", default => 0, value => \$inputTypeMonthSupport },
+
+ { option => "input-type-time", desc => "Toggle Input Type Time support",
+ define => "ENABLE_INPUT_TYPE_TIME", default => 0, value => \$inputTypeTimeSupport },
+
+ { option => "input-type-week", desc => "Toggle Input Type Week support",
+ define => "ENABLE_INPUT_TYPE_WEEK", default => 0, value => \$inputTypeWeekSupport },
+
+ { option => "inspector", desc => "Toggle Inspector support",
+ define => "ENABLE_INSPECTOR", default => !isWinCE(), value => \$inspectorSupport },
+
+ { option => "javascript-debugger", desc => "Toggle JavaScript Debugger support",
+ define => "ENABLE_JAVASCRIPT_DEBUGGER", default => 1, value => \$javascriptDebuggerSupport },
+
+ { option => "legacy-notifications", desc => "Toggle Legacy Notifications support",
+ define => "ENABLE_LEGACY_NOTIFICATIONS", default => isBlackBerry(), value => \$legacyNotificationsSupport },
+
+ { option => "legacy-webkit-blob-builder", desc => "Toggle Legacy WebKit Blob Builder support",
+ define => "ENABLE_LEGACY_WEBKIT_BLOB_BUILDER", default => (isGtk() || isChromium() || isBlackBerry()), value => \$legacyWebKitBlobBuilderSupport },
+
+ { option => "link-prefetch", desc => "Toggle Link Prefetch support",
+ define => "ENABLE_LINK_PREFETCH", default => 0, value => \$linkPrefetchSupport },
+
+ { option => "link-prerender", desc => "Toggle Link Prerender support",
+ define => "ENABLE_LINK_PRERENDER", default => 0, value => \$linkPrerenderSupport },
+
+ { option => "mathml", desc => "Toggle MathML support",
+ define => "ENABLE_MATHML", default => 1, value => \$mathmlSupport },
+
+ { option => "media-source", desc => "Toggle Media Source support",
+ define => "ENABLE_MEDIA_SOURCE", default => 0, value => \$mediaSourceSupport },
+
+ { option => "media-statistics", desc => "Toggle Media Statistics support",
+ define => "ENABLE_MEDIA_STATISTICS", default => 0, value => \$mediaStatisticsSupport },
+
+ { option => "media-stream", desc => "Toggle Media Stream support",
+ define => "ENABLE_MEDIA_STREAM", default => (isChromium() || isGtk()), value => \$mediaStreamSupport },
+
+ { option => "meter-tag", desc => "Toggle Meter Tag support",
+ define => "ENABLE_METER_TAG", default => !isAppleWinWebKit(), value => \$meterTagSupport },
+
+ { option => "mhtml", desc => "Toggle MHTML support",
+ define => "ENABLE_MHTML", default => 0, value => \$mhtmlSupport },
+
+ { option => "microdata", desc => "Toggle Microdata support",
+ define => "ENABLE_MICRODATA", default => 0, value => \$microdataSupport },
+
+ { option => "mutation-observers", desc => "Toggle Mutation Observers support",
+ define => "ENABLE_MUTATION_OBSERVERS", default => 1, value => \$mutationObserversSupport },
+
+ { option => "netscape-plugin-api", desc => "Toggle Netscape Plugin API support",
+ define => "ENABLE_NETSCAPE_PLUGIN_API", default => !isEfl(), value => \$netscapePluginAPISupport },
+
+ { option => "network-info", desc => "Toggle Network Info support",
+ define => "ENABLE_NETWORK_INFO", default => isEfl(), value => \$networkInfoSupport },
+
+ { option => "notifications", desc => "Toggle Notifications support",
+ define => "ENABLE_NOTIFICATIONS", default => isBlackBerry(), value => \$notificationsSupport },
+
+ { option => "orientation-events", desc => "Toggle Orientation Events support",
+ define => "ENABLE_ORIENTATION_EVENTS", default => isBlackBerry(), value => \$orientationEventsSupport },
+
+ { option => "page-visibility-api", desc => "Toggle Page Visibility API support",
+ define => "ENABLE_PAGE_VISIBILITY_API", default => (isBlackBerry() || isEfl()), value => \$pageVisibilityAPISupport },
+
+ { option => "progress-tag", desc => "Toggle Progress Tag support",
+ define => "ENABLE_PROGRESS_TAG", default => 1, value => \$progressTagSupport },
+
+ { option => "quota", desc => "Toggle Quota support",
+ define => "ENABLE_QUOTA", default => 0, value => \$quotaSupport },
+
+ { option => "register-protocol-handler", desc => "Toggle Register Protocol Handler support",
+ define => "ENABLE_REGISTER_PROTOCOL_HANDLER", default => 0, value => \$registerProtocolHandlerSupport },
+
+ { option => "request-animation-frame", desc => "Toggle Request Animation Frame support",
+ define => "ENABLE_REQUEST_ANIMATION_FRAME", default => (isAppleMacWebKit() || isGtk() || isEfl() || isBlackBerry()), value => \$requestAnimationFrameSupport },
+
+ { option => "scripted-speech", desc => "Toggle Scripted Speech support",
+ define => "ENABLE_SCRIPTED_SPEECH", default => 0, value => \$scriptedSpeechSupport },
+
+ { option => "shadow-dom", desc => "Toggle Shadow DOM support",
+ define => "ENABLE_SHADOW_DOM", default => isGtk(), value => \$shadowDOMSupport },
+
+ { option => "shared-workers", desc => "Toggle Shared Workers support",
+ define => "ENABLE_SHARED_WORKERS", default => (isAppleWebKit() || isGtk() || isBlackBerry() || isEfl()), value => \$sharedWorkersSupport },
+
+ { option => "sql-database", desc => "Toggle SQL Database support",
+ define => "ENABLE_SQL_DATABASE", default => 1, value => \$sqlDatabaseSupport },
+
+ { option => "style-scoped", desc => "Toggle Style Scoped support",
+ define => "ENABLE_STYLE_SCOPED", default => 0, value => \$styleScopedSupport },
+
+ { option => "svg", desc => "Toggle SVG support",
+ define => "ENABLE_SVG", default => 1, value => \$svgSupport },
+
+ { option => "svg-dom-objc-bindings", desc => "Toggle SVG DOM ObjC Bindings support",
+ define => "ENABLE_SVG_DOM_OBJC_BINDINGS", default => isAppleMacWebKit(), value => \$svgDOMObjCBindingsSupport },
+
+ { option => "svg-fonts", desc => "Toggle SVG Fonts support",
+ define => "ENABLE_SVG_FONTS", default => 1, value => \$svgFontsSupport },
+
+ { option => "system-malloc", desc => "Toggle system allocator instead of TCmalloc",
+ define => "USE_SYSTEM_MALLOC", default => isWinCE(), value => \$systemMallocSupport },
+
+ { option => "tiled-backing-store", desc => "Toggle Tiled Backing Store support",
+ define => "WTF_USE_TILED_BACKING_STORE", default => isQt(), value => \$tiledBackingStoreSupport },
+
+ { option => "touch-events", desc => "Toggle Touch Events support",
+ define => "ENABLE_TOUCH_EVENTS", default => (isQt() || isBlackBerry()), value => \$touchEventsSupport },
+
+ { option => "touch-icon-loading", desc => "Toggle Touch Icon Loading Support",
+ define => "ENABLE_TOUCH_ICON_LOADING", default => 0, value => \$touchIconLoadingSupport },
+
+ { option => "vibration", desc => "Toggle Vibration support",
+ define => "ENABLE_VIBRATION", default => (isEfl() || isBlackBerry()), value => \$vibrationSupport },
+
+ { option => "video", desc => "Toggle Video support",
+ define => "ENABLE_VIDEO", default => (isAppleWebKit() || isGtk() || isBlackBerry() || isEfl()), value => \$videoSupport },
+
+ { option => "video-track", desc => "Toggle Video Track support",
+ define => "ENABLE_VIDEO_TRACK", default => (isAppleWebKit() || isGtk() || isEfl()), value => \$videoTrackSupport },
+
+ { option => "webgl", desc => "Toggle WebGL support",
+ define => "ENABLE_WEBGL", default => isAppleMacWebKit(), value => \$webglSupport },
+
+ { option => "web-audio", desc => "Toggle Web Audio support",
+ define => "ENABLE_WEB_AUDIO", default => 0, value => \$webAudioSupport },
+
+ { option => "web-sockets", desc => "Toggle Web Sockets support",
+ define => "ENABLE_WEB_SOCKETS", default => 1, value => \$webSocketsSupport },
+
+ { option => "web-timing", desc => "Toggle Web Timing support",
+ define => "ENABLE_WEB_TIMING", default => (isBlackBerry() || isGtk() || isEfl()), value => \$webTimingSupport },
+
+ { option => "workers", desc => "Toggle Workers support",
+ define => "ENABLE_WORKERS", default => (isAppleWebKit() || isGtk() || isBlackBerry() || isEfl()), value => \$workersSupport },
+
+ { option => "xslt", desc => "Toggle XSLT support",
+ define => "ENABLE_XSLT", default => 1, value => \$xsltSupport },
+);
+
+sub getFeatureOptionList()
+{
+ return @features;
+}
+
+1;
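
Editor's note: the @features table above is what drives build-webkit's --<feature>/--no-<feature> flags. Each entry couples a command-line option to its ENABLE_* define, a per-port default, and the Perl scalar that receives the final value; getFeatureOptionList() is the only accessor other scripts should use. As a rough, hypothetical Python analogue (the real code is Perl and uses Getopt::Long, so every name below is illustrative only), such a table can drive flag parsing like this:

    # Hypothetical sketch, not the Perl implementation: an option table
    # in the spirit of @features driving --X/--no-X flag parsing.
    import argparse

    FEATURES = [
        # (option, define, default) -- entries mirror FeatureList.pm
        ("svg", "ENABLE_SVG", True),
        ("web-audio", "ENABLE_WEB_AUDIO", False),
    ]

    def parse_feature_flags(argv):
        parser = argparse.ArgumentParser()
        for option, define, default in FEATURES:
            parser.add_argument("--" + option, dest=define,
                                action="store_true", default=default)
            parser.add_argument("--no-" + option, dest=define,
                                action="store_false")
        return vars(parser.parse_args(argv))

    # parse_feature_flags(["--no-svg"])
    # -> {'ENABLE_SVG': False, 'ENABLE_WEB_AUDIO': False}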
diff --git a/Tools/Scripts/webkitperl/features.pm b/Tools/Scripts/webkitperl/features.pm
index 1df56405b..e546c394b 100644
--- a/Tools/Scripts/webkitperl/features.pm
+++ b/Tools/Scripts/webkitperl/features.pm
@@ -1,4 +1,4 @@
-# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved
+# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2012 Apple Inc. All rights reserved
# Copyright (C) 2006 Alexey Proskuryakov (ap@nypop.com)
# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged
#
@@ -31,13 +31,16 @@
use strict;
use warnings;
+use FindBin;
+use lib $FindBin::Bin;
+use webkitdirs;
+
BEGIN {
use Exporter ();
our ($VERSION, @ISA, @EXPORT, @EXPORT_OK, %EXPORT_TAGS);
$VERSION = 1.00;
@ISA = qw(Exporter);
- @EXPORT = qw(&checkWebCoreFeatureSupport
- &removeLibraryDependingOnFeature);
+ @EXPORT = qw(&checkWebCoreFeatureSupport);
%EXPORT_TAGS = ( );
@EXPORT_OK = ();
}
@@ -53,7 +56,7 @@ sub libraryContainsSymbol($$)
my $foundSymbol = 0;
if (-e $path) {
- open NM, "-|", "nm", $path or die;
+ open NM, "-|", nmPath(), $path or die;
while (<NM>) {
$foundSymbol = 1 if /$symbol/; # FIXME: This should probably check for word boundaries before/after the symbol name.
}
@@ -90,14 +93,4 @@ sub checkWebCoreFeatureSupport($$)
return $hasFeature;
}
-sub removeLibraryDependingOnFeature($$$)
-{
- my ($libraryName, $featureName, $shouldHaveFeature) = @_;
- my $path = builtDylibPathForName($libraryName);
- return unless -x $path;
-
- my $hasFeature = hasFeature($featureName, $path);
- system "rm -f $path" if ($shouldHaveFeature xor $hasFeature);
-}
-
1;
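
Editor's note: the nmPath() change above exists so cross-compile configurations can point the symbol check at a toolchain-specific nm rather than whatever is first on PATH. A minimal Python sketch of the same check (assuming an 'nm' binary is available; the authoritative code is the Perl above):

    # Sketch of libraryContainsSymbol() in Python: scan nm output for a
    # feature symbol. Assumes 'nm' on PATH; the real script resolves it
    # via webkitdirs.pm's nmPath() so the tool can be overridden per-port.
    import os
    import re
    import subprocess

    def library_contains_symbol(path, symbol):
        if not os.path.exists(path):
            return False
        output = subprocess.check_output(["nm", path]).decode("utf-8", "replace")
        # FIXME (carried over from features.pm): this should check word
        # boundaries around the symbol name.
        return re.search(symbol, output) is not None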
diff --git a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
index 4e4dc6c49..b305c19b8 100644
--- a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
+++ b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer.py
@@ -35,8 +35,7 @@ def _baseline_search_hypergraph(host):
# These edges in the hypergraph aren't visible on build.webkit.org,
# but they impose constraints on how we optimize baselines.
- hypergraph['mac-future'] = ['LayoutTests/platform/mac-future', 'LayoutTests/platform/mac', 'LayoutTests']
- hypergraph['qt-unknown'] = ['LayoutTests/platform/qt-unknown', 'LayoutTests/platform/qt', 'LayoutTests']
+ hypergraph.update(_VIRTUAL_PORTS)
# FIXME: Should we get this constant from somewhere?
fallback_path = ['LayoutTests']
@@ -51,6 +50,12 @@ def _baseline_search_hypergraph(host):
return hypergraph
+_VIRTUAL_PORTS = {
+ 'mac-future': ['LayoutTests/platform/mac-future', 'LayoutTests/platform/mac', 'LayoutTests'],
+ 'qt-unknown': ['LayoutTests/platform/qt-unknown', 'LayoutTests/platform/qt', 'LayoutTests'],
+}
+
+
# FIXME: Should this function be somewhere more general?
def _invert_dictionary(dictionary):
inverted_dictionary = {}
@@ -130,8 +135,18 @@ class BaselineOptimizer(object):
break # Frowns. We do not appear to be converging.
unsatisfied_port_names_by_result = new_unsatisfied_port_names_by_result
+ self._filter_virtual_ports(new_results_by_directory)
return results_by_directory, new_results_by_directory
+ def _filter_virtual_ports(self, new_results_by_directory):
+ for port in _VIRTUAL_PORTS:
+ virtual_directory = _VIRTUAL_PORTS[port][0]
+ if virtual_directory in new_results_by_directory:
+ real_directory = _VIRTUAL_PORTS[port][1]
+ if real_directory not in new_results_by_directory:
+ new_results_by_directory[real_directory] = new_results_by_directory[virtual_directory]
+ del new_results_by_directory[virtual_directory]
+
def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
data_for_result = {}
for directory, result in results_by_directory.items():
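
Editor's note: the virtual-port filtering above boils down to this rule: any baseline the optimizer decides to place under a virtual directory (mac-future, qt-unknown) is redirected to the first real directory on that port's fallback path, unless that directory already has its own result. A standalone restatement:

    # Standalone restatement of _filter_virtual_ports() above.
    _VIRTUAL_PORTS = {
        'mac-future': ['LayoutTests/platform/mac-future',
                       'LayoutTests/platform/mac', 'LayoutTests'],
    }

    def filter_virtual_ports(results_by_directory):
        for search_path in _VIRTUAL_PORTS.values():
            virtual_directory, real_directory = search_path[0], search_path[1]
            if virtual_directory in results_by_directory:
                result = results_by_directory.pop(virtual_directory)
                if real_directory not in results_by_directory:
                    results_by_directory[real_directory] = result

    # results = {'LayoutTests/platform/mac-future': 'abc123'}
    # filter_virtual_ports(results)
    # results is now {'LayoutTests/platform/mac': 'abc123'}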
diff --git a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
index 7adac1252..82a637f08 100644
--- a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
@@ -75,6 +75,19 @@ class BaselineOptimizerTest(unittest.TestCase):
'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
})
+ def test_no_add_mac_future(self):
+ self._assertOptimization({
+ 'LayoutTests/platform/mac': '29a1715a6470d5dd9486a142f609708de84cdac8',
+ 'LayoutTests/platform/win': '453e67177a75b2e79905154ece0efba6e5bfb65d',
+ 'LayoutTests/platform/mac-snowleopard': 'c43eaeb358f49d5e835236ae23b7e49d7f2b089f',
+ 'LayoutTests/platform/chromium-mac': 'a9ba153c700a94ae1b206d8e4a75a621a89b4554',
+ }, {
+ 'LayoutTests/platform/mac': '29a1715a6470d5dd9486a142f609708de84cdac8',
+ 'LayoutTests/platform/win': '453e67177a75b2e79905154ece0efba6e5bfb65d',
+ 'LayoutTests/platform/mac-snowleopard': 'c43eaeb358f49d5e835236ae23b7e49d7f2b089f',
+ 'LayoutTests/platform/chromium-mac': 'a9ba153c700a94ae1b206d8e4a75a621a89b4554',
+ })
+
def test_chromium_covers_mac_win_linux(self):
self._assertOptimization({
'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
@@ -86,10 +99,10 @@ class BaselineOptimizerTest(unittest.TestCase):
def test_chromium_mac_redundant_with_apple_mac(self):
self._assertOptimization({
- 'LayoutTests/platform/chromium-mac-snowleopard': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
- 'LayoutTests/platform/mac-snowleopard': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+ 'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+ 'LayoutTests/platform/mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
}, {
- 'LayoutTests/platform/mac-snowleopard': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+ 'LayoutTests/platform/mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
})
def test_mac_future(self):
diff --git a/Tools/Scripts/webkitpy/common/checkout/changelog.py b/Tools/Scripts/webkitpy/common/checkout/changelog.py
index 55b05982b..3cfe25f80 100644
--- a/Tools/Scripts/webkitpy/common/checkout/changelog.py
+++ b/Tools/Scripts/webkitpy/common/checkout/changelog.py
@@ -39,20 +39,6 @@ import webkitpy.common.config.urls as config_urls
from webkitpy.common.system.deprecated_logging import log
-# FIXME: parse_bug_id should not be a free function.
-# FIXME: Where should this function live in the dependency graph?
-def parse_bug_id(message):
- if not message:
- return None
- match = re.search(config_urls.bug_url_short, message)
- if match:
- return int(match.group('bug_id'))
- match = re.search(config_urls.bug_url_long, message)
- if match:
- return int(match.group('bug_id'))
- return None
-
-
# FIXME: parse_bug_id_from_changelog should not be a free function.
# Parse the bug ID out of a Changelog message based on the format that is
# used by prepare-ChangeLog
@@ -67,7 +53,7 @@ def parse_bug_id_from_changelog(message):
return int(match.group('bug_id'))
# We weren't able to find a bug URL in the format used by prepare-ChangeLog. Fall back to the
# first bug URL found anywhere in the message.
- return parse_bug_id(message)
+ return config_urls.parse_bug_id(message)
class ChangeLogEntry(object):
@@ -147,7 +133,7 @@ class ChangeLogEntry(object):
@staticmethod
def _split_contributor_names(text):
- return re.split(r'\s*(?:,(?:\s+and\s+|&)?|(?:^|\s+)and\s+|[/+&])\s*', text)
+ return re.split(r'\s*(?:,(?:\s+and\s+|&)?|(?:^|\s+)and\s+|&&|[/+&])\s*', text)
def _fuzz_match_reviewers(self, reviewers_text_list):
if not reviewers_text_list:
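
Editor's note: the splitter change above adds '&&' to the separators recognized between contributor names (the new unit test below exercises it). The '&&' alternative must appear before the single-character '[/+&]' class so a double ampersand is consumed whole rather than producing an empty name. A quick standalone check:

    # Standalone check of the widened contributor-name splitter.
    import re

    def split_contributor_names(text):
        return re.split(r'\s*(?:,(?:\s+and\s+|&)?|(?:^|\s+)and\s+|&&|[/+&])\s*', text)

    # split_contributor_names('Adam Barth && Benjamin Poulain')
    # -> ['Adam Barth', 'Benjamin Poulain']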
diff --git a/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py b/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py
index 431c86701..52268423d 100644
--- a/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py
@@ -425,6 +425,8 @@ class ChangeLogTest(unittest.TestCase):
[('Zan Dobersek', 'zandobersek@gmail.com'), ('Philippe Normand', 'pnormand@igalia.com')])
self._assert_parse_authors('New Contributor <new@webkit.org> and Noob <noob@webkit.org>',
[('New Contributor', 'new@webkit.org'), ('Noob', 'noob@webkit.org')])
+ self._assert_parse_authors('Adam Barth <abarth@webkit.org> && Benjamin Poulain <bpoulain@apple.com>',
+ [('Adam Barth', 'abarth@webkit.org'), ('Benjamin Poulain', 'bpoulain@apple.com')])
def _assert_has_valid_reviewer(self, reviewer_line, expected):
self.assertEqual(self._entry_with_reviewer(reviewer_line).has_valid_reviewer(), expected)
diff --git a/Tools/Scripts/webkitpy/common/config/committers.py b/Tools/Scripts/webkitpy/common/config/committers.py
index f646db7a9..be0e61974 100644
--- a/Tools/Scripts/webkitpy/common/config/committers.py
+++ b/Tools/Scripts/webkitpy/common/config/committers.py
@@ -94,8 +94,10 @@ class Reviewer(Committer):
watchers_who_are_not_contributors = [
Account("Chromium Compositor Bugs", ["cc-bugs@google.com"], ""),
+ Account("Chromium Media Reviews", ["feature-media-reviews@chromium.org"], ""),
Account("David Levin", ["levin+threading@chromium.org"], ""),
Account("David Levin", ["levin+watchlist@chromium.org"], ""),
+ Account("Kent Tamura", ["tkent+wkapi@chromium.org"], ""),
]
@@ -105,19 +107,23 @@ watchers_who_are_not_contributors = [
contributors_who_are_not_committers = [
- Contributor("Adam Kallai", "kallai.adam@stud.u-szeged.hu", 'kadam'),
+ Contributor("Adam Kallai", "kadam@inf.u-szeged.hu", 'kadam'),
Contributor("Aharon Lanin", "aharon@google.com"),
Contributor("Alan Stearns", "stearns@adobe.com", 'astearns'),
+ Contributor("Alec Flett", ["alecflett@chromium.org", "alecflett@google.com"], "alecf"),
Contributor("Alexandre Elias", "aelias@chromium.org"),
Contributor("Alexey Marinichev", ["amarinichev@chromium.org", "amarinichev@google.com"], "amarinichev"),
+ Contributor("Allan Sandfeld Jensen", ["allan.jensen@nokia.com", "kde@carewolf.com", "sandfeld@kde.org"], "carewolf"),
Contributor("Andras Piroska", "pandras@inf.u-szeged.hu", "andris88"),
Contributor("Anne van Kesteren", "annevankesteren+webkit@gmail.com", "annevk"),
Contributor("Annie Sullivan", "sullivan@chromium.org", "annie"),
Contributor("Aryeh Gregor", "ayg@aryeh.name", "AryehGregor"),
- Contributor("Balazs Ankes", "ankes.balazs@stud.u-szeged.hu", 'abalazs'),
+ Contributor("Balazs Ankes", "bank@inf.u-szeged.hu", 'abalazs'),
+ Contributor("Brian Salomon", "bsalomon@google.com"),
Contributor("Commit Queue", "commit-queue@webkit.org"),
- Contributor("Dana Jansens", "danakj@chromium.org", "danakj"),
Contributor("Daniel Sievers", "sievers@chromium.org"),
+ Contributor("Dave Barton", "dbarton@mathscribe.com"),
+ Contributor("Dave Tharp", "dtharp@codeaurora.org", "dtharp"),
Contributor("David Barr", "davidbarr@chromium.org", "barrbrain"),
Contributor("David Dorwin", "ddorwin@chromium.org", "ddorwin"),
Contributor("David Reveman", "reveman@chromium.org", "reveman"),
@@ -129,21 +135,28 @@ contributors_who_are_not_committers = [
Contributor("Grace Kloba", "klobag@chromium.org", "klobag"),
Contributor("Greg Simon", "gregsimon@chromium.org", "gregsimon"),
Contributor("Gregg Tavares", ["gman@google.com", "gman@chromium.org"], "gman"),
+ Contributor("Hao Zheng", "zhenghao@chromium.org"),
Contributor("Ian Hickson", "ian@hixie.ch", "hixie"),
- Contributor("Janos Badics", "dicska@gmail.hu", 'dicska'),
+ Contributor("Janos Badics", "jbadics@inf.u-szeged.hu", 'dicska'),
+ Contributor("Jing Zhao", "jingzhao@chromium.org"),
Contributor("John Bates", ["jbates@google.com", "jbates@chromium.org"], "jbates"),
Contributor("John Bauman", ["jbauman@chromium.org", "jbauman@google.com"], "jbauman"),
- Contributor("Kaustubh Atrawalkar", ["kaustubh@motorola.com"], "silverroots"),
+ Contributor("John Grabowski", "jrg@chromium.org"),
Contributor("Kulanthaivel Palanichamy", "kulanthaivel@codeaurora.org", "kvel"),
- Contributor("Nandor Huszka", "huszka.nandor@stud.u-szeged.hu", "hnandor"),
+ Contributor(u"Michael Br\u00fcning", "michael.bruning@nokia.com", "mibrunin"),
+ Contributor("Min Qin", "qinmin@chromium.org"),
+ Contributor("Nandor Huszka", "hnandor@inf.u-szeged.hu", "hnandor"),
Contributor("Oliver Varga", ["voliver@inf.u-szeged.hu", "Varga.Oliver@stud.u-szeged.hu"], "TwistO"),
Contributor("Peter Gal", "galpeter@inf.u-szeged.hu", "elecro"),
Contributor("Peter Linss", "peter.linss@hp.com", "plinss"),
Contributor("Radar WebKit Bug Importer", "webkit-bug-importer@group.apple.com"),
- Contributor("Roland Takacs", "takacs.roland@stud.u-szeged.hu", "rtakacs"),
- Contributor("Szilard Ledan-Muntean", "muntean-ledan.szilard@stud.u-szeged.hu", "szledan"),
+ Contributor("Roland Takacs", "rtakacs@inf.u-szeged.hu", "rtakacs"),
+ Contributor("Szilard Ledan-Muntean", "szledan@inf.u-szeged.hu", "szledan"),
Contributor("Tab Atkins", ["tabatkins@google.com", "jackalmage@gmail.com"], "tabatkins"),
Contributor("Tamas Czene", ["tczene@inf.u-szeged.hu", "Czene.Tamas@stud.u-szeged.hu"], "tczene"),
+ Contributor("Terry Anderson", "tdanderson@chromium.org", "tdanderson"),
+ Contributor("Tien-Ren Chen", "trchen@chromium.org"),
+ Contributor("Tom Hudson", "tomhudson@google.com"),
Contributor("WebKit Review Bot", "webkit.review.bot@gmail.com", "sheriff-bot"),
Contributor("Wyatt Carss", ["wcarss@chromium.org", "wcarss@google.com"], "wcarss"),
Contributor("Zoltan Arvai", "zarvai@inf.u-szeged.hu", "azbest_hu"),
@@ -159,7 +172,6 @@ contributors_who_are_not_committers = [
committers_unable_to_review = [
Committer("Aaron Boodman", "aa@chromium.org", "aboodman"),
- Committer("Abhishek Arya", "inferno@chromium.org", "inferno-sec"),
Committer("Adam Klein", "adamk@chromium.org", "aklein"),
Committer("Adam Langley", "agl@chromium.org", "agl"),
Committer("Ademar de Souza Reis Jr", ["ademar.reis@gmail.com", "ademar@webkit.org"], "ademar"),
@@ -168,7 +180,6 @@ committers_unable_to_review = [
Committer("Alexander Kellett", ["lypanov@mac.com", "a-lists001@lypanov.net", "lypanov@kde.org"], "lypanov"),
Committer("Alexander Pavlov", "apavlov@chromium.org", "apavlov"),
Committer("Alexandru Chiculita", "achicu@adobe.com", "achicu"),
- Committer("Alexis Menard", ["alexis.menard@openbossa.org", "menard@kde.org", "alexis.menard@nokia.com"], "darktears"),
Committer("Alice Boxhall", "aboxhall@chromium.org", "aboxhall"),
Committer("Alok Priyadarshi", "alokp@chromium.org", "alokp"),
Committer("Ami Fischman", ["fischman@chromium.org", "fischman@google.com"], "fischman"),
@@ -185,6 +196,7 @@ committers_unable_to_review = [
Committer("Antoine Labour", "piman@chromium.org", "piman"),
Committer("Anton D'Auria", "adauria@apple.com", "antonlefou"),
Committer("Anton Muhin", "antonm@chromium.org", "antonm"),
+ Committer("Arko Saha", "arko@motorola.com", "arkos"),
Committer("Balazs Kelemen", "kbalazs@webkit.org", "kbalazs"),
Committer("Ben Murdoch", "benm@google.com", "benm"),
Committer("Ben Wells", "benwells@chromium.org", "benwells"),
@@ -205,6 +217,7 @@ committers_unable_to_review = [
Committer("Collin Jackson", "collinj@webkit.org", "collinjackson"),
Committer("Cris Neckar", "cdn@chromium.org", "cneckar"),
Committer("Dan Winship", "danw@gnome.org", "danw"),
+ Committer("Dana Jansens", "danakj@chromium.org", "danakj"),
Committer("Daniel Cheng", "dcheng@chromium.org", "dcheng"),
Committer("David Grogan", ["dgrogan@chromium.org", "dgrogan@google.com"], "dgrogan"),
Committer("David Smith", ["catfish.man@gmail.com", "dsmith@webkit.org"], "catfishman"),
@@ -222,6 +235,7 @@ committers_unable_to_review = [
Committer("Evan Stade", "estade@chromium.org", "estade"),
Committer("Fady Samuel", "fsamuel@chromium.org", "fsamuel"),
Committer("Feng Qian", "feng@chromium.org"),
+ Committer("Florin Malita", ["fmalita@chromium.org", "fmalita@google.com"], "fmalita"),
Committer("Fumitoshi Ukai", "ukai@chromium.org", "ukai"),
Committer("Gabor Loki", "loki@webkit.org", "loki04"),
Committer("Gabor Rapcsanyi", ["rgabor@webkit.org", "rgabor@inf.u-szeged.hu"], "rgabor"),
@@ -253,7 +267,6 @@ committers_unable_to_review = [
Committer("Jeremy Moskovich", ["playmobil@google.com", "jeremy@chromium.org"], "jeremymos"),
Committer("Jesus Sanchez-Palencia", ["jesus@webkit.org", "jesus.palencia@openbossa.org"], "jeez_"),
Committer("Jia Pu", "jpu@apple.com"),
- Committer("Jocelyn Turcotte", "jocelyn.turcotte@nokia.com", "jturcotte"),
Committer("Jochen Eisinger", "jochen@chromium.org", "jochen__"),
Committer("John Abd-El-Malek", "jam@chromium.org", "jam"),
Committer("John Gregg", ["johnnyg@google.com", "johnnyg@chromium.org"], "johnnyg"),
@@ -266,6 +279,7 @@ committers_unable_to_review = [
Committer("Julie Parent", ["jparent@google.com", "jparent@chromium.org"], "jparent"),
Committer("Jungshik Shin", "jshin@chromium.org"),
Committer("Justin Schuh", "jschuh@chromium.org", "jschuh"),
+ Committer("Kaustubh Atrawalkar", ["kaustubh@motorola.com"], "silverroots"),
Committer("Keishi Hattori", "keishi@webkit.org", "keishi"),
Committer("Kelly Norton", "knorton@google.com"),
Committer("Kenichi Ishibashi", "bashi@chromium.org", "bashi"),
@@ -285,10 +299,11 @@ committers_unable_to_review = [
Committer("Lucas Forschler", ["lforschler@apple.com"], "lforschler"),
Committer("Luke Macpherson", ["macpherson@chromium.org", "macpherson@google.com"], "macpherson"),
Committer("Mads Ager", "ager@chromium.org"),
- Committer("Mahesh Kulkarni", "mahesh.kulkarni@nokia.com", "maheshk"),
+ Committer("Mahesh Kulkarni", ["mahesh.kulkarni@nokia.com", "maheshk@webkit.org"], "maheshk"),
Committer("Marcus Voltis Bulach", "bulach@chromium.org"),
Committer("Mario Sanchez Prada", ["msanchez@igalia.com", "mario@webkit.org"], "msanchez"),
Committer("Mark Hahnenberg", "mhahnenberg@apple.com"),
+ Committer("Mary Wu", ["mary.wu@torchmobile.com.cn", "wwendy2007@gmail.com"], "marywu"),
Committer("Matt Delaney", "mdelaney@apple.com"),
Committer("Matt Lilek", ["mlilek@apple.com", "webkit@mattlilek.com", "pewtermoose@webkit.org"], "pewtermoose"),
Committer("Matt Perry", "mpcomplete@chromium.org"),
@@ -314,14 +329,16 @@ committers_unable_to_review = [
Committer("Peter Beverloo", ["peter@chromium.org", "beverloo@google.com"], "beverloo"),
Committer("Peter Kasting", ["pkasting@google.com", "pkasting@chromium.org"], "pkasting"),
Committer("Peter Varga", ["pvarga@webkit.org", "pvarga@inf.u-szeged.hu"], "stampho"),
+ Committer("Philip Rogers", ["pdr@google.com", "pdr@chromium.org"], "pdr"),
Committer("Pierre d'Herbemont", ["pdherbemont@free.fr", "pdherbemont@apple.com"], "pdherbemont"),
Committer("Pierre-Olivier Latour", "pol@apple.com", "pol"),
Committer("Pierre Rossi", "pierre.rossi@gmail.com", "elproxy"),
Committer("Pratik Solanki", "psolanki@apple.com", "psolanki"),
Committer("Qi Zhang", ["qi.2.zhang@nokia.com", "qi.zhang02180@gmail.com"], "qi"),
Committer("Rafael Antognolli", "antognolli@profusion.mobi", "antognolli"),
+ Committer("Rafael Brandao", "rafael.lobo@openbossa.org", "rafaelbrandao"),
Committer("Rafael Weinstein", "rafaelw@chromium.org", "rafaelw"),
- Committer("Raphael Kubo da Costa", ["kubo@profusion.mobi", "rakuco@FreeBSD.org"], "rakuco"),
+ Committer("Raphael Kubo da Costa", ["rakuco@webkit.org", "rakuco@FreeBSD.org"], "rakuco"),
Committer("Ravi Kasibhatla", "ravi.kasibhatla@motorola.com", "kphanee"),
Committer("Renata Hodovan", "reni@webkit.org", "reni"),
Committer("Robert Hogan", ["robert@webkit.org", "robert@roberthogan.net", "lists@roberthogan.net"], "mwenge"),
@@ -349,7 +366,7 @@ committers_unable_to_review = [
Committer("Vincent Scheib", "scheib@chromium.org", "scheib"),
Committer("Vitaly Repeshko", "vitalyr@chromium.org"),
Committer("William Siegrist", "wsiegrist@apple.com", "wms"),
- Committer("W. James MacLean", "wjmaclean@chromium.org", "wjmaclean"),
+ Committer("W. James MacLean", "wjmaclean@chromium.org", "seumas"),
Committer("Xianzhu Wang", ["wangxianzhu@chromium.org", "phnixwxz@gmail.com", "wangxianzhu@google.com"], "wangxianzhu"),
Committer("Xiaomei Ji", "xji@chromium.org", "xji"),
Committer("Yael Aharon", "yael.aharon@nokia.com", "yael"),
@@ -360,9 +377,11 @@ committers_unable_to_review = [
Committer("Yi Shen", ["yi.4.shen@nokia.com", "shenyi2006@gmail.com"]),
Committer("Yuta Kitamura", "yutak@chromium.org", "yutak"),
Committer("Yuzo Fujishima", "yuzo@google.com", "yuzo"),
+ Committer("Zalan Bujtas", ["zbujtas@gmail.com", "zalan.bujtas@nokia.com"], "zalan"),
Committer("Zeno Albisser", ["zeno@webkit.org", "zeno.albisser@nokia.com"], "zalbisser"),
Committer("Zhenyao Mo", "zmo@google.com", "zhenyao"),
Committer("Zoltan Horvath", ["zoltan@webkit.org", "hzoltan@inf.u-szeged.hu", "horvath.zoltan.6@stud.u-szeged.hu"], "zoltan"),
+ Committer(u"\u017dan Dober\u0161ek", "zandobersek@gmail.com", "zdobersek"),
]
@@ -372,6 +391,7 @@ committers_unable_to_review = [
reviewers_list = [
+ Reviewer("Abhishek Arya", "inferno@chromium.org", "inferno-sec"),
Reviewer("Ada Chan", "adachan@apple.com", "chanada"),
Reviewer("Adam Barth", "abarth@webkit.org", "abarth"),
Reviewer("Adam Roben", ["aroben@webkit.org", "aroben@apple.com"], "aroben"),
@@ -380,6 +400,7 @@ reviewers_list = [
Reviewer("Adrienne Walker", ["enne@google.com", "enne@chromium.org"], "enne"),
Reviewer("Alejandro G. Castro", ["alex@igalia.com", "alex@webkit.org"], "alexg__"),
Reviewer("Alexey Proskuryakov", ["ap@webkit.org", "ap@apple.com"], "ap"),
+ Reviewer("Alexis Menard", ["alexis.menard@openbossa.org", "menard@kde.org"], "darktears"),
Reviewer("Alice Liu", "alice.liu@apple.com", "aliu"),
Reviewer("Alp Toker", ["alp@nuanti.com", "alp@atoker.com", "alp@webkit.org"], "alp"),
Reviewer("Anders Carlsson", ["andersca@apple.com", "acarlsson@apple.com"], "andersca"),
@@ -394,7 +415,7 @@ reviewers_list = [
Reviewer("Brent Fulgham", "bfulgham@webkit.org", "bfulgham"),
Reviewer("Brian Weinstein", "bweinstein@apple.com", "bweinstein"),
Reviewer("Cameron Zwarich", ["zwarich@apple.com", "cwzwarich@apple.com", "cwzwarich@webkit.org"]),
- Reviewer("Chang Shu", ["cshu@webkit.org", "Chang.Shu@nokia.com"], "cshu"),
+ Reviewer("Chang Shu", ["cshu@webkit.org", "c.shu@sisa.samsung.com"], "cshu"),
Reviewer("Chris Blumenberg", "cblu@apple.com", "cblu"),
Reviewer("Chris Marrin", "cmarrin@apple.com", "cmarrin"),
Reviewer("Chris Fleizach", "cfleizach@apple.com", "cfleizach"),
@@ -431,6 +452,7 @@ reviewers_list = [
Reviewer("Jeremy Orlow", ["jorlow@webkit.org", "jorlow@chromium.org"], "jorlow"),
Reviewer("Jessie Berlin", ["jberlin@webkit.org", "jberlin@apple.com"], "jessieberlin"),
Reviewer("Jian Li", "jianli@chromium.org", "jianli"),
+ Reviewer("Jocelyn Turcotte", "jocelyn.turcotte@nokia.com", "jturcotte"),
Reviewer("John Sullivan", "sullivan@apple.com", "sullivan"),
Reviewer("Jon Honeycutt", "jhoneycutt@apple.com", "jhoneycutt"),
Reviewer("Joseph Pecoraro", ["joepeck@webkit.org", "pecoraro@apple.com"], "JoePeck"),
diff --git a/Tools/Scripts/webkitpy/common/config/ports.py b/Tools/Scripts/webkitpy/common/config/ports.py
index 4b3d90a1a..c086238fd 100644
--- a/Tools/Scripts/webkitpy/common/config/ports.py
+++ b/Tools/Scripts/webkitpy/common/config/ports.py
@@ -115,6 +115,9 @@ class DeprecatedPort(object):
def layout_tests_results_path(self):
return os.path.join(self.results_directory, "full_results.json")
+ def unit_tests_results_path(self):
+ return os.path.join(self.results_directory, "webkit_unit_tests_output.xml")
+
class MacPort(DeprecatedPort):
port_flag_name = "mac"
@@ -183,6 +186,9 @@ class ChromiumPort(DeprecatedPort):
command.append("--skip-failing-tests")
return command
+ def run_webkit_unit_tests_command(self):
+ return self.script_shell_command("run-chromium-webkit-unit-tests")
+
def run_javascriptcore_tests_command(self):
return None
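
Editor's note: taken together, the two hooks added to ports.py let a caller (presumably the EWS machinery) surface unit-test failures: run the binary, then read the XML it writes into the results directory. A hedged sketch of how they compose with UnitTestResults, which is added later in this patch; 'executive' and 'filesystem' are assumed to be the usual webkitpy host objects:

    # Hedged sketch: compose run_webkit_unit_tests_command() with
    # unit_tests_results_path() and UnitTestResults (added below).
    from webkitpy.common.net.unittestresults import UnitTestResults

    def failing_unit_tests(port, executive, filesystem):
        executive.run_command(port.run_webkit_unit_tests_command())
        xml_text = filesystem.read_text_file(port.unit_tests_results_path())
        return UnitTestResults.results_from_string(xml_text)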
diff --git a/Tools/Scripts/webkitpy/common/config/ports_mock.py b/Tools/Scripts/webkitpy/common/config/ports_mock.py
index d63efd2c6..1d1431115 100644
--- a/Tools/Scripts/webkitpy/common/config/ports_mock.py
+++ b/Tools/Scripts/webkitpy/common/config/ports_mock.py
@@ -36,6 +36,9 @@ class MockPort(object):
def layout_tests_results_path(self):
return "/mock-results/full_results.json"
+ def unit_tests_results_path(self):
+ return "/mock-results/webkit_unit_tests_output.xml"
+
def check_webkit_style_command(self):
return ["mock-check-webkit-style"]
diff --git a/Tools/Scripts/webkitpy/common/config/urls.py b/Tools/Scripts/webkitpy/common/config/urls.py
index 1f902f463..b547045f4 100644
--- a/Tools/Scripts/webkitpy/common/config/urls.py
+++ b/Tools/Scripts/webkitpy/common/config/urls.py
@@ -43,11 +43,39 @@ def chromium_results_zip_url(builder_name):
chromium_lkgr_url = "http://chromium-status.appspot.com/lkgr"
contribution_guidelines = "http://webkit.org/coding/contributing.html"
-bug_server_host = "bugs.webkit.org"
+bug_server_domain = "webkit.org"
+bug_server_host = "bugs." + bug_server_domain
_bug_server_regex = "https?://%s/" % re.sub('\.', '\\.', bug_server_host)
bug_server_url = "https://%s/" % bug_server_host
bug_url_long = _bug_server_regex + r"show_bug\.cgi\?id=(?P<bug_id>\d+)(&ctype=xml)?"
-bug_url_short = r"https?\://webkit\.org/b/(?P<bug_id>\d+)"
+bug_url_short = r"https?\://%s/b/(?P<bug_id>\d+)" % bug_server_domain
+
+attachment_url = _bug_server_regex + r"attachment\.cgi\?id=(?P<attachment_id>\d+)(&action=(?P<action>\w+))?"
+direct_attachment_url = r"https?://bug-(?P<bug_id>\d+)-attachments.%s/attachment\.cgi\?id=(?P<attachment_id>\d+)" % bug_server_domain
buildbot_url = "http://build.webkit.org"
chromium_buildbot_url = "http://build.chromium.org/p/chromium.webkit"
+
+
+def parse_bug_id(string):
+ if not string:
+ return None
+ match = re.search(bug_url_short, string)
+ if match:
+ return int(match.group('bug_id'))
+ match = re.search(bug_url_long, string)
+ if match:
+ return int(match.group('bug_id'))
+ return None
+
+
+def parse_attachment_id(string):
+ if not string:
+ return None
+ match = re.search(attachment_url, string)
+ if match:
+ return int(match.group('attachment_id'))
+ match = re.search(direct_attachment_url, string)
+ if match:
+ return int(match.group('attachment_id'))
+ return None
diff --git a/Tools/Scripts/webkitpy/common/config/urls_unittest.py b/Tools/Scripts/webkitpy/common/config/urls_unittest.py
new file mode 100644
index 000000000..74c224090
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/urls_unittest.py
@@ -0,0 +1,54 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from .urls import parse_bug_id, parse_attachment_id
+
+
+class URLsTest(unittest.TestCase):
+ def test_parse_bug_id(self):
+ # FIXME: These would be all better as doctests
+ self.assertEquals(12345, parse_bug_id("http://webkit.org/b/12345"))
+ self.assertEquals(12345, parse_bug_id("foo\n\nhttp://webkit.org/b/12345\nbar\n\n"))
+ self.assertEquals(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345"))
+
+ # Our url parser is super-fragile, but at least we're testing it.
+ self.assertEquals(None, parse_bug_id("http://www.webkit.org/b/12345"))
+ self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345"))
+
+ def test_parse_attachment_id(self):
+ self.assertEquals(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=review"))
+ self.assertEquals(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=edit"))
+ self.assertEquals(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=prettypatch"))
+ self.assertEquals(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=diff"))
+
+ # Direct attachment links are hosted from per-bug subdomains:
+ self.assertEquals(12345, parse_attachment_id("https://bug-23456-attachments.webkit.org/attachment.cgi?id=12345"))
+ # Make sure secure attachment URLs work too.
+ self.assertEquals(12345, parse_attachment_id("https://bug-23456-attachments.webkit.org/attachment.cgi?id=12345&t=Bqnsdkl9fs"))
diff --git a/Tools/Scripts/webkitpy/common/config/watchlist b/Tools/Scripts/webkitpy/common/config/watchlist
index dbdbd392d..6984db74d 100755
--- a/Tools/Scripts/webkitpy/common/config/watchlist
+++ b/Tools/Scripts/webkitpy/common/config/watchlist
@@ -20,9 +20,7 @@
},
"ChromiumPublicApi": {
"filename": r"Source/WebKit/chromium/public/"
- },
- "ChromiumPlatformApi": {
- "filename": r"Source/Platform/chromium/public/"
+ r"|Source/Platform/chromium/public/",
},
"AppleMacPublicApi": {
"filename": r"Source/WebCore/bindings/objc/PublicDOMInterfaces.h"
@@ -83,6 +81,9 @@
"Loader": {
"filename": r"Source/WebCore/loader/",
},
+ "Rendering": {
+ "filename": r"Source/WebCore/rendering/",
+ },
"StyleChecker": {
"filename": r"Tools/Scripts/webkitpy/style/",
},
@@ -127,8 +128,10 @@
"EFL": {
"filename": r"Source/WebKit/efl/"
r"|Source/WebCore/platform/efl/"
+ r"|Source/WTF/wtf/efl/"
r"|Tools/EWebLauncher"
- r"|Tools/DumpRenderTree/efl/",
+ r"|Tools/DumpRenderTree/efl/"
+ r"|LayoutTests/platform/efl/",
},
"CMake": {
"filename": r".*CMakeLists\w*\.txt"
@@ -140,44 +143,52 @@
},
"ScrollingCoordinator": {
"filename": r"Source/WebCore/page/scrolling/",
+ },
+ "WebKitGTKTranslations": {
+ "filename": r"Source/WebKit/gtk/po/",
+ },
+ "Media": {
+ "filename": r"(Source|LayoutTests)/.*([Mm]edia|[Aa]udio|[Vv]ideo)",
}
},
"CC_RULES": {
# Note: All email addresses listed must be registered with bugzilla.
# Specifically, levin@chromium.org and levin+threading@chromium.org are
# two different accounts as far as bugzilla is concerned.
+ "AppleMacPublicApi": [ "timothy@apple.com" ],
+ "CMake": [ "rakuco@webkit.org", ],
+ "CSS": [ "alexis.menard@openbossa.org", "macpherson@chromium.org", ],
"ChromiumDumpRenderTree": [ "tkent@chromium.org", ],
"ChromiumGraphics": [ "jamesr@chromium.org", "cc-bugs@google.com" ],
- "ChromiumPublicApi": [ "abarth@webkit.org", "fishd@chromium.org" ],
- "ChromiumPlatformApi": [ "abarth@webkit.org", "fishd@chromium.org", "jamesr@chromium.org" ],
- "AppleMacPublicApi": [ "timothy@apple.com" ],
+ "ChromiumPublicApi": [ "abarth@webkit.org", "dglazkov@chromium.org", "fishd@chromium.org", "jamesr@chromium.org", "tkent+wkapi@chromium.org" ],
+ "EFL": [ "rakuco@webkit.org", ],
"Forms": [ "tkent@chromium.org", ],
- "GStreamerGraphics": [ "alexis.menard@openbossa.org", "pnormand@igalia.com", "gns@gnome.org" ],
- "WebIDL": [ "abarth@webkit.org", "ojan@chromium.org" ],
- "StyleChecker": [ "levin@chromium.org", ],
- "ThreadingFiles|ThreadingUsage": [ "levin+threading@chromium.org", ],
- "WatchListScript": [ "levin+watchlist@chromium.org", ],
- "V8Bindings|BindingsScripts": [ "abarth@webkit.org", "japhet@chromium.org", "haraken@chromium.org" ],
"FrameLoader": [ "abarth@webkit.org", "japhet@chromium.org" ],
+ "GStreamerGraphics": [ "alexis.menard@openbossa.org", "pnormand@igalia.com", "gns@gnome.org", "mrobinson@webkit.org" ],
+ "GtkWebKit2PublicAPI": [ "cgarcia@igalia.com", "gns@gnome.org", "mrobinson@webkit.org" ],
"Loader": [ "japhet@chromium.org" ],
- "SecurityCritical": [ "abarth@webkit.org" ],
- "webkitpy": [ "abarth@webkit.org", "ojan@chromium.org" ],
- "TestFailures": [ "abarth@webkit.org", "dglazkov@chromium.org" ],
- "GtkWebKit2PublicAPI": [ "cgarcia@igalia.com", "gns@gnome.org" ],
+ "Media": [ "feature-media-reviews@chromium.org", "eric.carlson@apple.com" ],
"QtBuildSystem" : [ "vestbo@webkit.org", ],
- "QtWebKit2PublicAPI": [ "alexis.menard@openbossa.org", "zoltan@webkit.org", ],
"QtWebKit2PlatformSpecific": [ "alexis.menard@openbossa.org", "zoltan@webkit.org", ],
- "CSS": [ "alexis.menard@openbossa.org", "macpherson@chromium.org", ],
- "EFL": [ "kubo@profusion.mobi", ],
- "CMake": [ "kubo@profusion.mobi", ],
- "SoupNetwork": [ "kubo@profusion.mobi", ],
+ "QtWebKit2PublicAPI": [ "alexis.menard@openbossa.org", "zoltan@webkit.org", ],
+ "Rendering": [ "eric@webkit.org" ],
"ScrollingCoordinator": [ "andersca@apple.com", "jamesr@chromium.org", "tonikitoo@webkit.org" ],
+ "SecurityCritical": [ "abarth@webkit.org" ],
+ "SoupNetwork": [ "rakuco@webkit.org", "gns@gnome.org", "mrobinson@webkit.org", "danw@gnome.org" ],
+ "StyleChecker": [ "levin@chromium.org", ],
+ "TestFailures": [ "abarth@webkit.org", "dglazkov@chromium.org" ],
+ "ThreadingFiles|ThreadingUsage": [ "levin+threading@chromium.org", ],
+ "V8Bindings|BindingsScripts": [ "abarth@webkit.org", "japhet@chromium.org", "haraken@chromium.org" ],
+ "WatchListScript": [ "levin+watchlist@chromium.org", ],
+ "WebIDL": [ "abarth@webkit.org", "ojan@chromium.org" ],
+ "WebKitGTKTranslations": [ "gns@gnome.org", "mrobinson@webkit.org" ],
+ "webkitpy": [ "abarth@webkit.org", "ojan@chromium.org", "dpranke@chromium.org" ],
},
"MESSAGE_RULES": {
- "ChromiumPublicApi": [ "Please wait for approval from fishd@chromium.org before submitting "
- "because this patch contains changes to the Chromium public API.", ],
- "ChromiumPlatformApi": [ "Please wait for approval from fishd@chromium.org, abarth@webkit.org or jamesr@chromium.org before submitting "
- "because this patch contains changes to the Chromium platform API.", ],
+ "ChromiumPublicApi": [ "Please wait for approval from abarth@webkit.org, dglazkov@chromium.org, "
+ "fishd@chromium.org, jamesr@chromium.org or tkent@chromium.org before "
+ "submitting, as this patch contains changes to the Chromium public API. "
+ "See also https://trac.webkit.org/wiki/ChromiumWebKitAPI." ],
"AppleMacPublicApi": [ "Please wait for approval from timothy@apple.com (or another member "
"of the Apple Safari Team) before submitting "
"because this patch contains changes to the Apple Mac "
diff --git a/Tools/Scripts/webkitpy/common/find_files_unittest.py b/Tools/Scripts/webkitpy/common/find_files_unittest.py
index b9068df48..75beaf0b4 100644
--- a/Tools/Scripts/webkitpy/common/find_files_unittest.py
+++ b/Tools/Scripts/webkitpy/common/find_files_unittest.py
@@ -29,6 +29,7 @@
import sys
import unittest
+from webkitpy.common.system.filesystem import FileSystem
import find_files
@@ -55,7 +56,7 @@ class TestWinNormalize(unittest.TestCase):
def test_win(self):
# This tests the actual windows platform, to ensure we get the same
# results that we get in test_mocked_win().
- if sys.platform != 'win':
+ if sys.platform != 'win32':
return
self.assert_filesystem_normalizes(FileSystem())
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py
index de96fa160..986ce3bac 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py
@@ -32,7 +32,7 @@ import StringIO
from .bugzilla import Bugzilla, BugzillaQueries, EditUsersParser
-from webkitpy.common.checkout.changelog import parse_bug_id
+from webkitpy.common.config import urls
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.web_mock import MockBrowser
from webkitpy.thirdparty.mock import Mock
@@ -89,17 +89,11 @@ class BugzillaTest(unittest.TestCase):
self.assertEquals(None, bugs.attachment_url_for_id(None))
def test_parse_bug_id(self):
- # FIXME: These would be all better as doctests
+ # Test that we can parse the urls we produce.
bugs = Bugzilla()
- self.assertEquals(12345, parse_bug_id("http://webkit.org/b/12345"))
- self.assertEquals(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345"))
- self.assertEquals(12345, parse_bug_id(bugs.short_bug_url_for_bug_id(12345)))
- self.assertEquals(12345, parse_bug_id(bugs.bug_url_for_bug_id(12345)))
- self.assertEquals(12345, parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True)))
-
- # Our bug parser is super-fragile, but at least we're testing it.
- self.assertEquals(None, parse_bug_id("http://www.webkit.org/b/12345"))
- self.assertEquals(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345"))
+ self.assertEquals(12345, urls.parse_bug_id(bugs.short_bug_url_for_bug_id(12345)))
+ self.assertEquals(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345)))
+ self.assertEquals(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True)))
_bug_xml = """
<bug>
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
index 1cb2bddf9..159b0077d 100644
--- a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
@@ -473,30 +473,3 @@ class BuildBot(object):
if len(builders_succeeded_in_future) == len(builder_revisions) and len(builders_succeeded_in_past) == len(builder_revisions):
return revision
return None
-
- def last_green_revision(self, builder_name_regex):
- compiled_builder_name_regex = re.compile(builder_name_regex, flags=re.IGNORECASE)
- builders = [builder for builder in self.builders() if compiled_builder_name_regex.search(builder.name())]
- if len(builders) > 10:
- return '"%s" matches too many bots' % builder_name_regex
- elif not len(builders):
- return '"%s" doesn\'t match any bot' % builder_name_regex
-
- builder_revisions = {}
- for builder in builders:
- builder_revisions[builder] = self._revisions_for_builder(builder)
-
- result = ''
- revision_with_all_builders = self._find_green_revision(builder_revisions)
- if revision_with_all_builders:
- result += 'The last known green revision is %d\n' % revision_with_all_builders
-
- for builder in builders:
- succeeded_revisions = [revision for revision, succeeded in builder_revisions[builder] if succeeded]
- if not succeeded_revisions:
- result += '%s has had no green revision in the last %d runs' % (builder.name(), len(builder_revisions[builder]))
- else:
- result += '%s: %d' % (builder.name(), max(succeeded_revisions))
- result += "\n"
-
- return result
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
index 34f714985..966fd5fc6 100644
--- a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py
@@ -90,9 +90,6 @@ class MockBuildBot(object):
self._mock_builder2_status,
]
- def last_green_revision(self, builder_name):
- return builder_name + ' 1: ' + str(9479) + '\n' + builder_name + ' 2: ' + str(9400)
-
def light_tree_on_fire(self):
self._mock_builder2_status["is_green"] = False
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
index 7e22cf5ff..355786ae0 100644
--- a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
@@ -434,31 +434,6 @@ class BuildBotTest(unittest.TestCase):
'Builder 3': [(1, True), (3, True), (7, True), (11, False), (12, True)],
}), 7)
- def test_last_green_revision(self):
- buildbot = BuildBot()
-
- def mock_builds_from_builders():
- return self._fake_builds_at_index(0)
-
- # Revision, is_green
- # Ordered from newest (highest number) to oldest.
- fake_builder1 = Builder("Fake Builder 1", None)
- fake_builder1.revisions = [(1, True), (3, False), (5, True), (10, True), (12, False)]
- fake_builder2 = Builder("Fake Builder 2", None)
- fake_builder2.revisions = [(1, True), (3, False), (7, True), (9, True), (12, False)]
- some_builder = Builder("Some Builder", None)
- some_builder.revisions = [(1, True), (3, True), (7, True), (11, False), (12, True)]
-
- buildbot.builders = lambda: [fake_builder1, fake_builder2, some_builder]
- buildbot._revisions_for_builder = lambda builder: builder.revisions
- buildbot._latest_builds_from_builders = mock_builds_from_builders
- self.assertEqual(buildbot.last_green_revision(''),
- "The last known green revision is 7\nFake Builder 1: 10\nFake Builder 2: 9\nSome Builder: 12\n")
-
- some_builder.revisions = [(1, False), (3, False)]
- self.assertEqual(buildbot.last_green_revision(''),
- "Fake Builder 1: 10\nFake Builder 2: 9\nSome Builder has had no green revision in the last 2 runs\n")
-
def _fetch_build(self, build_number):
if build_number == 5:
return "correct build"
diff --git a/Tools/Scripts/webkitpy/common/net/file_uploader.py b/Tools/Scripts/webkitpy/common/net/file_uploader.py
index 339045e6a..9b220b0d6 100644
--- a/Tools/Scripts/webkitpy/common/net/file_uploader.py
+++ b/Tools/Scripts/webkitpy/common/net/file_uploader.py
@@ -27,12 +27,11 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import codecs
import mimetypes
-import socket
+import time
import urllib2
-from webkitpy.common.net.networktransaction import NetworkTransaction
+from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout
def get_mime_type(filename):
@@ -92,9 +91,7 @@ class FileUploader(object):
def upload_as_multipart_form_data(self, filesystem, files, attrs):
file_objs = []
for filename, path in files:
- # FIXME: We should talk to the filesytem via a Host object.
- with codecs.open(path, "rb") as file:
- file_objs.append(('file', filename, file.read()))
+ file_objs.append(('file', filename, filesystem.read_binary_file(path)))
# FIXME: We should use the same variable names for the formal and actual parameters.
content_type, data = _encode_multipart_form_data(attrs, file_objs)
@@ -102,15 +99,10 @@ class FileUploader(object):
def _upload_data(self, content_type, data):
def callback():
+ # FIXME: Setting a timeout, either globally using socket.setdefaulttimeout()
+ # or in urlopen(), doesn't appear to work on Mac 10.5 with Python 2.7.
+ # For now we will ignore the timeout value and hope for the best.
request = urllib2.Request(self._url, data, {"Content-Type": content_type})
return urllib2.urlopen(request)
- orig_timeout = socket.getdefaulttimeout()
- response = None
- try:
- # FIXME: We shouldn't mutate global static state.
- socket.setdefaulttimeout(self._timeout_seconds)
- response = NetworkTransaction(timeout_seconds=self._timeout_seconds).run(callback)
- finally:
- socket.setdefaulttimeout(orig_timeout)
- return response
+ return NetworkTransaction(timeout_seconds=self._timeout_seconds).run(callback)
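
Editor's note: the rewrite above stops mutating global socket state and lets NetworkTransaction own the timeout/retry policy; NetworkTimeout (whose __str__ is fleshed out later in this patch) is what run() raises when it gives up. A minimal sketch of the pattern, using only names visible in this diff:

    # Minimal sketch of the NetworkTransaction callback pattern.
    import urllib2

    from webkitpy.common.net.networktransaction import (NetworkTimeout,
                                                        NetworkTransaction)

    def post(url, data, content_type, timeout_seconds=120):
        def callback():
            request = urllib2.Request(url, data, {"Content-Type": content_type})
            return urllib2.urlopen(request)
        try:
            return NetworkTransaction(timeout_seconds=timeout_seconds).run(callback)
        except NetworkTimeout:
            return None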
diff --git a/Tools/Scripts/webkitpy/common/net/irc/ircbot.py b/Tools/Scripts/webkitpy/common/net/irc/ircbot.py
index 061a43cf6..c8c1a38a9 100644
--- a/Tools/Scripts/webkitpy/common/net/irc/ircbot.py
+++ b/Tools/Scripts/webkitpy/common/net/irc/ircbot.py
@@ -73,8 +73,25 @@ class IRCBot(ircbot.SingleServerIRCBot, MessagePumpDelegate):
def on_pubmsg(self, connection, event):
nick = irclib.nm_to_n(event.source())
- request = event.arguments()[0].split(":", 1)
- if len(request) > 1 and irclib.irc_lower(request[0]) == irclib.irc_lower(self.connection.get_nickname()):
+ request = event.arguments()[0]
+
+ if not irclib.irc_lower(request).startswith(irclib.irc_lower(connection.get_nickname())):
+ return
+
+ if len(request) <= len(connection.get_nickname()):
+ return
+
+ # Some IRC clients, like xchat-gnome, default to using a comma
+ # when addressing someone.
+ vocative_separator = request[len(connection.get_nickname())]
+ if vocative_separator == ':':
+ request = request.split(':', 1)
+ elif vocative_separator == ',':
+ request = request.split(',', 1)
+ else:
+ return
+
+ if len(request) > 1:
response = self._delegate.irc_message_received(nick, request[1])
if response:
connection.privmsg(self._channel, response)
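
Editor's note: the reworked on_pubmsg() now accepts both "botname: command" and "botname, command" (xchat-gnome's default) and ignores everything else. A standalone sketch of the vocative parsing; str.lower() stands in for irclib.irc_lower(), which additionally applies IRC case-folding rules:

    # Standalone sketch of the vocative parsing in on_pubmsg() above.
    def extract_request(message, nickname):
        if not message.lower().startswith(nickname.lower()):
            return None
        if len(message) <= len(nickname):
            return None
        separator = message[len(nickname)]
        if separator not in (':', ','):
            return None
        return message[len(nickname) + 1:].strip()

    # extract_request('wkbot: rollout r12345', 'wkbot') -> 'rollout r12345'
    # extract_request('wkbot, hi', 'WKBot')             -> 'hi'
    # extract_request('wkbot rollout r12345', 'wkbot')  -> None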
diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults.py b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
index 05f8215d0..bd7211cca 100644
--- a/Tools/Scripts/webkitpy/common/net/layouttestresults.py
+++ b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
@@ -144,6 +144,7 @@ class LayoutTestResults(object):
def __init__(self, test_results):
self._test_results = test_results
self._failure_limit_count = None
+ self._unit_test_failures = []
# FIXME: run-webkit-tests should store the --exit-after-N-failures value
# (or some indication of early exit) somewhere in the results.html/results.json
@@ -172,4 +173,7 @@ class LayoutTestResults(object):
return self.results_matching_failure_types(failure_types)
def failing_tests(self):
- return [result.test_name for result in self.failing_test_results()]
+ return [result.test_name for result in self.failing_test_results()] + self._unit_test_failures
+
+ def add_unit_test_failures(self, unit_test_results):
+ self._unit_test_failures = unit_test_results
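
Editor's note: add_unit_test_failures() is the gluing point: a caller that has parsed webkit_unit_tests_output.xml can fold those failures into the same failing_tests() list existing consumers already read. Hypothetical usage:

    # Hypothetical usage of the new unit-test hook.
    from webkitpy.common.net.layouttestresults import LayoutTestResults

    results = LayoutTestResults([])
    results.add_unit_test_failures(["WebFrameTest.FAILS_DivAutoZoomParamsTest"])
    # results.failing_tests() now reports the unit-test failure alongside
    # any failing layout tests.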
diff --git a/Tools/Scripts/webkitpy/common/net/networktransaction.py b/Tools/Scripts/webkitpy/common/net/networktransaction.py
index c77989ba2..03b143267 100644
--- a/Tools/Scripts/webkitpy/common/net/networktransaction.py
+++ b/Tools/Scripts/webkitpy/common/net/networktransaction.py
@@ -1,9 +1,9 @@
# Copyright (C) 2010 Google Inc. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
-#
+#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
@@ -13,7 +13,7 @@
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -37,7 +37,8 @@ _log = logging.getLogger(__name__)
class NetworkTimeout(Exception):
- pass
+ def __str__(self):
+ return 'NetworkTimeout'
class NetworkTransaction(object):
diff --git a/Tools/Scripts/webkitpy/common/net/statusserver.py b/Tools/Scripts/webkitpy/common/net/statusserver.py
index 60153f8a9..2bda1ce88 100644
--- a/Tools/Scripts/webkitpy/common/net/statusserver.py
+++ b/Tools/Scripts/webkitpy/common/net/statusserver.py
@@ -125,8 +125,8 @@ class StatusServer:
def next_work_item(self, queue_name):
_log.debug("Fetching next work item for %s" % queue_name)
- patch_status_url = "%s/next-patch/%s" % (self.url, queue_name)
- return self._fetch_url(patch_status_url)
+ next_patch_url = "%s/next-patch/%s" % (self.url, queue_name)
+ return self._fetch_url(next_patch_url)
def _post_release_work_item(self, queue_name, patch):
release_patch_url = "%s/release-patch" % (self.url)
diff --git a/Tools/Scripts/webkitpy/common/net/unittestresults.py b/Tools/Scripts/webkitpy/common/net/unittestresults.py
new file mode 100644
index 000000000..bb82b0503
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/unittestresults.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2012, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import xml.dom.minidom
+
+from webkitpy.common.system.deprecated_logging import log
+
+
+class UnitTestResults(object):
+ @classmethod
+    def results_from_string(cls, string):
+ if not string:
+ return None
+ try:
+ dom = xml.dom.minidom.parseString(string)
+ failures = []
+ for testcase in dom.getElementsByTagName('testcase'):
+ if testcase.getElementsByTagName('failure').length != 0:
+ testname = testcase.getAttribute('name')
+ classname = testcase.getAttribute('classname')
+ failures.append("%s.%s" % (classname, testname))
+ return failures
+ except xml.parsers.expat.ExpatError, e:
+ log("XML error %s parsing unit test output" % str(e))
+ return None
diff --git a/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py b/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py
new file mode 100644
index 000000000..f8852062c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2012, Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from unittestresults import UnitTestResults
+
+
+class UnitTestResultsTest(unittest.TestCase):
+
+ def test_nostring(self):
+ self.assertEquals(None, UnitTestResults.results_from_string(None))
+
+ def test_emptystring(self):
+ self.assertEquals(None, UnitTestResults.results_from_string(""))
+
+ def test_nofailures(self):
+ no_failures_xml = """<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="3" failures="0" disabled="0" errors="0" time="11.35" name="AllTests">
+ <testsuite name="RenderTableCellDeathTest" tests="3" failures="0" disabled="0" errors="0" time="0.677">
+ <testcase name="CanSetColumn" status="run" time="0.168" classname="RenderTableCellDeathTest" />
+ <testcase name="CrashIfSettingUnsetColumnIndex" status="run" time="0.129" classname="RenderTableCellDeathTest" />
+ <testcase name="CrashIfSettingUnsetRowIndex" status="run" time="0.123" classname="RenderTableCellDeathTest" />
+ </testsuite>
+</testsuites>"""
+ self.assertEquals([], UnitTestResults.results_from_string(no_failures_xml))
+
+ def test_onefailure(self):
+ one_failure_xml = """<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="4" failures="1" disabled="0" errors="0" time="11.35" name="AllTests">
+ <testsuite name="RenderTableCellDeathTest" tests="4" failures="1" disabled="0" errors="0" time="0.677">
+ <testcase name="CanSetColumn" status="run" time="0.168" classname="RenderTableCellDeathTest" />
+ <testcase name="CrashIfSettingUnsetColumnIndex" status="run" time="0.129" classname="RenderTableCellDeathTest" />
+ <testcase name="CrashIfSettingUnsetRowIndex" status="run" time="0.123" classname="RenderTableCellDeathTest" />
+ <testcase name="FAILS_DivAutoZoomParamsTest" status="run" time="0.02" classname="WebFrameTest">
+ <failure message="Value of: scale&#x0A; Actual: 4&#x0A;Expected: 1" type=""><![CDATA[../../Source/WebKit/chromium/tests/WebFrameTest.cpp:191
+Value of: scale
+ Actual: 4
+Expected: 1]]></failure>
+ </testcase>
+ </testsuite>
+</testsuites>"""
+ expected = ["WebFrameTest.FAILS_DivAutoZoomParamsTest"]
+ self.assertEquals(expected, UnitTestResults.results_from_string(one_failure_xml))
+
+ def test_multiple_failures_per_test(self):
+ multiple_failures_per_test_xml = """<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="4" failures="2" disabled="0" errors="0" time="11.35" name="AllTests">
+ <testsuite name="UnitTests" tests="4" failures="2" disable="0" errors="0" time="10.0">
+ <testcase name="TestOne" status="run" time="0.5" classname="ClassOne">
+ <failure message="Value of: pi&#x0A; Actual: 3&#x0A;Expected: 3.14" type=""><![CDATA[../../Source/WebKit/chromium/tests/ClassOneTest.cpp:42
+Value of: pi
+ Actual: 3
+Expected: 3.14]]></failure>
+ </testcase>
+ <testcase name="TestTwo" status="run" time="0.5" classname="ClassTwo">
+ <failure message="Value of: e&#x0A; Actual: 2&#x0A;Expected: 2.71" type=""><![CDATA[../../Source/WebKit/chromium/tests/ClassTwoTest.cpp:30
+Value of: e
+ Actual: 2
+Expected: 2.71]]></failure>
+ <failure message="Value of: tau&#x0A; Actual: 6&#x0A;Expected: 6.28" type=""><![CDATA[../../Source/WebKit/chromium/tests/ClassTwoTest.cpp:55
+Value of: tau
+ Actual: 6
+Expected: 6.28]]></failure>
+ </testcase>
+ </testsuite>
+</testsuites>"""
+ expected = ["ClassOne.TestOne", "ClassTwo.TestTwo"]
+ self.assertEquals(expected, UnitTestResults.results_from_string(multiple_failures_per_test_xml))
+
+
+if __name__ == '__main__':
+ unittest.main()
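
The implementation under test is not shown in this diff; a minimal UnitTestResults.results_from_string consistent with the cases above (class layout assumed, not the committed code verbatim) could parse the gtest-style XML with xml.dom.minidom:

import xml.dom.minidom

class UnitTestResults(object):
    @classmethod
    def results_from_string(cls, string):
        # None or empty input yields None, per test_nostring/test_emptystring.
        if not string:
            return None
        failures = []
        dom = xml.dom.minidom.parseString(string)
        for testcase in dom.getElementsByTagName('testcase'):
            # A testcase with one or more <failure> children is reported once,
            # as "classname.name", matching test_multiple_failures_per_test.
            if testcase.getElementsByTagName('failure'):
                failures.append('%s.%s' % (testcase.getAttribute('classname'),
                                           testcase.getAttribute('name')))
        return failures
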
diff --git a/Tools/Scripts/webkitpy/common/system/crashlogs.py b/Tools/Scripts/webkitpy/common/system/crashlogs.py
index a6b6575f6..0dd37d255 100644
--- a/Tools/Scripts/webkitpy/common/system/crashlogs.py
+++ b/Tools/Scripts/webkitpy/common/system/crashlogs.py
@@ -26,51 +26,46 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import os
import re
-import sys
class CrashLogs(object):
- def __init__(self, filesystem):
- self._filesystem = filesystem
+ def __init__(self, host):
+ self._host = host
- def find_newest_log(self, process_name, pid=None):
- if sys.platform == "darwin":
- return self._find_newest_log_darwin(process_name, pid)
+ def find_newest_log(self, process_name, pid=None, include_errors=False, newer_than=None):
+ if self._host.platform.is_mac():
+ return self._find_newest_log_darwin(process_name, pid, include_errors, newer_than)
+ return None
def _log_directory_darwin(self):
- log_directory = self._filesystem.expanduser("~")
- log_directory = self._filesystem.join(log_directory, "Library", "Logs")
- if self._filesystem.exists(self._filesystem.join(log_directory, "DiagnosticReports")):
- log_directory = self._filesystem.join(log_directory, "DiagnosticReports")
+ log_directory = self._host.filesystem.expanduser("~")
+ log_directory = self._host.filesystem.join(log_directory, "Library", "Logs")
+ if self._host.filesystem.exists(self._host.filesystem.join(log_directory, "DiagnosticReports")):
+ log_directory = self._host.filesystem.join(log_directory, "DiagnosticReports")
else:
- log_directory = self._filesystem.join(log_directory, "CrashReporter")
+ log_directory = self._host.filesystem.join(log_directory, "CrashReporter")
return log_directory
- def _find_newest_log_darwin(self, process_name, pid):
+ def _find_newest_log_darwin(self, process_name, pid, include_errors, newer_than):
def is_crash_log(fs, dirpath, basename):
return basename.startswith(process_name + "_") and basename.endswith(".crash")
log_directory = self._log_directory_darwin()
- logs = self._filesystem.files_under(log_directory, file_filter=is_crash_log)
- if not logs:
- return None
+ logs = self._host.filesystem.files_under(log_directory, file_filter=is_crash_log)
first_line_regex = re.compile(r'^Process:\s+(?P<process_name>.*) \[(?P<pid>\d+)\]$')
+ errors = ''
for path in reversed(sorted(logs)):
- try:
- with self._filesystem.open_text_file_for_reading(path) as f:
- first_line = f.readline()
+ if not newer_than or self._host.filesystem.mtime(path) > newer_than:
+ try:
+ f = self._host.filesystem.read_text_file(path)
+ match = first_line_regex.match(f[0:f.find('\n')])
+ if match and match.group('process_name') == process_name and (pid is None or int(match.group('pid')) == pid):
+ return errors + f
+ except IOError, e:
+ if include_errors:
+ errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
- match = first_line_regex.match(first_line)
- if not match:
- continue
- if match.group('process_name') != process_name:
- continue
- if pid is not None and int(match.group('pid')) != pid:
- continue
-
- f.seek(0, os.SEEK_SET)
- return f.read()
- except IOError:
- continue
+ if include_errors and errors:
+ return errors
+ return None
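
A usage sketch mirroring the unit test updated below (paths and report contents illustrative): the Darwin reader matches a report's first line against 'Process: <name> [<pid>]' before returning its full text.

from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost_mock import MockSystemHost

files = {
    '/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash':
        'Process:         DumpRenderTree [28529]\n... rest of the report ...\n',
}
crash_logs = CrashLogs(MockSystemHost(filesystem=MockFileSystem(files)))
log = crash_logs.find_newest_log('DumpRenderTree', pid=28529)
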
diff --git a/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py b/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
index ab1a6c2ad..d93feec0e 100644
--- a/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
@@ -22,10 +22,11 @@
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
-import sys
-from webkitpy.common.system.crashlogs import *
+from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.thirdparty.mock import Mock
@@ -75,7 +76,7 @@ class CrashLogsTest(unittest.TestCase):
self.assertEqual(a.splitlines(), b.splitlines())
def test_find_log_darwin(self):
- if sys.platform != "darwin":
+ if not SystemHost().platform.is_mac():
return
older_mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 28528)
@@ -91,7 +92,7 @@ class CrashLogsTest(unittest.TestCase):
files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150722_quadzen.crash'] = other_process_mock_crash_report
files['/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150723_quadzen.crash'] = misformatted_mock_crash_report
filesystem = MockFileSystem(files)
- crash_logs = CrashLogs(filesystem)
+ crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
log = crash_logs.find_newest_log("DumpRenderTree")
self.assertLinesEqual(log, newer_mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28529)
@@ -100,3 +101,12 @@ class CrashLogsTest(unittest.TestCase):
self.assertLinesEqual(log, mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28531)
self.assertEqual(log, None)
+ log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
+ self.assertEqual(log, None)
+
+ def bad_read(path):
+ raise IOError('No such file or directory')
+
+ filesystem.read_text_file = bad_read
+ log = crash_logs.find_newest_log("DumpRenderTree", 28531, include_errors=True)
+ self.assertTrue('No such file or directory' in log)
diff --git a/Tools/Scripts/webkitpy/common/system/executive.py b/Tools/Scripts/webkitpy/common/system/executive.py
index 06f870c72..43dcbca1b 100644
--- a/Tools/Scripts/webkitpy/common/system/executive.py
+++ b/Tools/Scripts/webkitpy/common/system/executive.py
@@ -32,7 +32,6 @@ import ctypes
import errno
import logging
import os
-import platform
import StringIO
import signal
import subprocess
@@ -255,16 +254,14 @@ class Executive(object):
def check_running_pid(self, pid):
"""Return True if pid is alive, otherwise return False."""
- if sys.platform.startswith('linux') or sys.platform in ('darwin', 'cygwin'):
- try:
- os.kill(pid, 0)
- return True
- except OSError:
- return False
- elif sys.platform == 'win32':
+ if sys.platform == 'win32':
return self._win32_check_running_pid(pid)
- assert(False)
+ try:
+ os.kill(pid, 0)
+ return True
+ except OSError:
+ return False
def running_pids(self, process_name_filter=None):
if not process_name_filter:
diff --git a/Tools/Scripts/webkitpy/common/system/executive_mock.py b/Tools/Scripts/webkitpy/common/system/executive_mock.py
index a76268129..d57a5c480 100644
--- a/Tools/Scripts/webkitpy/common/system/executive_mock.py
+++ b/Tools/Scripts/webkitpy/common/system/executive_mock.py
@@ -27,15 +27,20 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
+import StringIO
from webkitpy.common.system.deprecated_logging import log
from webkitpy.common.system.executive import ScriptError
class MockProcess(object):
- def __init__(self):
+ def __init__(self, stdout='MOCK STDOUT\n'):
self.pid = 42
+ self.stdout = StringIO.StringIO(stdout)
+ self.stdin = StringIO.StringIO()
+ def wait(self):
+ return
# FIXME: This should be unified with MockExecutive2
class MockExecutive(object):
@@ -52,6 +57,7 @@ class MockExecutive(object):
self._should_throw_when_run = should_throw_when_run or set()
# FIXME: Once executive wraps os.getpid() we can just use a static pid for "this" process.
self._running_pids = [os.getpid()]
+ self._proc = None
def check_running_pid(self, pid):
return pid in self._running_pids
@@ -89,9 +95,18 @@ class MockExecutive(object):
def cpu_count(self):
return 2
- def popen(self, *args, **kwargs):
- # FIXME: Implement logging when self._should_log is set.
- return MockProcess()
+ def popen(self, args, cwd=None, env=None, **kwargs):
+ if self._should_log:
+ cwd_string = ""
+ if cwd:
+ cwd_string = ", cwd=%s" % cwd
+ env_string = ""
+ if env:
+ env_string = ", env=%s" % env
+ log("MOCK popen: %s%s%s" % (args, cwd_string, env_string))
+ if not self._proc:
+ self._proc = MockProcess()
+ return self._proc
class MockExecutive2(object):
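
The cached-process behaviour added to popen() above, illustrated (a sketch; MockExecutive's other constructor arguments are unchanged):

from webkitpy.common.system.executive_mock import MockExecutive

executive = MockExecutive(should_log=True)
proc = executive.popen(['echo', 'hi'], cwd='/tmp')  # logs "MOCK popen: ..."
print(proc.stdout.read())                  # 'MOCK STDOUT\n' via StringIO
print(executive.popen(['again']) is proc)  # True: the single MockProcess is reused
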
diff --git a/Tools/Scripts/webkitpy/common/system/file_lock.py b/Tools/Scripts/webkitpy/common/system/file_lock.py
index b4bfffc40..c542777f2 100644
--- a/Tools/Scripts/webkitpy/common/system/file_lock.py
+++ b/Tools/Scripts/webkitpy/common/system/file_lock.py
@@ -43,20 +43,20 @@ class FileLock(object):
self._max_wait_time_sec = max_wait_time_sec
def _create_lock(self):
- if sys.platform.startswith('linux') or sys.platform in ('darwin', 'cygwin'):
- import fcntl
- fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB)
- elif sys.platform == 'win32':
+ if sys.platform == 'win32':
import msvcrt
msvcrt.locking(self._lock_file_descriptor, msvcrt.LK_NBLCK, 32)
+ else:
+ import fcntl
+ fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB)
def _remove_lock(self):
- if sys.platform.startswith('linux') or sys.platform in ('darwin', 'cygwin'):
- import fcntl
- fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_UN)
- elif sys.platform == 'win32':
+ if sys.platform == 'win32':
import msvcrt
msvcrt.locking(self._lock_file_descriptor, msvcrt.LK_UNLCK, 32)
+ else:
+ import fcntl
+ fcntl.flock(self._lock_file_descriptor, fcntl.LOCK_UN)
def acquire_lock(self):
self._lock_file_descriptor = os.open(self._lock_file_path, os.O_TRUNC | os.O_CREAT)
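
Condensed, the platform split now reads "win32, else POSIX"; a self-contained sketch of the non-blocking lock pair (lock-file path illustrative):

import os
import sys

def lock_fd(fd):
    if sys.platform == 'win32':
        import msvcrt
        msvcrt.locking(fd, msvcrt.LK_NBLCK, 32)   # non-blocking, first 32 bytes
    else:
        import fcntl
        fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)  # exclusive, non-blocking

def unlock_fd(fd):
    if sys.platform == 'win32':
        import msvcrt
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 32)
    else:
        import fcntl
        fcntl.flock(fd, fcntl.LOCK_UN)

fd = os.open('/tmp/example.lock', os.O_TRUNC | os.O_CREAT)
lock_fd(fd)
unlock_fd(fd)
os.close(fd)
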
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem.py b/Tools/Scripts/webkitpy/common/system/filesystem.py
index 60b680c06..687a31322 100644
--- a/Tools/Scripts/webkitpy/common/system/filesystem.py
+++ b/Tools/Scripts/webkitpy/common/system/filesystem.py
@@ -55,6 +55,9 @@ class FileSystem(object):
def abspath(self, path):
return os.path.abspath(path)
+ def realpath(self, path):
+ return os.path.realpath(path)
+
def path_to_module(self, module_name):
"""A wrapper for all calls to __file__ to allow easy unit testing."""
# FIXME: This is the only use of sys in this file. It's possible this function should move elsewhere.
@@ -201,6 +204,8 @@ class FileSystem(object):
f.write(contents)
def open_text_file_for_reading(self, path):
+ # Note: There appears to be an issue with the returned file objects
+ # not being seekable. See http://stackoverflow.com/questions/1510188/can-seek-and-tell-work-with-utf-8-encoded-documents-in-python .
return codecs.open(path, 'r', 'utf8')
def open_text_file_for_writing(self, path):
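
The seekability caveat noted above is why callers such as CrashLogs now read whole files instead of seeking; FileSystem.read_text_file follows essentially this pattern (sketch):

import codecs

def read_text_file(path):
    # Reading once and slicing in memory sidesteps seek()/tell() on the
    # codecs wrapper, whose offsets are byte-based and can land inside a
    # multi-byte UTF-8 sequence.
    with codecs.open(path, 'r', 'utf8') as f:
        return f.read()
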
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
index 2ff688af7..d4a955080 100644
--- a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
+++ b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
@@ -82,6 +82,9 @@ class MockFileSystem(object):
return self.normpath(path)
return self.abspath(self.join(self.cwd, path))
+ def realpath(self, path):
+ return self.abspath(path)
+
def basename(self, path):
return self._split(path)[1]
@@ -129,7 +132,7 @@ class MockFileSystem(object):
file_filter = file_filter or filter_all
files = []
if self.isfile(path):
- if file_filter(self, self.dirname(path), self.basename(path)):
+ if file_filter(self, self.dirname(path), self.basename(path)) and self.files[path] is not None:
files.append(path)
return files
@@ -149,7 +152,7 @@ class MockFileSystem(object):
continue
dirpath, basename = self._split(filename)
- if file_filter(self, dirpath, basename):
+ if file_filter(self, dirpath, basename) and self.files[filename] is not None:
files.append(filename)
return files
@@ -275,7 +278,7 @@ class MockFileSystem(object):
def normpath(self, path):
# This function is called a lot, so we try to optimize the common cases
# instead of always calling _slow_but_correct_normpath(), above.
- if '..' in path:
+ if '..' in path or '/./' in path:
# This doesn't happen very often; don't bother trying to optimize it.
return self._slow_but_correct_normpath(path)
if not path:
diff --git a/Tools/Scripts/webkitpy/common/system/platforminfo.py b/Tools/Scripts/webkitpy/common/system/platforminfo.py
index dd6d6844c..22cafbbee 100644
--- a/Tools/Scripts/webkitpy/common/system/platforminfo.py
+++ b/Tools/Scripts/webkitpy/common/system/platforminfo.py
@@ -48,6 +48,8 @@ class PlatformInfo(object):
self.os_name = self._determine_os_name(sys_module.platform)
if self.os_name == 'linux':
self.os_version = self._determine_linux_version()
+ if self.os_name == 'freebsd':
+ self.os_version = platform_module.release()
if self.os_name.startswith('mac'):
self.os_version = self._determine_mac_version(platform_module.mac_ver()[0])
if self.os_name.startswith('win'):
@@ -62,6 +64,9 @@ class PlatformInfo(object):
def is_linux(self):
return self.os_name == 'linux'
+ def is_freebsd(self):
+ return self.os_name == 'freebsd'
+
def display_name(self):
# platform.platform() returns Darwin information for Mac, which is just confusing.
if self.is_mac():
@@ -93,6 +98,8 @@ class PlatformInfo(object):
return 'linux'
if sys_platform in ('win32', 'cygwin'):
return 'win'
+ if sys_platform.startswith('freebsd'):
+ return 'freebsd'
raise AssertionError('unrecognized platform string "%s"' % sys_platform)
def _determine_mac_version(self, mac_version_string):
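
The FreeBSD branch added above keys off the interpreter's platform string plus the release string; standalone (values illustrative):

import platform
import sys

# sys.platform carries a major version on FreeBSD, e.g. 'freebsd8', 'freebsd9'.
if sys.platform.startswith('freebsd'):
    os_name = 'freebsd'
    os_version = platform.release()  # e.g. '9.0-RELEASE', per the tests below
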
diff --git a/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py b/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py
index 7302761de..c953aa185 100644
--- a/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py
+++ b/Tools/Scripts/webkitpy/common/system/platforminfo_mock.py
@@ -28,8 +28,9 @@
class MockPlatformInfo(object):
- os_name = 'mac'
- os_version = 'snowleopard'
+ def __init__(self, os_name='mac', os_version='snowleopard'):
+ self.os_name = os_name
+ self.os_version = os_version
def is_mac(self):
return self.os_name == 'mac'
@@ -40,6 +41,9 @@ class MockPlatformInfo(object):
def is_win(self):
return self.os_name == 'win'
+ def is_freebsd(self):
+ return self.os_name == 'freebsd'
+
def display_name(self):
return "MockPlatform 1.0"
diff --git a/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py b/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
index ac9cfb0d7..5a1f85fc3 100644
--- a/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
@@ -45,7 +45,7 @@ def fake_sys(platform_str='darwin', windows_version_tuple=None):
return FakeSysModule()
-def fake_platform(mac_version_string='10.6.3'):
+def fake_platform(mac_version_string='10.6.3', release_string='bar'):
class FakePlatformModule(object):
def mac_ver(self):
@@ -54,6 +54,9 @@ def fake_platform(mac_version_string='10.6.3'):
def platform(self):
return 'foo'
+ def release(self):
+ return release_string
+
return FakePlatformModule()
@@ -75,7 +78,7 @@ class TestPlatformInfo(unittest.TestCase):
self.assertNotEquals(info.os_name, '')
self.assertNotEquals(info.os_version, '')
self.assertNotEquals(info.display_name(), '')
- self.assertTrue(info.is_mac() or info.is_win() or info.is_linux())
+ self.assertTrue(info.is_mac() or info.is_win() or info.is_linux() or info.is_freebsd())
if info.is_mac():
self.assertTrue(info.total_bytes_memory() > 0)
@@ -89,29 +92,41 @@ class TestPlatformInfo(unittest.TestCase):
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
+ self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('linux3'))
self.assertTrue(info.is_linux())
self.assertFalse(info.is_mac())
self.assertFalse(info.is_win())
+ self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('darwin'), fake_platform('10.6.3'))
self.assertEquals(info.os_name, 'mac')
self.assertFalse(info.is_linux())
self.assertTrue(info.is_mac())
self.assertFalse(info.is_win())
+ self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
self.assertEquals(info.os_name, 'win')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertTrue(info.is_win())
+ self.assertFalse(info.is_freebsd())
info = self.make_info(fake_sys('cygwin'), executive=fake_executive('6.1.7600'))
self.assertEquals(info.os_name, 'win')
self.assertFalse(info.is_linux())
self.assertFalse(info.is_mac())
self.assertTrue(info.is_win())
+ self.assertFalse(info.is_freebsd())
+
+ info = self.make_info(fake_sys('freebsd8'))
+ self.assertEquals(info.os_name, 'freebsd')
+ self.assertFalse(info.is_linux())
+ self.assertFalse(info.is_mac())
+ self.assertFalse(info.is_win())
+ self.assertTrue(info.is_freebsd())
self.assertRaises(AssertionError, self.make_info, fake_sys('vms'))
@@ -124,6 +139,9 @@ class TestPlatformInfo(unittest.TestCase):
self.assertEquals(self.make_info(fake_sys('linux2')).os_version, 'lucid')
+ self.assertEquals(self.make_info(fake_sys('freebsd8'), fake_platform('', '8.3-PRERELEASE')).os_version, '8.3-PRERELEASE')
+ self.assertEquals(self.make_info(fake_sys('freebsd9'), fake_platform('', '9.0-RELEASE')).os_version, '9.0-RELEASE')
+
self.assertRaises(AssertionError, self.make_info, fake_sys('win32', tuple([5, 0, 1234])))
self.assertEquals(self.make_info(fake_sys('win32', tuple([6, 2, 1234]))).os_version, 'future')
self.assertEquals(self.make_info(fake_sys('win32', tuple([6, 1, 7600]))).os_version, '7sp0')
@@ -146,6 +164,9 @@ class TestPlatformInfo(unittest.TestCase):
info = self.make_info(fake_sys('linux2'))
self.assertNotEquals(info.display_name(), '')
+ info = self.make_info(fake_sys('freebsd9'))
+ self.assertNotEquals(info.display_name(), '')
+
def test_total_bytes_memory(self):
info = self.make_info(fake_sys('darwin'), fake_platform('10.6.3'), fake_executive('1234'))
self.assertEquals(info.total_bytes_memory(), 1234)
@@ -156,6 +177,9 @@ class TestPlatformInfo(unittest.TestCase):
info = self.make_info(fake_sys('linux2'))
self.assertEquals(info.total_bytes_memory(), None)
+ info = self.make_info(fake_sys('freebsd9'))
+ self.assertEquals(info.total_bytes_memory(), None)
+
def test_free_bytes_memory(self):
vmstat_output = ("Mach Virtual Memory Statistics: (page size of 4096 bytes)\n"
"Pages free: 1.\n"
@@ -169,6 +193,9 @@ class TestPlatformInfo(unittest.TestCase):
info = self.make_info(fake_sys('linux2'))
self.assertEquals(info.free_bytes_memory(), None)
+ info = self.make_info(fake_sys('freebsd9'))
+ self.assertEquals(info.free_bytes_memory(), None)
+
if __name__ == '__main__':
unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/system/systemhost_mock.py b/Tools/Scripts/webkitpy/common/system/systemhost_mock.py
index f3bc94139..4667b08b9 100644
--- a/Tools/Scripts/webkitpy/common/system/systemhost_mock.py
+++ b/Tools/Scripts/webkitpy/common/system/systemhost_mock.py
@@ -35,9 +35,9 @@ from webkitpy.common.system.workspace_mock import MockWorkspace
class MockSystemHost(object):
- def __init__(self, log_executive=False, executive_throws_when_run=None, os_name=None, os_version=None):
- self.executive = MockExecutive(should_log=log_executive, should_throw_when_run=executive_throws_when_run)
- self.filesystem = MockFileSystem()
+ def __init__(self, log_executive=False, executive_throws_when_run=None, os_name=None, os_version=None, executive=None, filesystem=None):
+ self.executive = executive or MockExecutive(should_log=log_executive, should_throw_when_run=executive_throws_when_run)
+ self.filesystem = filesystem or MockFileSystem()
self.user = MockUser()
self.platform = MockPlatformInfo()
if os_name:
diff --git a/Tools/Scripts/webkitpy/common/system/user.py b/Tools/Scripts/webkitpy/common/system/user.py
index 31b218c2f..e20405912 100644
--- a/Tools/Scripts/webkitpy/common/system/user.py
+++ b/Tools/Scripts/webkitpy/common/system/user.py
@@ -29,12 +29,16 @@
import getpass
import logging
import os
+import platform
import re
import shlex
import subprocess
import sys
import webbrowser
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.platforminfo import PlatformInfo
+
_log = logging.getLogger(__name__)
@@ -45,17 +49,17 @@ except ImportError:
if sys.platform != "win32":
# There is no readline module for win32, not much to do except cry.
_log.warn("Unable to import readline.")
- # FIXME: We could give instructions for non-mac platforms.
- # Lack of readline results in a very bad user experiance.
- if sys.platform == "darwin":
- _log.warn("If you're using MacPorts, try running:")
- _log.warn(" sudo port install py25-readline")
class User(object):
DEFAULT_NO = 'n'
DEFAULT_YES = 'y'
+ def __init__(self, platforminfo=None):
+ # We cannot get the PlatformInfo object from a SystemHost because
+ # User is part of SystemHost itself.
+ self._platforminfo = platforminfo or PlatformInfo(sys, platform, Executive())
+
# FIXME: These are @classmethods because bugzilla.py doesn't have a Tool object (thus no User instance).
@classmethod
def prompt(cls, message, repeat=1, raw_input=raw_input):
@@ -107,7 +111,7 @@ class User(object):
def edit_changelog(self, files):
edit_application = os.environ.get("CHANGE_LOG_EDIT_APPLICATION")
- if edit_application and sys.platform == "darwin":
+ if edit_application and self._platforminfo.is_mac():
# On Mac we support editing ChangeLogs using an application.
args = shlex.split(edit_application)
print "Using editor in the CHANGE_LOG_EDIT_APPLICATION environment variable."
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 150a50a4d..cd6c55a4d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -88,6 +88,7 @@ def interpret_test_failures(port, test_name, failures):
elif isinstance(failure, test_failures.FailureReftestMismatch):
test_dict['is_reftest'] = True
test_dict['ref_file'] = port.relative_test_filename(failure.reference_filename)
+ test_dict['image_diff_percent'] = failure.diff_percent
elif isinstance(failure, test_failures.FailureReftestMismatchDidNotOccur):
test_dict['is_mismatch_reftest'] = True
test_dict['ref_file'] = port.relative_test_filename(failure.reference_filename)
@@ -170,6 +171,9 @@ def summarize_results(port_obj, expectations, result_summary, retry_summary, tes
if result.has_stderr:
test_dict['has_stderr'] = True
+ if expectations.has_modifier(test_name, test_expectations.WONTFIX):
+ test_dict['wontfix'] = True
+
if result_type == test_expectations.PASS:
num_passes += 1
# FIXME: include passing tests that have stderr output.
@@ -281,12 +285,6 @@ class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
-
- # The per-test timeout in milliseconds, if no --time-out-ms option was
- # given to run_webkit_tests. This should correspond to the default timeout
- # in DumpRenderTree.
- DEFAULT_TEST_TIMEOUT_MS = 6 * 1000
-
def __init__(self, port, options, printer):
"""Initialize test runner data structures.
@@ -303,6 +301,7 @@ class Manager(object):
self._expectations = None
self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
+ self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
self._has_http_lock = False
@@ -359,18 +358,23 @@ class Manager(object):
def _http_tests(self):
return set(test for test in self._test_files if self._is_http_test(test))
+ def _is_perf_test(self, test):
+ return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
+
def parse_expectations(self):
"""Parse the expectations from the test_list files and return a data
structure holding them. Throws an error if the test_list files have
invalid syntax."""
port = self._port
+ tests_to_ignore = set(self._options.ignore_tests)
self._expectations = test_expectations.TestExpectations(
port,
self._test_files,
port.test_expectations(),
port.test_configuration(),
self._options.lint_test_files,
- port.test_expectations_overrides())
+ port.test_expectations_overrides(),
+ port.skipped_layout_tests(self._test_files).union(tests_to_ignore))
def _split_into_chunks_if_necessary(self, skipped):
if not self._options.run_chunk and not self._options.run_part:
@@ -554,8 +558,10 @@ class Manager(object):
def _test_requires_lock(self, test_file):
"""Return True if the test needs to be locked when
- running multiple copies of NRWTs."""
- return self._is_http_test(test_file)
+ running multiple copies of NRWTs. Perf tests are locked
+ because heavy load caused by running other tests in parallel
+        might cause some of them to time out."""
+ return self._is_http_test(test_file) or self._is_perf_test(test_file)
def _test_is_slow(self, test_file):
return self._expectations.has_modifier(test_file, test_expectations.SLOW)
@@ -708,7 +714,7 @@ class Manager(object):
self._printer.print_config("Running %d %ss in parallel over %d shards (%d locked)" %
(num_workers, driver_name, num_shards, num_locked_shards))
- def _run_tests(self, file_list, result_summary):
+ def _run_tests(self, file_list, result_summary, num_workers):
"""Runs the tests in the file_list.
Return: A tuple (interrupted, keyboard_interrupted, thread_timings,
@@ -734,7 +740,7 @@ class Manager(object):
thread_timings = []
self._printer.print_update('Sharding tests ...')
- locked_shards, unlocked_shards = self._shard_tests(file_list, int(self._options.child_processes), self._options.experimental_fully_parallel)
+ locked_shards, unlocked_shards = self._shard_tests(file_list, int(self._options.child_processes), self._options.fully_parallel)
# FIXME: We don't have a good way to coordinate the workers so that
# they don't try to run the shards that need a lock if we don't actually
@@ -749,10 +755,10 @@ class Manager(object):
if locked_shards:
self.start_servers_with_lock()
- num_workers = min(int(self._options.child_processes), len(all_shards))
+ num_workers = min(num_workers, len(all_shards))
self._log_num_workers(num_workers, len(all_shards), len(locked_shards))
- manager_connection = manager_worker_broker.get(self._options.worker_model, self, worker.Worker)
+ manager_connection = manager_worker_broker.get(num_workers, self, worker.Worker)
if self._options.dry_run:
            return (interrupted, keyboard_interrupted, thread_timings, self._group_stats, self._all_results)
@@ -761,7 +767,7 @@ class Manager(object):
for worker_number in xrange(num_workers):
worker_arguments = worker.WorkerArguments(worker_number, self.results_directory(), self._options)
worker_connection = manager_connection.start_worker(worker_arguments)
- if self._options.worker_model == 'inline':
+ if num_workers == 1:
                # FIXME: We need to be able to share a port with the worker so
# that some of the tests can query state on the port; ideally
# we'd rewrite the tests so that this wasn't necessary.
@@ -817,6 +823,7 @@ class Manager(object):
self.cancel_workers()
raise
finally:
+ manager_connection.cleanup()
self.stop_servers_with_lock()
thread_timings = [worker_state.stats for worker_state in self._worker_states.values()]
@@ -837,7 +844,7 @@ class Manager(object):
def needs_servers(self):
return any(self._test_requires_lock(test_name) for test_name in self._test_files) and self._options.http
- def set_up_run(self):
+ def _set_up_run(self):
"""Configures the system to be ready to run tests.
Returns a ResultSummary object if we should continue to run tests,
@@ -846,8 +853,9 @@ class Manager(object):
"""
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
- self._printer.print_update("Starting helper ...")
- self._port.start_helper()
+ if self._options.pixel_tests:
+ self._printer.print_update("Starting pixel test helper ...")
+ self._port.start_helper()
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
@@ -871,7 +879,7 @@ class Manager(object):
return result_summary
- def run(self, result_summary):
+ def run(self):
"""Run all our tests on all our test files.
For each test file, we run each test type. If there are any failures,
@@ -886,15 +894,20 @@ class Manager(object):
# collect_tests() must have been called first to initialize us.
# If we didn't find any files to test, we've errored out already in
# prepare_lists_and_print_output().
+
+ result_summary = self._set_up_run()
+ if not result_summary:
+ return -1
+
assert(len(self._test_files))
start_time = time.time()
- interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = self._run_tests(self._test_files_list, result_summary)
+ interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = self._run_tests(self._test_files_list, result_summary, int(self._options.child_processes))
# We exclude the crashes from the list of results to retry, because
# we want to treat even a potentially flaky crash as an error.
- failures = self._get_failures(result_summary, include_crashes=False, include_missing=False)
+ failures = self._get_failures(result_summary, include_crashes=self._port.should_retry_crashes(), include_missing=False)
retry_summary = result_summary
while (len(failures) and self._options.retry_failures and not self._retrying and not interrupted and not keyboard_interrupted):
_log.info('')
@@ -903,17 +916,16 @@ class Manager(object):
self._retrying = True
retry_summary = ResultSummary(self._expectations, failures.keys())
# Note that we intentionally ignore the return value here.
- self._run_tests(failures.keys(), retry_summary)
+ self._run_tests(failures.keys(), retry_summary, num_workers=1)
failures = self._get_failures(retry_summary, include_crashes=True, include_missing=True)
end_time = time.time()
+ self._clean_up_run()
+
self._print_timing_statistics(end_time - start_time, thread_timings, test_timings, individual_test_timings, result_summary)
self._print_result_summary(result_summary)
- sys.stdout.flush()
- sys.stderr.flush()
-
self._printer.print_one_line_summary(result_summary.total, result_summary.expected, result_summary.unexpected)
unexpected_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=True, interrupted=interrupted)
@@ -959,15 +971,16 @@ class Manager(object):
self._port.release_http_lock()
self._has_http_lock = False
- def clean_up_run(self):
+ def _clean_up_run(self):
"""Restores the system after we're done running tests."""
-
_log.debug("flushing stdout")
sys.stdout.flush()
_log.debug("flushing stderr")
sys.stderr.flush()
_log.debug("stopping helper")
self._port.stop_helper()
+ _log.debug("cleaning up port")
+ self._port.clean_up_test_run()
def update_summary(self, result_summary):
"""Update the summary and print results with any completed tests."""
@@ -980,12 +993,22 @@ class Manager(object):
self._update_summary_with_result(result_summary, result)
+ def _mark_interrupted_tests_as_skipped(self, result_summary):
+ for test_name in self._test_files:
+ if test_name not in result_summary.results:
+ result = test_results.TestResult(test_name, [test_failures.FailureEarlyExit()])
+ # FIXME: We probably need to loop here if there are multiple iterations.
+ # FIXME: Also, these results are really neither expected nor unexpected. We probably
+ # need a third type of result.
+ result_summary.add(result, expected=False)
+
def _interrupt_if_at_failure_limits(self, result_summary):
# Note: The messages in this method are constructed to match old-run-webkit-tests
# so that existing buildbot grep rules work.
def interrupt_if_at_failure_limit(limit, failure_count, result_summary, message):
if limit and failure_count >= limit:
message += " %d tests run." % (result_summary.expected + result_summary.unexpected)
+ self._mark_interrupted_tests_as_skipped(result_summary)
raise TestRunInterruptedException(message)
interrupt_if_at_failure_limit(
@@ -1122,7 +1145,6 @@ class Manager(object):
p.print_config('Command line: ' +
' '.join(self._port.driver_cmd_line()))
- p.print_config("Worker model: %s" % self._options.worker_model)
p.print_config("")
def _print_expected_results_of_type(self, result_summary,
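
The perf-test detection added above is a plain prefix-or-substring check; reduced to essentials (TEST_PATH_SEPARATOR assumed to be '/'):

PERF_SUBDIR = 'perf'
SEP = '/'  # port.TEST_PATH_SEPARATOR on the test port

def is_perf_test(test):
    return test == PERF_SUBDIR or (PERF_SUBDIR + SEP) in test

assert is_perf_test('perf')
assert is_perf_test('perf/object-keys.html')
assert not is_perf_test('performance/foo.html')  # 'perf/' is not a substring here
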
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
index 5e965ca4a..8bff7439a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -214,13 +214,11 @@ class ManagerTest(unittest.TestCase):
host = MockHost()
port = host.port_factory.get(port_name=options.platform, options=options)
run_webkit_tests._set_up_derived_options(port, options)
- printer = printing.Printer(port, options, StringIO.StringIO(), StringIO.StringIO(), configure_logging=False)
+ printer = printing.Printer(port, options, StringIO.StringIO(), StringIO.StringIO())
manager = LockCheckingManager(port, options, printer)
manager.collect_tests(args)
manager.parse_expectations()
- result_summary = manager.set_up_run()
- num_unexpected_results = manager.run(result_summary)
- manager.clean_up_run()
+ num_unexpected_results = manager.run()
printer.cleanup()
tester.assertEquals(num_unexpected_results, 0)
@@ -231,7 +229,8 @@ class ManagerTest(unittest.TestCase):
manager = Manager(port=port, options=MockOptions(), printer=Mock())
manager._options = MockOptions(exit_after_n_failures=None, exit_after_n_crashes_or_timeouts=None)
- result_summary = ResultSummary(expectations=Mock(), test_files=[])
+ manager._test_files = ['foo/bar.html', 'baz.html']
+ result_summary = ResultSummary(expectations=Mock(), test_files=manager._test_files)
result_summary.unexpected_failures = 100
result_summary.unexpected_crashes = 50
result_summary.unexpected_timeouts = 50
@@ -247,6 +246,9 @@ class ManagerTest(unittest.TestCase):
manager._options.exit_after_n_crashes_or_timeouts = 10
self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)
+ self.assertEquals(result_summary.results['foo/bar.html'].type, test_expectations.SKIP)
+ self.assertEquals(result_summary.results['baz.html'].type, test_expectations.SKIP)
+
manager._options.exit_after_n_crashes_or_timeouts = None
manager._options.exit_after_n_failures = 10
exception = self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)
@@ -387,9 +389,15 @@ class ResultSummaryTest(unittest.TestCase):
return test_names, result_summary.ResultSummary(expectations, test_names), expectations
# FIXME: Use this to test more of summarize_results. This was moved from printing_unittest.py.
- def get_unexpected_results(self, port, expected, passing, flaky):
- tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html']
+ def summarized_results(self, port, expected, passing, flaky, extra_tests=[], extra_expectations=None):
+ tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html', 'failures/expected/wontfix.html']
+ if extra_tests:
+ tests.extend(extra_tests)
+
expectations = ''
+ if extra_expectations:
+ expectations += extra_expectations
+
paths, rs, exp = self.get_result_summary(port, tests, expectations)
if expected:
rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected)
@@ -403,6 +411,10 @@ class ResultSummaryTest(unittest.TestCase):
rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected)
rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected)
rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected)
+
+ for test in extra_tests:
+ rs.add(self.get_result(test, test_expectations.CRASH), expected)
+
retry = rs
if flaky:
paths, retry, exp = self.get_result_summary(port, tests, expectations)
@@ -410,20 +422,29 @@ class ResultSummaryTest(unittest.TestCase):
retry.add(self.get_result('failures/expected/timeout.html'), True)
retry.add(self.get_result('failures/expected/crash.html'), True)
unexpected_results = manager.summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
- return unexpected_results
+ expected_results = manager.summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=False, interrupted=False)
+ return expected_results, unexpected_results
def test_no_svn_revision(self):
host = MockHost()
port = host.port_factory.get('test')
- results = self.get_unexpected_results(port, expected=False, passing=False, flaky=False)
- self.assertTrue('revision' not in results)
+ expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False)
+ self.assertTrue('revision' not in unexpected_results)
def test_svn_revision(self):
host = MockHost()
port = host.port_factory.get('test')
port._options.builder_name = 'dummy builder'
- results = self.get_unexpected_results(port, expected=False, passing=False, flaky=False)
- self.assertTrue('revision' in results)
+ expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False)
+ self.assertTrue('revision' in unexpected_results)
+
+ def test_summarized_results_wontfix(self):
+ host = MockHost()
+ port = host.port_factory.get('test')
+ port._options.builder_name = 'dummy builder'
+ port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), "failures/expected/wontfix.html"), "Dummy test contents")
+ expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False, extra_tests=['failures/expected/wontfix.html'], extra_expectations='BUGX WONTFIX : failures/expected/wontfix.html = FAIL\n')
+ self.assertTrue(expected_results['tests']['failures']['expected']['wontfix.html']['wontfix'])
if __name__ == '__main__':
port_testcase.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker.py
index 70c43a6d4..7047d939c 100755
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker.py
@@ -87,11 +87,11 @@ MANAGER_TOPIC = 'managers'
ANY_WORKER_TOPIC = 'workers'
-def get(worker_model, client, worker_class):
+def get(max_workers, client, worker_class):
"""Return a connection to a manager/worker message_broker
Args:
- worker_model - concurrency model to use (inline/processes)
+ max_workers - max # of workers to run concurrently.
client - BrokerClient implementation to dispatch
replies to.
worker_class - type of workers to create. This class should override
@@ -99,14 +99,12 @@ def get(worker_model, client, worker_class):
Returns:
A handle to an object that will talk to a message broker configured
for the normal manager/worker communication."""
- if worker_model == 'inline':
+ if max_workers == 1:
queue_class = Queue.Queue
manager_class = _InlineManager
- elif worker_model == 'processes':
+ else:
queue_class = multiprocessing.Queue
manager_class = _MultiProcessManager
- else:
- raise ValueError("unsupported value for --worker-model: %s" % worker_model)
broker = _Broker(queue_class)
return manager_class(broker, client, worker_class)
@@ -145,6 +143,15 @@ class _Broker(object):
self._queue_maker = queue_maker
self._topics = {}
+ def __del__(self):
+ self.cleanup()
+
+ def cleanup(self):
+ for queue in self._topics.values():
+ if hasattr(queue, 'close'):
+ queue.close()
+ self._topics = {}
+
def add_topic(self, topic_name):
if topic_name not in self._topics:
self._topics[topic_name] = self._queue_maker()
@@ -228,6 +235,10 @@ class _BrokerConnection(object):
broker.add_topic(run_topic)
broker.add_topic(post_topic)
+ def cleanup(self):
+ self._broker.cleanup()
+ self._broker = None
+
def run_message_loop(self, delay_secs=None):
self._broker.run_message_loop(self._run_topic, self._client, delay_secs)
@@ -274,7 +285,6 @@ class AbstractWorker(BrokerClient):
"""Callback for the worker to start executing. Typically does any
remaining initialization and then calls broker_connection.run_message_loop()."""
exception_msg = ""
- _log.debug("%s starting" % self._name)
try:
self._worker_connection.run_message_loop()
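
The cleanup() added to _Broker matters chiefly in the multiprocessing case, where each queue owns a background feeder thread; the underlying idiom:

import multiprocessing

# Only multiprocessing.Queue has close(); Queue.Queue does not, hence the
# hasattr() guard in _Broker.cleanup().
q = multiprocessing.Queue()
q.put('hello')
print(q.get())
q.close()        # stop accepting new data
q.join_thread()  # let the feeder thread flush and exit
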
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker_unittest.py
index 5457a2d26..93806e7d8 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_worker_broker_unittest.py
@@ -46,12 +46,13 @@ stopping_queue = None
WORKER_NAME = 'TestWorker'
-def make_broker(manager, worker_model, start_queue=None, stop_queue=None):
+
+def make_broker(manager, max_workers, start_queue=None, stop_queue=None):
global starting_queue
global stopping_queue
starting_queue = start_queue
stopping_queue = stop_queue
- return manager_worker_broker.get(worker_model, manager, _TestWorker)
+ return manager_worker_broker.get(max_workers, manager, _TestWorker)
class _TestWorker(manager_worker_broker.AbstractWorker):
@@ -87,16 +88,13 @@ class _TestWorker(manager_worker_broker.AbstractWorker):
class FunctionTests(unittest.TestCase):
def test_get__inline(self):
- self.assertTrue(make_broker(self, 'inline') is not None)
+ self.assertTrue(make_broker(self, 1) is not None)
def test_get__processes(self):
# This test sometimes fails on Windows. See <http://webkit.org/b/55087>.
if sys.platform in ('cygwin', 'win32'):
return
- self.assertTrue(make_broker(self, 'processes') is not None)
-
- def test_get__unknown(self):
- self.assertRaises(ValueError, make_broker, self, 'unknown')
+ self.assertTrue(make_broker(self, 2) is not None)
class _TestsMixin(object):
@@ -125,10 +123,10 @@ class _TestsMixin(object):
self._broker = None
self._done = False
self._exception = None
- self._worker_model = None
+ self._max_workers = None
def make_broker(self, starting_queue=None, stopping_queue=None):
- self._broker = make_broker(self, self._worker_model, starting_queue,
+ self._broker = make_broker(self, self._max_workers, starting_queue,
stopping_queue)
def test_name(self):
@@ -138,14 +136,16 @@ class _TestsMixin(object):
worker.cancel()
worker.join(0.1)
self.assertFalse(worker.is_alive())
+ self._broker.cleanup()
def test_cancel(self):
self.make_broker()
worker = self._broker.start_worker()
- worker.cancel()
self._broker.post_message('test', 1, 'hello, world')
+ worker.cancel()
worker.join(0.1)
self.assertFalse(worker.is_alive())
+ self._broker.cleanup()
def test_done(self):
self.make_broker()
@@ -158,6 +158,7 @@ class _TestsMixin(object):
self.assertTrue(self.is_done())
self.assertEqual(self._an_int, 2)
self.assertEqual(self._a_str, 'hi, everybody')
+ self._broker.cleanup()
def test_unknown_message(self):
self.make_broker()
@@ -172,12 +173,13 @@ class _TestsMixin(object):
finally:
worker.join(0.5)
self.assertFalse(worker.is_alive())
+ self._broker.cleanup()
class InlineBrokerTests(_TestsMixin, unittest.TestCase):
def setUp(self):
_TestsMixin.setUp(self)
- self._worker_model = 'inline'
+ self._max_workers = 1
def test_inline_arguments(self):
self.make_broker()
@@ -195,7 +197,7 @@ if sys.platform not in ('cygwin', 'win32'):
class MultiProcessBrokerTests(_TestsMixin, unittest.TestCase):
def setUp(self):
_TestsMixin.setUp(self)
- self._worker_model = 'processes'
+ self._max_workers = 2
class InterfaceTest(unittest.TestCase):
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index 6375820cd..3f51410cf 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -51,14 +51,13 @@ class SingleTestRunner:
def __init__(self, options, port, driver, test_input, worker_name):
self._options = options
self._port = port
+ self._filesystem = port.host.filesystem
self._driver = driver
self._timeout = test_input.timeout
self._worker_name = worker_name
self._test_name = test_input.test_name
self._should_run_pixel_test = test_input.should_run_pixel_test
-
- self._is_reftest = False
- self._reference_files = port.reference_files(self._test_name)
+ self._reference_files = test_input.reference_files
if self._reference_files:
# Detect and report a test which has a wrong combination of expectation files.
@@ -67,7 +66,7 @@ class SingleTestRunner:
# in either layout tests or reftests, but not in both.
for suffix in ('.txt', '.png', '.wav'):
expected_filename = self._port.expected_filename(self._test_name, suffix)
- if port.host.filesystem.exists(expected_filename):
+ if self._filesystem.exists(expected_filename):
_log.error('%s is both a reftest and has an expected output file %s.',
self._test_name, expected_filename)
@@ -91,7 +90,7 @@ class SingleTestRunner:
image_hash = None
if self._should_fetch_expected_checksum():
image_hash = self._port.expected_checksum(self._test_name)
- return DriverInput(self._test_name, self._timeout, image_hash, bool(self._reference_files))
+ return DriverInput(self._test_name, self._timeout, image_hash, self._should_run_pixel_test)
def run(self):
if self._reference_files:
@@ -115,13 +114,13 @@ class SingleTestRunner:
test_result = self._compare_output(driver_output, expected_driver_output)
if self._options.new_test_results:
self._add_missing_baselines(test_result, driver_output)
- test_result_writer.write_test_result(self._port, self._test_name, driver_output, expected_driver_output, test_result.failures)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, expected_driver_output, test_result.failures)
return test_result
def _run_rebaseline(self):
driver_output = self._driver.run_test(self._driver_input())
failures = self._handle_error(driver_output)
- test_result_writer.write_test_result(self._port, self._test_name, driver_output, None, failures)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, None, failures)
        # FIXME: If the test crashed or timed out, it might be better to avoid
        # writing new baselines.
self._overwrite_baselines(driver_output)
@@ -162,7 +161,7 @@ class SingleTestRunner:
if data is None:
return
port = self._port
- fs = port._filesystem
+ fs = self._filesystem
if generate_new_baseline:
relative_dir = fs.dirname(self._test_name)
baseline_path = port.baseline_path()
@@ -187,7 +186,7 @@ class SingleTestRunner:
which html file is used for producing the driver_output.
"""
failures = []
- fs = self._port._filesystem
+ fs = self._filesystem
if driver_output.timeout:
failures.append(test_failures.FailureTimeout(bool(reference_filename)))
@@ -197,7 +196,9 @@ class SingleTestRunner:
testname = self._test_name
if driver_output.crash:
- failures.append(test_failures.FailureCrash(bool(reference_filename)))
+ failures.append(test_failures.FailureCrash(bool(reference_filename),
+ driver_output.crashed_process_name,
+ driver_output.crashed_pid))
if driver_output.error:
_log.debug("%s %s crashed, stack trace:" % (self._worker_name, testname))
else:
@@ -287,7 +288,7 @@ class SingleTestRunner:
putAllMismatchBeforeMatch = sorted
for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
reference_test_name = self._port.relative_test_filename(reference_filename)
- reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, test_output.image_hash, is_reftest=True))
+ reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, test_output.image_hash, should_run_pixel_test=True))
test_result = self._compare_output_with_reference(test_output, reference_output, reference_filename, expectation == '!=')
if (expectation == '!=' and test_result.failures) or (expectation == '==' and not test_result.failures):
@@ -295,7 +296,7 @@ class SingleTestRunner:
total_test_time += test_result.test_run_time
assert(reference_output)
- test_result_writer.write_test_result(self._port, self._test_name, test_output, reference_output, test_result.failures)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, test_output, reference_output, test_result.failures)
return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr)
def _compare_output_with_reference(self, driver_output1, driver_output2, reference_filename, mismatch):
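
DriverInput now carries pixel-test intent explicitly instead of a reftest flag, and reference runs always force it on. A sketch (module path and the 6000 ms timeout are assumptions for illustration):

from webkitpy.layout_tests.port.driver import DriverInput

# Normal run: honour the per-test setting carried on the TestInput.
test_run = DriverInput('fast/dom/example.html', 6000, None, should_run_pixel_test=False)
# Reference run: pixel output is always needed for the comparison.
ref_run = DriverInput('fast/dom/example-expected.html', 6000, None, should_run_pixel_test=True)
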
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor_unittest.py
index 58a18359d..31029a35f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_expectations_editor_unittest.py
@@ -46,27 +46,27 @@ class MockBugManager(object):
class TestExpectationEditorTests(unittest.TestCase):
- WIN_RELEASE_CPU_CONFIGS = set([
- TestConfiguration('vista', 'x86', 'release', 'cpu'),
- TestConfiguration('win7', 'x86', 'release', 'cpu'),
- TestConfiguration('xp', 'x86', 'release', 'cpu'),
+ WIN_RELEASE_CONFIGS = set([
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'release'),
])
RELEASE_CONFIGS = set([
- TestConfiguration('vista', 'x86', 'release', 'cpu'),
- TestConfiguration('win7', 'x86', 'release', 'cpu'),
- TestConfiguration('xp', 'x86', 'release', 'cpu'),
- TestConfiguration('vista', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'cpu'),
- TestConfiguration('leopard', 'x86', 'release', 'cpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
- TestConfiguration('leopard', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'gpu'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('leopard', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('leopard', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
])
def __init__(self, testFunc):
@@ -129,14 +129,13 @@ BUGX1 MAC : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
self.assert_remove_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE
+BUGX1 XP RELEASE : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
self.assert_remove_roundtrip("""
BUGX1 WIN : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
-BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = IMAGE
BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
@@ -144,14 +143,12 @@ BUGX2 WIN : failures/expected/audio.html = IMAGE""")
self.assert_remove_roundtrip("""
BUGX1 XP : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
-BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE
-BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
self.assert_remove_roundtrip("""
BUGX1 : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""", 'failures/expected/keyboard.html', """
-BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = IMAGE
BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
BUGX1 LINUX MAC VISTA WIN7 : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
@@ -159,28 +156,27 @@ BUGX2 WIN : failures/expected/audio.html = IMAGE""")
self.assert_remove_roundtrip("""
BUGX1 WIN : failures/expected = PASS
BUGX2 XP RELEASE : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
-BUGX1 WIN : failures/expected = PASS
-BUGX2 XP RELEASE GPU : failures/expected/keyboard.html = IMAGE""")
+BUGX1 WIN : failures/expected = PASS""")
self.assert_remove_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE
-BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
-BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""")
+BUGX1 XP RELEASE : failures/expected/keyboard.html = IMAGE
+BUGX2 XP DEBUG : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX2 XP DEBUG : failures/expected/keyboard.html = IMAGE""")
self.assert_remove_roundtrip("""
BUGX1 WIN : failures/expected = FAIL""", 'failures/expected/keyboard.html', """
BUGX1 WIN : failures/expected = FAIL""")
self.assert_remove_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE PASS
-BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = PASS IMAGE
-BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""")
+BUGX1 XP RELEASE : failures/expected/keyboard.html = IMAGE PASS
+BUGX2 XP DEBUG : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX1 XP RELEASE : failures/expected/keyboard.html = PASS IMAGE
+BUGX2 XP DEBUG : failures/expected/keyboard.html = IMAGE""")
self.assert_remove_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE PASS
-BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
-BUGX2 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE""", remove_flakes=True)
+BUGX1 XP RELEASE : failures/expected/keyboard.html = IMAGE PASS
+BUGX2 XP DEBUG : failures/expected/keyboard.html = IMAGE""", 'failures/expected/keyboard.html', """
+BUGX2 XP DEBUG : failures/expected/keyboard.html = IMAGE""", remove_flakes=True)
def test_remove_expectation_multiple(self):
in_string = """
@@ -191,40 +187,26 @@ BUGX2 WIN : failures/expected/audio.html = IMAGE"""
editor = TestExpectationsEditor(expectation_lines, MockBugManager())
test = "failures/expected/keyboard.html"
- editor.remove_expectation(test, set([TestConfiguration('xp', 'x86', 'release', 'cpu')]))
+ editor.remove_expectation(test, set([TestConfiguration('xp', 'x86', 'release')]))
self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
-BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = IMAGE
BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
- editor.remove_expectation(test, set([TestConfiguration('xp', 'x86', 'debug', 'cpu')]))
+ editor.remove_expectation(test, set([TestConfiguration('xp', 'x86', 'debug')]))
self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
-BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
- editor.remove_expectation(test, set([TestConfiguration('vista', 'x86', 'debug', 'gpu'), TestConfiguration('win7', 'x86', 'release', 'gpu')]))
+ editor.remove_expectation(test, set([TestConfiguration('vista', 'x86', 'debug'), TestConfiguration('win7', 'x86', 'release')]))
self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
-BUGX1 VISTA DEBUG CPU : failures/expected/keyboard.html = IMAGE
-BUGX1 WIN7 DEBUG GPU : failures/expected/keyboard.html = IMAGE
-BUGX1 WIN7 CPU : failures/expected/keyboard.html = IMAGE
-BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
BUGX1 VISTA RELEASE : failures/expected/keyboard.html = IMAGE
-BUGX2 WIN : failures/expected/audio.html = IMAGE""")
-
- editor.remove_expectation(test, set([TestConfiguration('xp', 'x86', 'debug', 'gpu'), TestConfiguration('xp', 'x86', 'release', 'gpu')]))
- self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
-BUGX1 VISTA DEBUG CPU : failures/expected/keyboard.html = IMAGE
-BUGX1 WIN7 RELEASE CPU : failures/expected/keyboard.html = IMAGE
BUGX1 WIN7 DEBUG : failures/expected/keyboard.html = IMAGE
-BUGX1 VISTA RELEASE : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
- editor.remove_expectation(test, set([TestConfiguration('vista', 'x86', 'debug', 'cpu'), TestConfiguration('vista', 'x86', 'debug', 'gpu'), TestConfiguration('vista', 'x86', 'release', 'gpu')]))
+ editor.remove_expectation(test, set([TestConfiguration('vista', 'x86', 'debug'), TestConfiguration('vista', 'x86', 'release')]))
self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
BUGX1 WIN7 DEBUG : failures/expected/keyboard.html = IMAGE
-BUGX1 VISTA WIN7 RELEASE CPU : failures/expected/keyboard.html = IMAGE
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
editor.remove_expectation(test, set(self.test_port.all_test_configurations()))
@@ -238,88 +220,81 @@ BUGX2 WIN : failures/expected/audio.html = IMAGE""")
def test_update_expectation(self):
self.assert_update_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 1)
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUG_NEWLY_CREATED XP RELEASE : failures/expected/keyboard.html = IMAGE""", 1)
self.assert_update_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), '', 1)
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), '', 1)
self.assert_update_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUGX1 XP RELEASE CPU : failures/expected = TEXT
-BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 1)
+BUGX1 XP RELEASE : failures/expected = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 XP RELEASE : failures/expected = TEXT
+BUG_NEWLY_CREATED XP RELEASE : failures/expected/keyboard.html = IMAGE""", 1)
self.assert_update_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected = TEXT""", 'failures/expected/keyboard.html', set([PASS]), """
-BUGX1 XP RELEASE CPU : failures/expected = TEXT
-BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = PASS""", 1)
+BUGX1 XP RELEASE : failures/expected = TEXT""", 'failures/expected/keyboard.html', set([PASS]), """
+BUGX1 XP RELEASE : failures/expected = TEXT
+BUG_NEWLY_CREATED XP RELEASE : failures/expected/keyboard.html = PASS""", 1)
self.assert_update_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([TEXT]), """
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 0)
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([TEXT]), """
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 0)
self.assert_update_roundtrip("""
-BUGX1 XP RELEASE CPU : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUGAWESOME XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 1, parsed_bug_modifiers=['BUGAWESOME'])
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGAWESOME XP RELEASE : failures/expected/keyboard.html = IMAGE""", 1, parsed_bug_modifiers=['BUGAWESOME'])
self.assert_update_roundtrip("""
BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = TEXT
-BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2)
+BUG_NEWLY_CREATED XP RELEASE : failures/expected/keyboard.html = IMAGE""", 1)
self.assert_update_roundtrip("""
-BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), """
-BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = TEXT""", 1)
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), '', 1)
self.assert_update_roundtrip("""
BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = TEXT
-BUGAWESOME XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2, parsed_bug_modifiers=['BUGAWESOME'])
+BUGAWESOME XP RELEASE : failures/expected/keyboard.html = IMAGE""", 1, parsed_bug_modifiers=['BUGAWESOME'])
self.assert_update_roundtrip("""
BUGX1 WIN : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
-BUGX1 XP GPU : failures/expected/keyboard.html = TEXT
+BUGX1 XP DEBUG : failures/expected/keyboard.html = TEXT
BUGX1 VISTA WIN7 : failures/expected/keyboard.html = TEXT
-BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2)
+BUG_NEWLY_CREATED XP RELEASE : failures/expected/keyboard.html = IMAGE""", 2)
self.assert_update_roundtrip("""
BUGX1 WIN : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), """
-BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
-BUGX1 XP GPU : failures/expected/keyboard.html = TEXT
+BUGX1 XP DEBUG : failures/expected/keyboard.html = TEXT
BUGX1 VISTA WIN7 : failures/expected/keyboard.html = TEXT""", 1)
self.assert_update_roundtrip("""
BUGX1 WIN : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
-BUGX1 XP GPU : failures/expected/keyboard.html = TEXT
+BUGX1 XP DEBUG : failures/expected/keyboard.html = TEXT
BUGX1 VISTA WIN7 : failures/expected/keyboard.html = TEXT
-BUG_NEWLY_CREATED XP RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2)
+BUG_NEWLY_CREATED XP RELEASE : failures/expected/keyboard.html = IMAGE""", 2)
self.assert_update_roundtrip("""
-BUGX1 XP RELEASE CPU: failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUG_NEWLY_CREATED WIN RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2, test_configs=self.WIN_RELEASE_CPU_CONFIGS)
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUG_NEWLY_CREATED WIN RELEASE : failures/expected/keyboard.html = IMAGE""", 2, test_configs=self.WIN_RELEASE_CONFIGS)
self.assert_update_roundtrip("""
-BUGX1 XP RELEASE CPU: failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), '', 1, test_configs=self.WIN_RELEASE_CPU_CONFIGS)
+BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([PASS]), '', 1, test_configs=self.WIN_RELEASE_CONFIGS)
self.assert_update_roundtrip("""
-BUGX1 RELEASE CPU: failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUGX1 LINUX MAC RELEASE CPU : failures/expected/keyboard.html = TEXT
-BUG_NEWLY_CREATED WIN RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 2, test_configs=self.WIN_RELEASE_CPU_CONFIGS)
+BUGX1 RELEASE : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
+BUGX1 LINUX MAC RELEASE : failures/expected/keyboard.html = TEXT
+BUG_NEWLY_CREATED WIN RELEASE : failures/expected/keyboard.html = IMAGE""", 2, test_configs=self.WIN_RELEASE_CONFIGS)
self.assert_update_roundtrip("""
BUGX1 MAC : failures/expected/keyboard.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
BUGX1 MAC : failures/expected/keyboard.html = TEXT
-BUG_NEWLY_CREATED WIN RELEASE CPU : failures/expected/keyboard.html = IMAGE""", 1, test_configs=self.WIN_RELEASE_CPU_CONFIGS)
+BUG_NEWLY_CREATED WIN RELEASE : failures/expected/keyboard.html = IMAGE""", 1, test_configs=self.WIN_RELEASE_CONFIGS)
def test_update_expectation_relative(self):
self.assert_update_roundtrip("""
BUGX1 XP RELEASE : failures/expected/keyboard.html = TEXT
BUGX2 MAC : failures/expected/audio.html = TEXT""", 'failures/expected/keyboard.html', set([IMAGE]), """
-BUGX1 XP RELEASE GPU : failures/expected/keyboard.html = TEXT
-BUGAWESOME XP RELEASE CPU : failures/expected/keyboard.html = IMAGE
-BUGX2 MAC : failures/expected/audio.html = TEXT""", 2, parsed_bug_modifiers=['BUGAWESOME'])
+BUGAWESOME XP RELEASE : failures/expected/keyboard.html = IMAGE
+BUGX2 MAC : failures/expected/audio.html = TEXT""", 1, parsed_bug_modifiers=['BUGAWESOME'])
def test_update_expectation_multiple(self):
in_string = """
@@ -330,37 +305,31 @@ BUGX2 WIN : failures/expected/audio.html = IMAGE"""
editor = TestExpectationsEditor(expectation_lines, MockBugManager())
test = "failures/expected/keyboard.html"
- editor.update_expectation(test, set([TestConfiguration('xp', 'x86', 'release', 'cpu')]), set([IMAGE_PLUS_TEXT]), ['BUG_UPDATE1'])
+ editor.update_expectation(test, set([TestConfiguration('xp', 'x86', 'release')]), set([IMAGE_PLUS_TEXT]), ['BUG_UPDATE1'])
self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
-BUGX1 XP DEBUG CPU : failures/expected/keyboard.html = IMAGE
-BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
+BUGX1 XP DEBUG : failures/expected/keyboard.html = IMAGE
BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
-BUG_UPDATE1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE+TEXT
+BUG_UPDATE1 XP RELEASE : failures/expected/keyboard.html = IMAGE+TEXT
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
- editor.update_expectation(test, set([TestConfiguration('xp', 'x86', 'debug', 'cpu')]), set([TEXT]), ['BUG_UPDATE2'])
+ editor.update_expectation(test, set([TestConfiguration('xp', 'x86', 'debug')]), set([TEXT]), ['BUG_UPDATE2'])
self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
-BUGX1 XP GPU : failures/expected/keyboard.html = IMAGE
BUGX1 VISTA WIN7 : failures/expected/keyboard.html = IMAGE
-BUG_UPDATE2 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
-BUG_UPDATE1 XP RELEASE CPU : failures/expected/keyboard.html = IMAGE+TEXT
+BUG_UPDATE2 XP DEBUG : failures/expected/keyboard.html = TEXT
+BUG_UPDATE1 XP RELEASE : failures/expected/keyboard.html = IMAGE+TEXT
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
- editor.update_expectation(test, self.WIN_RELEASE_CPU_CONFIGS, set([CRASH]), ['BUG_UPDATE3'])
+ editor.update_expectation(test, self.WIN_RELEASE_CONFIGS, set([CRASH]), ['BUG_UPDATE3'])
self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
-BUGX1 VISTA DEBUG CPU : failures/expected/keyboard.html = IMAGE
-BUGX1 WIN7 RELEASE GPU : failures/expected/keyboard.html = IMAGE
-BUGX1 WIN7 DEBUG : failures/expected/keyboard.html = IMAGE
-BUGX1 VISTA XP GPU : failures/expected/keyboard.html = IMAGE
-BUG_UPDATE2 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
-BUG_UPDATE3 WIN RELEASE CPU : failures/expected/keyboard.html = CRASH
+BUGX1 VISTA WIN7 DEBUG : failures/expected/keyboard.html = IMAGE
+BUG_UPDATE2 XP DEBUG : failures/expected/keyboard.html = TEXT
+BUG_UPDATE3 WIN RELEASE : failures/expected/keyboard.html = CRASH
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
editor.update_expectation(test, self.RELEASE_CONFIGS, set([FAIL]), ['BUG_UPDATE4'])
self.assertEquals(TestExpectationSerializer.list_to_string(expectation_lines, converter), """
-BUGX1 XP DEBUG GPU : failures/expected/keyboard.html = IMAGE
BUGX1 VISTA WIN7 DEBUG : failures/expected/keyboard.html = IMAGE
-BUG_UPDATE2 XP DEBUG CPU : failures/expected/keyboard.html = TEXT
+BUG_UPDATE2 XP DEBUG : failures/expected/keyboard.html = TEXT
BUG_UPDATE4 RELEASE : failures/expected/keyboard.html = FAIL
BUGX2 WIN : failures/expected/audio.html = IMAGE""")
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
index a40c090b5..fecddad46 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
@@ -29,18 +29,17 @@
import logging
-from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
-def write_test_result(port, test_name, driver_output,
+def write_test_result(filesystem, port, test_name, driver_output,
expected_driver_output, failures):
"""Write the test result to the result output directory."""
root_output_dir = port.results_directory()
- writer = TestResultWriter(port, root_output_dir, test_name)
+ writer = TestResultWriter(filesystem, port, root_output_dir, test_name)
if driver_output.error:
writer.write_stderr(driver_output.error)
@@ -63,15 +62,16 @@ def write_test_result(port, test_name, driver_output,
writer.write_audio_files(driver_output.audio, expected_driver_output.audio)
elif isinstance(failure, test_failures.FailureCrash):
crashed_driver_output = expected_driver_output if failure.is_reftest else driver_output
- writer.write_crash_report(crashed_driver_output.crashed_process_name, crashed_driver_output.error)
+ writer.write_crash_log(crashed_driver_output.crash_log)
elif isinstance(failure, test_failures.FailureReftestMismatch):
writer.write_image_files(driver_output.image, expected_driver_output.image)
# FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
# FIXME: We should always have 2 images here.
if driver_output.image and expected_driver_output.image:
- image_diff = port.diff_image(driver_output.image, expected_driver_output.image, tolerance=0)[0]
- if image_diff:
- writer.write_image_diff_files(image_diff)
+ diff_image, diff_percent = port.diff_image(driver_output.image, expected_driver_output.image, tolerance=0)
+ if diff_image:
+ writer.write_image_diff_files(diff_image)
+ failure.diff_percent = diff_percent
else:
            _log.warn('Cannot get image diff. ImageDiff program might not work correctly.')
writer.copy_file(failure.reference_filename)
@@ -96,14 +96,15 @@ class TestResultWriter(object):
FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
FILENAME_SUFFIX_IMAGE_DIFFS_HTML = "-diffs.html"
- def __init__(self, port, root_output_dir, test_name):
+ def __init__(self, filesystem, port, root_output_dir, test_name):
+ self._filesystem = filesystem
self._port = port
self._root_output_dir = root_output_dir
self._test_name = test_name
def _make_output_directory(self):
"""Creates the output directory (if needed) for a given test filename."""
- fs = self._port._filesystem
+ fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
fs.maybe_make_directory(fs.dirname(output_filename))
@@ -119,12 +120,12 @@ class TestResultWriter(object):
Return:
The absolute path to the output filename
"""
- fs = self._port._filesystem
+ fs = self._filesystem
output_filename = fs.join(self._root_output_dir, self._test_name)
return fs.splitext(output_filename)[0] + modifier
def _output_testname(self, modifier):
- fs = self._port._filesystem
+ fs = self._filesystem
return fs.splitext(fs.basename(self._test_name))[0] + modifier
def write_output_files(self, file_type, output, expected):
@@ -144,28 +145,24 @@ class TestResultWriter(object):
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
- fs = self._port._filesystem
+ fs = self._filesystem
if output is not None:
fs.write_binary_file(actual_filename, output)
if expected is not None:
fs.write_binary_file(expected_filename, expected)
def write_stderr(self, error):
- fs = self._port._filesystem
+ fs = self._filesystem
filename = self.output_filename(self.FILENAME_SUFFIX_STDERR + ".txt")
fs.maybe_make_directory(fs.dirname(filename))
fs.write_binary_file(filename, error)
- def write_crash_report(self, crashed_process_name, error):
- fs = self._port._filesystem
+ def write_crash_log(self, crash_log):
+ fs = self._filesystem
filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
fs.maybe_make_directory(fs.dirname(filename))
- # FIXME: We shouldn't be grabbing private members of port.
- crash_logs = CrashLogs(fs)
- log = crash_logs.find_newest_log(crashed_process_name)
- # CrashLogs doesn't support every platform, so we fall back to
- # including the stderr output, which is admittedly somewhat redundant.
- fs.write_text_file(filename, log if log else error)
+ if crash_log is not None:
+ fs.write_text_file(filename, crash_log)
def write_text_files(self, actual_text, expected_text):
self.write_output_files(".txt", actual_text, expected_text)
@@ -180,7 +177,7 @@ class TestResultWriter(object):
file_type = '.txt'
actual_filename = self.output_filename(self.FILENAME_SUFFIX_ACTUAL + file_type)
expected_filename = self.output_filename(self.FILENAME_SUFFIX_EXPECTED + file_type)
- fs = self._port._filesystem
+ fs = self._filesystem
# We treat diff output as binary. Diff output may contain multiple files
# in conflicting encodings.
diff = self._port.diff_text(expected_text, actual_text, expected_filename, actual_filename)
@@ -205,7 +202,7 @@ class TestResultWriter(object):
def write_image_diff_files(self, image_diff):
diff_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFF)
- fs = self._port._filesystem
+ fs = self._filesystem
fs.write_binary_file(diff_filename, image_diff)
diffs_html_filename = self.output_filename(self.FILENAME_SUFFIX_IMAGE_DIFFS_HTML)
@@ -262,11 +259,10 @@ Difference between images: <a href="%(diff_filename)s">diff</a><br>
'diff_filename': self._output_testname(self.FILENAME_SUFFIX_IMAGE_DIFF),
'prefix': self._output_testname(''),
}
- # FIXME: This seems like a text file, not a binary file.
- self._port._filesystem.write_binary_file(diffs_html_filename, html)
+ self._filesystem.write_text_file(diffs_html_filename, html)
def copy_file(self, src_filepath):
- fs = self._port._filesystem
+ fs = self._filesystem
assert fs.exists(src_filepath), 'src_filepath: %s' % src_filepath
dst_filepath = fs.join(self._root_output_dir, self._port.relative_test_filename(src_filepath))
self._make_output_directory()
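
The hunk above threads an explicit filesystem through TestResultWriter instead of reaching into port._filesystem. A minimal sketch of that dependency-injection pattern under the same write_crash_log contract; FakeFileSystem and Writer are hypothetical stand-ins, not the webkitpy classes:

    class FakeFileSystem(object):
        def __init__(self):
            self.files = {}

        def write_text_file(self, path, contents):
            self.files[path] = contents

    class Writer(object):
        def __init__(self, filesystem, root):
            self._filesystem = filesystem  # injected, so tests can pass a fake
            self._root = root

        def write_crash_log(self, crash_log):
            # Mirrors the new behavior: only write when a log was captured.
            if crash_log is not None:
                self._filesystem.write_text_file(self._root + '/test-crash-log.txt', crash_log)

    fs = FakeFileSystem()
    Writer(fs, '/tmp/results').write_crash_log('stack trace ...')
    assert fs.files['/tmp/results/test-crash-log.txt'] == 'stack trace ...'
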
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
index c79846a2d..3b9b522ad 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
@@ -51,7 +51,7 @@ class TestResultWriterTest(unittest.TestCase):
driver_output1 = DriverOutput('text1', 'image1', 'imagehash1', 'audio1')
driver_output2 = DriverOutput('text2', 'image2', 'imagehash2', 'audio2')
failures = [test_failures.FailureReftestMismatch(test_reference_file)]
- test_result_writer.write_test_result(ImageDiffTestPort(host), test_name,
+ test_result_writer.write_test_result(host.filesystem, ImageDiffTestPort(host), test_name,
driver_output1, driver_output2, failures)
self.assertEqual([0], used_tolerance_values)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
index 22c0a5f04..a1e3bee70 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
@@ -38,7 +38,7 @@ from webkitpy.layout_tests.controllers import manager_worker_broker
from webkitpy.layout_tests.controllers import single_test_runner
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_results
-from webkitpy.layout_tests.views import printing
+from webkitpy.layout_tests.views import metered_stream
_log = logging.getLogger(__name__)
@@ -65,7 +65,7 @@ class Worker(manager_worker_broker.AbstractWorker):
self._driver = None
self._tests_run_file = None
self._tests_run_filename = None
- self._printer = None
+ self._meter = None
def __del__(self):
self.cleanup()
@@ -82,49 +82,65 @@ class Worker(manager_worker_broker.AbstractWorker):
tests_run_filename = self._filesystem.join(self._results_directory, "tests_run%d.txt" % self._worker_number)
self._tests_run_file = self._filesystem.open_text_file_for_writing(tests_run_filename)
+ def _set_up_logging(self):
+ # The unix multiprocessing implementation clones the MeteredStream log handler
+ # into the child process, so we need to remove it before we can
+ # add a new one to get the correct pid logged.
+ root_logger = logging.getLogger()
+ handler_to_remove = None
+ for h in root_logger.handlers:
+            # Log handlers don't have names until Python 2.7.
+ if getattr(h, 'name', '') == metered_stream.LOG_HANDLER_NAME:
+ handler_to_remove = h
+ break
+ if handler_to_remove:
+ root_logger.removeHandler(handler_to_remove)
+
+ # FIXME: This won't work if the calling process is logging
+ # somewhere other than sys.stderr, but I'm not sure
+ # if this will be an issue in practice. Also, it would be
+ # nice if we trapped all of the messages for a given test
+ # and sent them back in finished_test() rather than logging
+ # them to stderr.
+ if not root_logger.handlers:
+ options = self._options
+ root_logger.setLevel(logging.DEBUG if options.verbose else logging.INFO)
+ self._meter = metered_stream.MeteredStream(sys.stderr, options.verbose, logger=root_logger)
+
+ def _set_up_host_and_port(self):
+ options = self._options
+ if options.platform and 'test' in options.platform:
+ # It is lame to import mocks into real code, but this allows us to use the test port in multi-process tests as well.
+ from webkitpy.common.host_mock import MockHost
+ host = MockHost()
+ else:
+ host = Host()
+ self._port = host.port_factory.get(options.platform, options)
+
def set_inline_arguments(self, port):
self._port = port
def run(self):
if not self._port:
- # We are running in a child process and need to create a new Host.
- if self._options.platform and 'test' in self._options.platform:
- # It is lame to import mocks into real code, but this allows us to use the test port in multi-process tests as well.
- from webkitpy.common.host_mock import MockHost
- host = MockHost()
- else:
- host = Host()
-
- options = self._options
- self._port = host.port_factory.get(options.platform, options)
-
- # The unix multiprocessing implementation clones the
- # log handler configuration into the child processes,
- # but the win implementation doesn't.
- configure_logging = (sys.platform == 'win32')
-
- # FIXME: This won't work if the calling process is logging
- # somewhere other than sys.stderr and sys.stdout, but I'm not sure
- # if this will be an issue in practice.
- self._printer = printing.Printer(self._port, options, sys.stderr, sys.stdout, configure_logging)
+ # We are running in a child process and need to initialize things.
+ self._set_up_logging()
+ self._set_up_host_and_port()
self.safe_init()
-
try:
_log.debug("%s starting" % self._name)
super(Worker, self).run()
finally:
+ self.kill_driver()
self._worker_connection.post_message('done')
- self.cleanup()
_log.debug("%s exiting" % self._name)
+ self.cleanup()
def handle_test_list(self, src, list_name, test_list):
start_time = time.time()
num_tests = 0
for test_input in test_list:
- #FIXME: When the DRT support also this function, that would be useful
- if self._port.driver_name() == "WebKitTestRunner" and self._port.get_option('skip_pixel_test_if_no_baseline') and self._port.get_option('pixel_tests'):
- test_input.should_run_pixel_test = (self._port.expected_image(test_input.test_name) != None)
+ self._update_test_input(test_input)
self._run_test(test_input)
num_tests += 1
self._worker_connection.yield_to_broker()
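
The _set_up_logging() added above depends on being able to find the handler inherited across fork() by name. A standalone sketch of that lookup-and-remove step; the handler name here is a hypothetical stand-in for metered_stream.LOG_HANDLER_NAME:

    import logging
    import sys

    LOG_HANDLER_NAME = 'example-metered-stream'  # hypothetical name

    root_logger = logging.getLogger()
    handler = logging.StreamHandler(sys.stderr)
    handler.name = LOG_HANDLER_NAME  # a plain attribute, so this works before Python 2.7 too
    root_logger.addHandler(handler)

    # In the forked child: drop the inherited handler so a fresh one can log the right pid.
    for h in list(root_logger.handlers):
        if getattr(h, 'name', '') == LOG_HANDLER_NAME:
            root_logger.removeHandler(h)

    assert all(getattr(h, 'name', '') != LOG_HANDLER_NAME for h in root_logger.handlers)
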
@@ -135,6 +151,18 @@ class Worker(manager_worker_broker.AbstractWorker):
def handle_stop(self, src):
self.stop_handling_messages()
+ def _update_test_input(self, test_input):
+ test_input.reference_files = self._port.reference_files(test_input.test_name)
+ if test_input.reference_files:
+ test_input.should_run_pixel_test = True
+ elif self._options.pixel_tests:
+ if self._options.skip_pixel_test_if_no_baseline:
+ test_input.should_run_pixel_test = bool(self._port.expected_image(test_input.test_name))
+ else:
+ test_input.should_run_pixel_test = True
+ else:
+ test_input.should_run_pixel_test = False
+
def _run_test(self, test_input):
test_timeout_sec = self.timeout(test_input)
start = time.time()
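
The new _update_test_input() centralizes the pixel-test decision that was previously inlined in handle_test_list(). The same logic as a pure function, for clarity (all names here are hypothetical):

    def should_run_pixel_test(has_reference_files, pixel_tests_enabled,
                              skip_if_no_baseline, has_expected_image):
        if has_reference_files:
            return True  # reftests always need image output
        if not pixel_tests_enabled:
            return False
        if skip_if_no_baseline:
            return has_expected_image  # only run when a baseline exists
        return True

    assert should_run_pixel_test(True, False, False, False)     # reftest wins
    assert not should_run_pixel_test(False, True, True, False)  # no baseline, skip
    assert should_run_pixel_test(False, True, True, True)       # baseline present
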
@@ -153,9 +181,9 @@ class Worker(manager_worker_broker.AbstractWorker):
if self._tests_run_file:
self._tests_run_file.close()
self._tests_run_file = None
- if self._printer:
- self._printer.cleanup()
- self._printer = None
+ if self._meter:
+ self._meter.cleanup()
+ self._meter = None
def timeout(self, test_input):
"""Compute the appropriate timeout value for a test."""
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/worker_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/worker_unittest.py
index 6fd5202d9..ebb4e7a5b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/worker_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/worker_unittest.py
@@ -46,8 +46,7 @@ class WorkerTest(unittest.TestCase):
# if we didn't specify a port with the --platform flag.
worker_connection = FakeConnection()
worker = Worker(worker_connection, WorkerArguments(1, '/tmp', MockOptions(platform=None, print_options=None, verbose=False, batch_size=0)))
- worker._done = True
- worker.run()
+ worker._set_up_host_and_port()
self.assertNotEquals(worker._port, None)
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
index ce7ba45b7..0ff0d2f19 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -70,8 +70,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
self._expectations = expectations
self._result_summary = result_summary
- self._failures = dict((test_name, test_failures.determine_result_type(failures))
- for (test_name, failures) in result_summary.failures.iteritems())
+ self._failures = dict((test_name, result_summary.results[test_name].type) for test_name in result_summary.failures)
self._all_tests = all_tests
self._test_timings = dict((test_tuple.test_name, test_tuple.test_run_time) for test_tuple in test_timings)
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index 4c4858b3b..69999f9e3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -319,12 +319,18 @@ class JSONResultsGeneratorBase(object):
# 120 seconds are more than enough to upload test results.
uploader = FileUploader(url, 120)
try:
- uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
+ response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
+ if response:
+ if response.code == 200:
+ _log.info("JSON uploaded.")
+ else:
+ _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
+ else:
+ _log.error("JSON upload failed; no response returned")
except Exception, err:
_log.error("Upload failed: %s" % err)
return
- _log.info("JSON files uploaded.")
def _get_test_timing(self, test_name):
"""Returns test timing data (elapsed time) in second
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py
index 4375e09dc..e9607279b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py
@@ -29,16 +29,15 @@
import itertools
class TestConfiguration(object):
- def __init__(self, version, architecture, build_type, graphics_type):
+ def __init__(self, version, architecture, build_type):
self.version = version
self.architecture = architecture
self.build_type = build_type
- self.graphics_type = graphics_type
@classmethod
def category_order(cls):
"""The most common human-readable order in which the configuration properties are listed."""
- return ['version', 'architecture', 'build_type', 'graphics_type']
+ return ['version', 'architecture', 'build_type']
def items(self):
return self.__dict__.items()
@@ -47,14 +46,14 @@ class TestConfiguration(object):
return self.__dict__.keys()
def __str__(self):
- return ("<%(version)s, %(architecture)s, %(build_type)s, %(graphics_type)s>" %
+ return ("<%(version)s, %(architecture)s, %(build_type)s>" %
self.__dict__)
def __repr__(self):
- return "TestConfig(version='%(version)s', architecture='%(architecture)s', build_type='%(build_type)s', graphics_type='%(graphics_type)s')" % self.__dict__
+ return "TestConfig(version='%(version)s', architecture='%(architecture)s', build_type='%(build_type)s')" % self.__dict__
def __hash__(self):
- return hash(self.version + self.architecture + self.build_type + self.graphics_type)
+ return hash(self.version + self.architecture + self.build_type)
def __eq__(self, other):
return self.__hash__() == other.__hash__()
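
Because __hash__ and __eq__ now derive from the three remaining fields, separately constructed configurations interchange freely as set members and dict keys. A quick standalone demonstration, re-declaring a stripped-down TestConfig rather than importing the real class; note that hashing concatenated strings can collide in theory ('ab' + 'c' versus 'a' + 'bc'), which the actual specifier values are unlikely to trigger:

    class TestConfig(object):
        def __init__(self, version, architecture, build_type):
            self.version = version
            self.architecture = architecture
            self.build_type = build_type

        def __hash__(self):
            return hash(self.version + self.architecture + self.build_type)

        def __eq__(self, other):
            return self.__hash__() == other.__hash__()

    seen = {TestConfig('xp', 'x86', 'release'): True}
    assert TestConfig('xp', 'x86', 'release') in seen
    assert TestConfig('xp', 'x86', 'debug') not in seen
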
@@ -116,19 +115,19 @@ class TestConfigurationConverter(object):
self._specifier_sorter = SpecifierSorter()
self._collapsing_sets_by_size = {}
self._junk_specifier_combinations = {}
- collapsing_sets_by_category = {}
+ self._collapsing_sets_by_category = {}
matching_sets_by_category = {}
for configuration in all_test_configurations:
for category, specifier in configuration.items():
self._specifier_to_configuration_set.setdefault(specifier, set()).add(configuration)
self._specifier_sorter.add_specifier(category, specifier)
- collapsing_sets_by_category.setdefault(category, set()).add(specifier)
+ self._collapsing_sets_by_category.setdefault(category, set()).add(specifier)
# FIXME: This seems extra-awful.
for cat2, spec2 in configuration.items():
if category == cat2:
continue
matching_sets_by_category.setdefault(specifier, {}).setdefault(cat2, set()).add(spec2)
- for collapsing_set in collapsing_sets_by_category.values():
+ for collapsing_set in self._collapsing_sets_by_category.values():
self._collapsing_sets_by_size.setdefault(len(collapsing_set), set()).add(frozenset(collapsing_set))
for specifier, sets_by_category in matching_sets_by_category.items():
@@ -166,16 +165,73 @@ class TestConfigurationConverter(object):
@classmethod
def collapse_macros(cls, macros_dict, specifiers_list):
- for i in range(len(specifiers_list)):
- for macro_specifier, macro in macros_dict.items():
- specifiers_set = set(specifiers_list[i])
+ for macro_specifier, macro in macros_dict.items():
+ if len(macro) == 1:
+ continue
+
+ for combination in cls.combinations(specifiers_list, len(macro)):
+ if cls.symmetric_difference(combination) == set(macro):
+ for item in combination:
+ specifiers_list.remove(item)
+ new_specifier_set = cls.intersect_combination(combination)
+ new_specifier_set.add(macro_specifier)
+ specifiers_list.append(frozenset(new_specifier_set))
+
+ def collapse_individual_specifier_set(macro_specifier, macro):
+ specifiers_to_remove = []
+ specifiers_to_add = []
+ for specifier_set in specifiers_list:
macro_set = set(macro)
- if specifiers_set >= macro_set:
- specifiers_list[i] = frozenset((specifiers_set - macro_set) | set([macro_specifier]))
+ if macro_set.intersection(specifier_set) == macro_set:
+ specifiers_to_remove.append(specifier_set)
+ specifiers_to_add.append(frozenset((set(specifier_set) - macro_set) | set([macro_specifier])))
+ for specifier in specifiers_to_remove:
+ specifiers_list.remove(specifier)
+ for specifier in specifiers_to_add:
+ specifiers_list.append(specifier)
+
+ for macro_specifier, macro in macros_dict.items():
+ collapse_individual_specifier_set(macro_specifier, macro)
+
+    # FIXME: itertools.combinations is buggy in Python 2.6.1 (the version that ships on SL).
+ # It seems to be okay in 2.6.5 or later; until then, this is the implementation given
+ # in http://docs.python.org/library/itertools.html (from 2.7).
+ @staticmethod
+ def combinations(iterable, r):
+ # combinations('ABCD', 2) --> AB AC AD BC BD CD
+ # combinations(range(4), 3) --> 012 013 023 123
+ pool = tuple(iterable)
+ n = len(pool)
+ if r > n:
+ return
+ indices = range(r)
+ yield tuple(pool[i] for i in indices)
+ while True:
+ for i in reversed(range(r)):
+ if indices[i] != i + n - r:
+ break
+ else:
+ return
+ indices[i] += 1
+ for j in range(i + 1, r):
+ indices[j] = indices[j - 1] + 1
+ yield tuple(pool[i] for i in indices)
+
+ @classmethod
+ def intersect_combination(cls, combination):
+ return reduce(set.intersection, [set(specifiers) for specifiers in combination])
+
+ @classmethod
+ def symmetric_difference(cls, iterable):
+ union = set()
+ intersection = iterable[0]
+ for item in iterable:
+ union = union | item
+ intersection = intersection.intersection(item)
+ return union - intersection
def to_specifiers_list(self, test_configuration_set):
"""Convert a set of TestConfiguration instances into one or more list of specifiers."""
-
# Easy out: if the set is all configurations, the modifier is empty.
if len(test_configuration_set) == len(self._all_test_configurations):
return [[]]
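
The three helpers added in this hunk (combinations, intersect_combination, symmetric_difference) are small enough to sanity-check in isolation. A miniature re-implementation of the latter two, matching the behavior the unit tests below assert:

    from functools import reduce  # built in on Python 2; the explicit import keeps this running on 3

    def intersect_combination(combination):
        return reduce(set.intersection, [set(specifiers) for specifiers in combination])

    def symmetric_difference(iterable):
        # Union of all sets minus the specifiers common to every set.
        union, intersection = set(), set(iterable[0])
        for item in iterable:
            union |= item
            intersection &= item
        return union - intersection

    sets = [set(['xp', 'x86', 'release']), set(['xp', 'x86', 'debug'])]
    assert symmetric_difference(sets) == set(['release', 'debug'])
    assert intersect_combination(sets) == set(['xp', 'x86'])
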
@@ -189,37 +245,31 @@ class TestConfigurationConverter(object):
values -= junk_specifier_set
specifiers_list.append(frozenset(values))
- def intersect_combination(combination):
- return reduce(set.intersection, [set(specifiers) for specifiers in combination])
-
- def symmetric_difference(iterable):
- return reduce(lambda x, y: x ^ y, iterable)
-
def try_collapsing(size, collapsing_sets):
if len(specifiers_list) < size:
return False
- for combination in itertools.combinations(specifiers_list, size):
- if symmetric_difference(combination) in collapsing_sets:
+ for combination in self.combinations(specifiers_list, size):
+ if self.symmetric_difference(combination) in collapsing_sets:
for item in combination:
specifiers_list.remove(item)
- specifiers_list.append(frozenset(intersect_combination(combination)))
+ specifiers_list.append(frozenset(self.intersect_combination(combination)))
return True
return False
# 2) Collapse specifier sets with common specifiers:
- # (xp, release, gpu), (xp, release, cpu) --> (xp, x86, release)
+        # (xp, x86, release), (xp, x86, debug) --> (xp, x86)
for size, collapsing_sets in self._collapsing_sets_by_size.items():
while try_collapsing(size, collapsing_sets):
pass
- def try_abbreviating():
+ def try_abbreviating(collapsing_sets):
if len(specifiers_list) < 2:
return False
- for combination in itertools.combinations(specifiers_list, 2):
+ for combination in self.combinations(specifiers_list, 2):
for collapsing_set in collapsing_sets:
- diff = symmetric_difference(combination)
+ diff = self.symmetric_difference(combination)
if diff <= collapsing_set:
- common = intersect_combination(combination)
+ common = self.intersect_combination(combination)
for item in combination:
specifiers_list.remove(item)
specifiers_list.append(frozenset(common | diff))
@@ -228,11 +278,30 @@ class TestConfigurationConverter(object):
# 3) Abbreviate specifier sets by combining specifiers across categories.
# (xp, release), (win7, release) --> (xp, win7, release)
- while try_abbreviating():
+ while try_abbreviating(self._collapsing_sets_by_size.values()):
pass
+
        # 4) Substitute specifier subsets that match macros within each set:
# (xp, vista, win7, release) -> (win, release)
self.collapse_macros(self._configuration_macros, specifiers_list)
+ macro_keys = set(self._configuration_macros.keys())
+
+        # 5) Collapsing macros may have created combinations that can now be abbreviated.
+ # (xp, release), (linux, x86, release), (linux, x86_64, release) --> (xp, release), (linux, release) --> (xp, linux, release)
+ while try_abbreviating([self._collapsing_sets_by_category['version'] | macro_keys]):
+ pass
+
+        # 6) When a set ends up containing every macro, the macros are redundant: drop them.
+ # (android, win, mac, linux, release) --> (release)
+ specifiers_to_remove = []
+ for specifier_set in specifiers_list:
+ if macro_keys <= specifier_set:
+ specifiers_to_remove.append(specifier_set)
+
+ for specifier_set in specifiers_to_remove:
+ specifiers_list.remove(specifier_set)
+ specifiers_list.append(frozenset(specifier_set - macro_keys))
+
return specifiers_list
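
Steps 2 through 6 above form a pipeline: collapse within a category, abbreviate across categories, substitute macros, then abbreviate and prune again. A worked trace of the macro stage using plain set arithmetic; the 'win' definition mirrors MOCK_MACROS from the unit tests below:

    win = set(['xp', 'vista', 'win7'])

    # After step 3, three per-version sets have been abbreviated into one:
    merged = frozenset(['xp', 'vista', 'win7', 'x86', 'release'])

    # Step 4 swaps the complete version triple for the 'win' macro.
    collapsed = frozenset((merged - win) | set(['win']))
    assert collapsed == frozenset(['win', 'x86', 'release'])
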
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
index 192836849..c367b7591 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
@@ -35,73 +35,69 @@ from webkitpy.layout_tests.models.test_configuration import *
def make_mock_all_test_configurations_set():
all_test_configurations = set()
- for version, architecture in (('snowleopard', 'x86'), ('xp', 'x86'), ('win7', 'x86'), ('lucid', 'x86'), ('lucid', 'x86_64')):
+ for version, architecture in (('snowleopard', 'x86'), ('xp', 'x86'), ('win7', 'x86'), ('vista', 'x86'), ('lucid', 'x86'), ('lucid', 'x86_64')):
for build_type in ('debug', 'release'):
- for graphics_type in ('cpu', 'gpu'):
- all_test_configurations.add(TestConfiguration(version, architecture, build_type, graphics_type))
+ all_test_configurations.add(TestConfiguration(version, architecture, build_type))
return all_test_configurations
MOCK_MACROS = {
'mac': ['snowleopard'],
- 'win': ['xp', 'win7'],
+ 'win': ['xp', 'vista', 'win7'],
'linux': ['lucid'],
}
class TestConfigurationTest(unittest.TestCase):
def test_items(self):
- config = TestConfiguration('xp', 'x86', 'release', 'cpu')
+ config = TestConfiguration('xp', 'x86', 'release')
result_config_dict = {}
for category, specifier in config.items():
result_config_dict[category] = specifier
- self.assertEquals({'version': 'xp', 'architecture': 'x86', 'build_type': 'release', 'graphics_type': 'cpu'}, result_config_dict)
+ self.assertEquals({'version': 'xp', 'architecture': 'x86', 'build_type': 'release'}, result_config_dict)
def test_keys(self):
- config = TestConfiguration('xp', 'x86', 'release', 'cpu')
+ config = TestConfiguration('xp', 'x86', 'release')
result_config_keys = []
for category in config.keys():
result_config_keys.append(category)
- self.assertEquals(set(['graphics_type', 'version', 'architecture', 'build_type']), set(result_config_keys))
+ self.assertEquals(set(['version', 'architecture', 'build_type']), set(result_config_keys))
def test_str(self):
- config = TestConfiguration('xp', 'x86', 'release', 'cpu')
- self.assertEquals('<xp, x86, release, cpu>', str(config))
+ config = TestConfiguration('xp', 'x86', 'release')
+ self.assertEquals('<xp, x86, release>', str(config))
def test_repr(self):
- config = TestConfiguration('xp', 'x86', 'release', 'cpu')
- self.assertEquals("TestConfig(version='xp', architecture='x86', build_type='release', graphics_type='cpu')", repr(config))
+ config = TestConfiguration('xp', 'x86', 'release')
+ self.assertEquals("TestConfig(version='xp', architecture='x86', build_type='release')", repr(config))
def test_hash(self):
config_dict = {}
- config_dict[TestConfiguration('xp', 'x86', 'release', 'cpu')] = True
- self.assertTrue(TestConfiguration('xp', 'x86', 'release', 'cpu') in config_dict)
- self.assertTrue(config_dict[TestConfiguration('xp', 'x86', 'release', 'cpu')])
- config_dict[TestConfiguration('xp', 'x86', 'release', 'gpu')] = False
- self.assertFalse(config_dict[TestConfiguration('xp', 'x86', 'release', 'gpu')])
+ config_dict[TestConfiguration('xp', 'x86', 'release')] = True
+ self.assertTrue(TestConfiguration('xp', 'x86', 'release') in config_dict)
+ self.assertTrue(config_dict[TestConfiguration('xp', 'x86', 'release')])
def query_unknown_key():
- config_dict[TestConfiguration('xp', 'x86', 'debug', 'gpu')]
+ config_dict[TestConfiguration('xp', 'x86', 'debug')]
self.assertRaises(KeyError, query_unknown_key)
- self.assertTrue(TestConfiguration('xp', 'x86', 'release', 'gpu') in config_dict)
- self.assertFalse(TestConfiguration('xp', 'x86', 'debug', 'gpu') in config_dict)
- configs_list = [TestConfiguration('xp', 'x86', 'release', 'gpu'), TestConfiguration('xp', 'x86', 'debug', 'gpu'), TestConfiguration('xp', 'x86', 'debug', 'gpu')]
+ self.assertTrue(TestConfiguration('xp', 'x86', 'release') in config_dict)
+ self.assertFalse(TestConfiguration('xp', 'x86', 'debug') in config_dict)
+ configs_list = [TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'), TestConfiguration('xp', 'x86', 'debug')]
self.assertEquals(len(configs_list), 3)
self.assertEquals(len(set(configs_list)), 2)
def test_eq(self):
- self.assertEquals(TestConfiguration('xp', 'x86', 'release', 'cpu'), TestConfiguration('xp', 'x86', 'release', 'cpu'))
+ self.assertEquals(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'release'))
host = MockHost()
test_port = host.port_factory.get('test-win-xp', None)
- self.assertNotEquals(TestConfiguration('xp', 'x86', 'release', 'gpu'), TestConfiguration('xp', 'x86', 'release', 'cpu'))
- self.assertNotEquals(TestConfiguration('xp', 'x86', 'debug', 'cpu'), TestConfiguration('xp', 'x86', 'release', 'cpu'))
+ self.assertNotEquals(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'))
def test_values(self):
- config = TestConfiguration('xp', 'x86', 'release', 'cpu')
+ config = TestConfiguration('xp', 'x86', 'release')
result_config_values = []
for value in config.values():
result_config_values.append(value)
- self.assertEquals(set(['xp', 'x86', 'release', 'cpu']), set(result_config_values))
+ self.assertEquals(set(['xp', 'x86', 'release']), set(result_config_values))
class SpecifierSorterTest(unittest.TestCase):
@@ -143,7 +139,6 @@ class SpecifierSorterTest(unittest.TestCase):
def test_specifier_priority(self):
sorter = SpecifierSorter(self._all_test_configurations)
self.assertEquals(sorter.specifier_priority('x86'), 1)
- self.assertEquals(sorter.specifier_priority('gpu'), 3)
self.assertEquals(sorter.specifier_priority('snowleopard'), 0)
def test_sort_specifiers(self):
@@ -151,9 +146,9 @@ class SpecifierSorterTest(unittest.TestCase):
self.assertEquals(sorter.sort_specifiers(set()), [])
self.assertEquals(sorter.sort_specifiers(set(['x86'])), ['x86'])
self.assertEquals(sorter.sort_specifiers(set(['x86', 'win7'])), ['win7', 'x86'])
- self.assertEquals(sorter.sort_specifiers(set(['gpu', 'x86', 'debug', 'win7'])), ['win7', 'x86', 'debug', 'gpu'])
- self.assertEquals(sorter.sort_specifiers(set(['gpu', 'snowleopard', 'x86', 'debug', 'win7'])), ['snowleopard', 'win7', 'x86', 'debug', 'gpu'])
- self.assertEquals(sorter.sort_specifiers(set(['gpu', 'x86', 'mac', 'debug', 'win7'])), ['mac', 'win7', 'x86', 'debug', 'gpu'])
+ self.assertEquals(sorter.sort_specifiers(set(['x86', 'debug', 'win7'])), ['win7', 'x86', 'debug'])
+ self.assertEquals(sorter.sort_specifiers(set(['snowleopard', 'x86', 'debug', 'win7'])), ['snowleopard', 'win7', 'x86', 'debug'])
+ self.assertEquals(sorter.sort_specifiers(set(['x86', 'mac', 'debug', 'win7'])), ['mac', 'win7', 'x86', 'debug'])
class TestConfigurationConverterTest(unittest.TestCase):
@@ -161,6 +156,10 @@ class TestConfigurationConverterTest(unittest.TestCase):
self._all_test_configurations = make_mock_all_test_configurations_set()
unittest.TestCase.__init__(self, testFunc)
+ def test_symmetric_difference(self):
+ self.assertEquals(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c'])]), set(['a', 'c']))
+ self.assertEquals(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c']), set(['b', 'd'])]), set(['a', 'c', 'd']))
+
def test_to_config_set(self):
converter = TestConfigurationConverter(self._all_test_configurations)
@@ -177,148 +176,135 @@ class TestConfigurationConverterTest(unittest.TestCase):
self.assertEquals(converter.to_config_set(set(['xp', 'x86_64'])), set())
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('xp', 'x86', 'release', 'cpu'),
+ TestConfiguration('xp', 'x86', 'release'),
])
self.assertEquals(converter.to_config_set(set(['xp', 'release'])), configs_to_match)
configs_to_match = set([
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'gpu'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
])
- self.assertEquals(converter.to_config_set(set(['gpu', 'release'])), configs_to_match)
+ self.assertEquals(converter.to_config_set(set(['release'])), configs_to_match)
configs_to_match = set([
- TestConfiguration('lucid', 'x86_64', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'debug', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86_64', 'debug', 'cpu'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
])
self.assertEquals(converter.to_config_set(set(['x86_64'])), configs_to_match)
configs_to_match = set([
- TestConfiguration('lucid', 'x86_64', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'debug', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86_64', 'debug', 'cpu'),
- TestConfiguration('lucid', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86', 'debug', 'gpu'),
- TestConfiguration('lucid', 'x86', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86', 'debug', 'cpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'debug', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'cpu'),
- TestConfiguration('snowleopard', 'x86', 'debug', 'cpu'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'debug'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'debug'),
])
self.assertEquals(converter.to_config_set(set(['lucid', 'snowleopard'])), configs_to_match)
configs_to_match = set([
- TestConfiguration('lucid', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86', 'debug', 'gpu'),
- TestConfiguration('lucid', 'x86', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86', 'debug', 'cpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'debug', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'cpu'),
- TestConfiguration('snowleopard', 'x86', 'debug', 'cpu'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'debug'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'debug'),
])
self.assertEquals(converter.to_config_set(set(['lucid', 'snowleopard', 'x86'])), configs_to_match)
configs_to_match = set([
- TestConfiguration('lucid', 'x86_64', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86', 'release', 'cpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'cpu'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
])
- self.assertEquals(converter.to_config_set(set(['lucid', 'snowleopard', 'release', 'cpu'])), configs_to_match)
+ self.assertEquals(converter.to_config_set(set(['lucid', 'snowleopard', 'release'])), configs_to_match)
def test_macro_expansion(self):
converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('xp', 'x86', 'release', 'cpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'cpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
])
self.assertEquals(converter.to_config_set(set(['win', 'release'])), configs_to_match)
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'gpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
])
- self.assertEquals(converter.to_config_set(set(['win', 'lucid', 'release', 'gpu'])), configs_to_match)
+ self.assertEquals(converter.to_config_set(set(['win', 'lucid', 'release'])), configs_to_match)
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
])
- self.assertEquals(converter.to_config_set(set(['win', 'mac', 'release', 'gpu'])), configs_to_match)
+ self.assertEquals(converter.to_config_set(set(['win', 'mac', 'release'])), configs_to_match)
def test_to_specifier_lists(self):
- converter = TestConfigurationConverter(self._all_test_configurations)
+ converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
self.assertEquals(converter.to_specifiers_list(set(self._all_test_configurations)), [[]])
self.assertEquals(converter.to_specifiers_list(set()), [])
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('xp', 'x86', 'release', 'cpu'),
+ TestConfiguration('xp', 'x86', 'release'),
])
self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp'])])
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('xp', 'x86', 'release', 'cpu'),
- TestConfiguration('xp', 'x86', 'debug', 'gpu'),
- TestConfiguration('xp', 'x86', 'debug', 'cpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'debug'),
])
self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['xp'])])
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'debug', 'cpu'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('xp', 'x86', 'release'),
])
- self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['debug', 'x86_64', 'lucid', 'cpu']), set(['release', 'gpu', 'xp'])])
+ self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp']), set(['debug', 'x86_64', 'linux'])])
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('xp', 'x86', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86_64', 'debug', 'cpu'),
- TestConfiguration('lucid', 'x86', 'debug', 'cpu'),
- TestConfiguration('lucid', 'x86_64', 'debug', 'gpu'),
- TestConfiguration('lucid', 'x86', 'debug', 'gpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('lucid', 'x86', 'debug'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('lucid', 'x86', 'debug'),
])
- self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp']), set(['debug', 'lucid'])])
+ self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release', 'xp']), set(['debug', 'linux'])])
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'gpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
])
- self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release', 'gpu'])])
+ self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release'])])
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
])
- self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['xp', 'snowleopard', 'release', 'gpu'])])
+ self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['xp', 'mac', 'release'])])
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'debug', 'gpu'),
- TestConfiguration('lucid', 'x86', 'release', 'gpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'debug'),
+ TestConfiguration('lucid', 'x86', 'release'),
])
- self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['release', 'gpu', 'lucid', 'x86']), set(['gpu', 'win7']), set(['release', 'gpu', 'xp', 'snowleopard'])])
+ self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win7']), set(['release', 'linux', 'x86']), set(['release', 'xp', 'mac'])])
def test_macro_collapsing(self):
macros = {'foo': ['bar', 'baz'], 'people': ['bob', 'alice', 'john']}
@@ -343,27 +329,43 @@ class TestConfigurationConverterTest(unittest.TestCase):
converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('xp', 'x86', 'release', 'cpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'cpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
])
self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'gpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
+ ])
+ self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'linux', 'release'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
])
- self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'linux', 'release', 'gpu'])])
+ self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
configs_to_match = set([
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
])
- self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release', 'gpu'])])
+ self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
+
+ configs_to_match = set([
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'release'),
+ ])
+ self.assertEquals(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])
def test_specifier_converter_access(self):
specifier_sorter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS).specifier_sorter()
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
index ea4837d93..2420b9fdf 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -119,7 +119,7 @@ class TestExpectationSerializer(object):
self._test_configuration_converter = test_configuration_converter
self._parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
- def to_string(self, expectation_line):
+ def to_string(self, expectation_line, include_modifiers=True, include_expectations=True, include_comment=True):
if expectation_line.is_invalid():
return expectation_line.original_string or ''
@@ -135,7 +135,15 @@ class TestExpectationSerializer(object):
result.append(self._format_result(modifiers, expectation_line.name, expectations, expectation_line.comment))
return "\n".join(result) if result else None
- return self._format_result(" ".join(expectation_line.modifiers), expectation_line.name, " ".join(expectation_line.expectations), expectation_line.comment)
+ return self._format_result(" ".join(expectation_line.modifiers),
+ expectation_line.name,
+ " ".join(expectation_line.expectations),
+ expectation_line.comment,
+ include_modifiers, include_expectations, include_comment)
+
+ def to_csv(self, expectation_line):
+ # Note that this doesn't include the comments.
+ return '%s,%s,%s' % (expectation_line.name, ' '.join(expectation_line.modifiers), ' '.join(expectation_line.expectations))
def _parsed_expectations_string(self, expectation_line):
result = []
@@ -154,9 +162,14 @@ class TestExpectationSerializer(object):
return ' '.join(result)
@classmethod
- def _format_result(cls, modifiers, name, expectations, comment):
- result = "%s : %s = %s" % (modifiers.upper(), name, expectations.upper())
- if comment is not None:
+ def _format_result(cls, modifiers, name, expectations, comment, include_modifiers=True, include_expectations=True, include_comment=True):
+ result = ''
+ if include_modifiers:
+ result += '%s : ' % modifiers.upper()
+ result += name
+ if include_expectations:
+ result += ' = %s' % expectations.upper()
+ if include_comment and comment is not None:
result += " //%s" % comment
return result
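
For reference, a sketch of the strings the new include_* flags produce (doctest-style; the sample line values are invented, and the casing assumes the parser stores lowercase tokens):

    >>> TestExpectationSerializer._format_result('bugx xp', 'fast/t.html', 'text', None)
    'BUGX XP : fast/t.html = TEXT'
    >>> TestExpectationSerializer._format_result('bugx xp', 'fast/t.html', 'text', None, include_modifiers=False)
    'fast/t.html = TEXT'
    >>> TestExpectationSerializer._format_result('bugx xp', 'fast/t.html', 'text', None, include_expectations=False)
    'BUGX XP : fast/t.html'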
@@ -165,7 +178,9 @@ class TestExpectationSerializer(object):
serializer = cls(test_configuration_converter)
def serialize(expectation_line):
- if not reconstitute_only_these or expectation_line in reconstitute_only_these:
+ # If reconstitute_only_these is an empty list, we want to return original_string.
+ # So we need to compare reconstitute_only_these to None, not just check whether it's falsy.
+ if reconstitute_only_these is None or expectation_line in reconstitute_only_these:
return serializer.to_string(expectation_line)
return expectation_line.original_string
@@ -393,6 +408,7 @@ class TestExpectationLine:
expectation_line.name = test
expectation_line.path = test
expectation_line.parsed_expectations = set([PASS])
+ expectation_line.expectations = set(['PASS'])
expectation_line.matching_tests = [test]
return expectation_line
@@ -443,6 +459,24 @@ class TestExpectationsModel(object):
return tests
+ def get_test_set_for_keyword(self, keyword):
+ # FIXME: get_test_set() is an awkward public interface because it requires
+ # callers to know the difference between modifiers and expectations. We
+ # should replace that with this where possible.
+ expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
+ if expectation_enum is not None:
+ return self._expectation_to_tests[expectation_enum]
+ modifier_enum = TestExpectations.MODIFIERS.get(keyword.lower(), None)
+ if modifier_enum is not None:
+ return self._modifier_to_tests[modifier_enum]
+
+ # We don't have an index for this modifier, so scan every test's modifier list.
+ matching_tests = set()
+ for test, modifiers in self._test_to_modifiers.iteritems():
+ if keyword.lower() in modifiers:
+ matching_tests.add(test)
+ return matching_tests
+
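
A hedged usage sketch for the new keyword lookup; model() is the accessor added elsewhere in this patch, and the keywords and test names are illustrative:

    model = expectations.model()
    model.get_test_set_for_keyword('crash')  # known expectation -> indexed lookup
    model.get_test_set_for_keyword('skip')   # known modifier -> indexed lookup
    model.get_test_set_for_keyword('bugx')   # anything else falls back to scanning _test_to_modifiers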
def get_tests_with_result_type(self, result_type):
return self._result_type_to_tests[result_type]
@@ -456,6 +490,10 @@ class TestExpectationsModel(object):
def has_modifier(self, test, modifier):
return test in self._modifier_to_tests[modifier]
+ def has_keyword(self, test, keyword):
+ return (keyword.upper() in self.get_expectations_string(test) or
+ keyword.lower() in self.get_modifiers(test))
+
def has_test(self, test):
return test in self._test_to_expectation_line
@@ -465,21 +503,21 @@ class TestExpectationsModel(object):
def get_expectations(self, test):
return self._test_to_expectations[test]
- def add_expectation_line(self, expectation_line, overrides_allowed):
+ def add_expectation_line(self, expectation_line, in_overrides=False, in_skipped=False):
"""Returns a list of warnings encountered while matching modifiers."""
if expectation_line.is_invalid():
return
for test in expectation_line.matching_tests:
- if self._already_seen_better_match(test, expectation_line, overrides_allowed):
+ if not in_skipped and self._already_seen_better_match(test, expectation_line, in_overrides):
continue
self._clear_expectations_for_test(test, expectation_line)
self._test_to_expectation_line[test] = expectation_line
- self._add_test(test, expectation_line, overrides_allowed)
+ self._add_test(test, expectation_line, in_overrides)
- def _add_test(self, test, expectation_line, overrides_allowed):
+ def _add_test(self, test, expectation_line, in_overrides):
"""Sets the expected state for a given test.
This routine assumes the test has not been added before. If it has,
@@ -489,8 +527,7 @@ class TestExpectationsModel(object):
Args:
test: test to add
expectation_line: expectation to add
- overrides_allowed: whether we're parsing the regular expectations
- or the overridding expectations"""
+ in_overrides: whether we're parsing the overriding expectations rather than the regular expectations"""
self._test_to_expectations[test] = expectation_line.parsed_expectations
for expectation in expectation_line.parsed_expectations:
self._expectation_to_tests[expectation].add(test)
@@ -514,7 +551,7 @@ class TestExpectationsModel(object):
else:
self._result_type_to_tests[FAIL].add(test)
- if overrides_allowed:
+ if in_overrides:
self._overridding_tests.add(test)
def _clear_expectations_for_test(self, test, expectation_line):
@@ -539,7 +576,7 @@ class TestExpectationsModel(object):
if test in set_of_tests:
set_of_tests.remove(test)
- def _already_seen_better_match(self, test, expectation_line, overrides_allowed):
+ def _already_seen_better_match(self, test, expectation_line, in_overrides):
"""Returns whether we've seen a better match already in the file.
Returns True if we've already seen an expectation_line.name that matches more of the test
@@ -560,7 +597,7 @@ class TestExpectationsModel(object):
# This path matches more of the test.
return False
- if overrides_allowed and test not in self._overridding_tests:
+ if in_overrides and test not in self._overridding_tests:
# We have seen this path, but that's okay because it is
# in the overrides and the earlier path was in the
# expectations (not the overrides).
@@ -569,7 +606,7 @@ class TestExpectationsModel(object):
# At this point we know we have seen a previous exact match on this
# base path, so we need to check the two sets of modifiers.
- if overrides_allowed:
+ if in_overrides:
expectation_source = "override"
else:
expectation_source = "expectation"
@@ -657,8 +694,7 @@ class TestExpectations(object):
IMAGE_PLUS_TEXT: ('image and text mismatch',
'image and text mismatch'),
AUDIO: ('audio mismatch', 'audio mismatch'),
- CRASH: ('DumpRenderTree crash',
- 'DumpRenderTree crashes'),
+ CRASH: ('crash', 'crashes'),
TIMEOUT: ('test timed out', 'tests timed out'),
MISSING: ('no expected result found',
'no expected results found')}
@@ -687,7 +723,8 @@ class TestExpectations(object):
return cls.EXPECTATIONS.get(string.lower())
def __init__(self, port, tests, expectations,
- test_config, is_lint_mode=False, overrides=None):
+ test_config, is_lint_mode=False, overrides=None,
+ skipped_tests=None):
"""Loads and parses the test expectations given in the string.
Args:
port: handle to object containing platform-specific functionality
@@ -702,6 +739,7 @@ class TestExpectations(object):
entries in |expectations|. This is used by callers
that need to manage two sets of expectations (e.g., upstream
and downstream expectations).
+ skipped_tests: test paths to skip.
"""
self._full_test_list = tests
self._test_config = test_config
@@ -712,20 +750,23 @@ class TestExpectations(object):
self._skipped_tests_warnings = []
self._expectations = self._parser.parse(expectations)
- self._add_expectations(self._expectations, overrides_allowed=False)
- self._add_skipped_tests(port.skipped_tests(tests))
+ self._add_expectations(self._expectations, in_overrides=False)
if overrides:
overrides_expectations = self._parser.parse(overrides)
- self._add_expectations(overrides_expectations, overrides_allowed=True)
+ self._add_expectations(overrides_expectations, in_overrides=True)
self._expectations += overrides_expectations
+ self._add_skipped_tests(skipped_tests or [])
+
self._has_warnings = False
self._report_warnings()
self._process_tests_without_expectations()
# TODO(ojan): Allow for removing skipped tests when getting the list of
# tests to run, but not when getting metrics.
+ def model(self):
+ return self._model
def get_rebaselining_failures(self):
return (self._model.get_test_set(REBASELINE, FAIL) |
@@ -809,11 +850,34 @@ class TestExpectations(object):
if self._full_test_list:
for test in self._full_test_list:
if not self._model.has_test(test):
- self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test), overrides_allowed=False)
+ self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test))
def has_warnings(self):
return self._has_warnings
+ def remove_configuration_from_test(self, test, test_configuration):
+ expectations_to_remove = []
+ modified_expectations = []
+
+ for expectation in self._expectations:
+ if expectation.name != test or expectation.is_flaky() or not expectation.parsed_expectations:
+ continue
+ if iter(expectation.parsed_expectations).next() not in (FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO):
+ continue
+ if test_configuration not in expectation.matching_configurations:
+ continue
+
+ expectation.matching_configurations.remove(test_configuration)
+ if expectation.matching_configurations:
+ modified_expectations.append(expectation)
+ else:
+ expectations_to_remove.append(expectation)
+
+ for expectation in expectations_to_remove:
+ self._expectations.remove(expectation)
+
+ return TestExpectationSerializer.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)
+
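
Behavior sketch, matching the RemoveConfigurationsTest added below (test and port names come from those tests):

    test_config = test_port.test_configuration()  # e.g. test-win-xp, release
    actual = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
    # The XP entry leaves matching_configurations, so 'BUGX LINUX WIN RELEASE : ... = TEXT'
    # re-serializes as 'BUGX LINUX VISTA WIN7 RELEASE : ... = TEXT'; a line whose
    # configuration set empties out is removed from self._expectations entirely.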
def remove_rebaselined_tests(self, except_these_tests):
"""Returns a copy of the expectations with the tests removed."""
def without_rebaseline_modifier(expectation):
@@ -821,13 +885,13 @@ class TestExpectations(object):
return TestExpectationSerializer.list_to_string(filter(without_rebaseline_modifier, self._expectations))
- def _add_expectations(self, expectation_list, overrides_allowed):
+ def _add_expectations(self, expectation_list, in_overrides):
for expectation_line in expectation_list:
if not expectation_line.expectations:
continue
if self._is_lint_mode or self._test_config in expectation_line.matching_configurations:
- self._model.add_expectation_line(expectation_line, overrides_allowed)
+ self._model.add_expectation_line(expectation_line, in_overrides)
def _add_skipped_tests(self, tests_to_skip):
if not tests_to_skip:
@@ -835,5 +899,7 @@ class TestExpectations(object):
for index, test in enumerate(self._expectations, start=1):
if test.name and test.name in tests_to_skip:
self._skipped_tests_warnings.append(':%d %s is also in a Skipped file.' % (index, test.name))
+
for test_name in tests_to_skip:
- self._model.add_expectation_line(self._parser.expectation_for_skipped_test(test_name), overrides_allowed=True)
+ expectation_line = self._parser.expectation_for_skipped_test(test_name)
+ self._model.add_expectation_line(expectation_line, in_skipped=True)
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
index 00f1bfd4c..39a46137f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -225,12 +225,6 @@ SKIP : failures/expected/image.html""", is_lint_mode=True)
'BUG_TEST DEBUG : failures/expected/text.html = TEXT\nBUG_TEST DEBUG : failures/expected/text.html = TEXT',
is_lint_mode=True)
- def test_error_on_different_graphics_type(self):
- # parse_exp uses a CPU port. Assert errors on GPU show up in lint mode.
- self.assertRaises(ParseError, self.parse_exp,
- 'BUG_TEST GPU : failures/expected/text.html = TEXT\nBUG_TEST GPU : failures/expected/text.html = TEXT',
- is_lint_mode=True)
-
def test_overrides(self):
self.parse_exp("BUG_EXP: failures/expected/text.html = TEXT",
"BUG_OVERRIDE : failures/expected/text.html = IMAGE")
@@ -266,19 +260,41 @@ SKIP : failures/expected/image.html""", is_lint_mode=True)
'failures/expected/text.html') in
self._exp.get_tests_with_result_type(SKIP))
- def test_add_skipped_tests(self):
- port = MockHost().port_factory.get('qt')
- port._filesystem.files[port._filesystem.join(port.layout_tests_dir(), 'platform/qt/Skipped')] = 'failures/expected/text.html'
- port._filesystem.files[port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html')] = 'foo'
- expectations = TestExpectations(port, tests=['failures/expected/text.html'], expectations='', test_config=port.test_configuration())
- self.assertEquals(expectations.get_modifiers('failures/expected/text.html'), [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER])
- self.assertEquals(expectations.get_expectations('failures/expected/text.html'), set([PASS]))
- def test_add_skipped_tests_duplicate(self):
+class SkippedTests(Base):
+ def check(self, expectations, overrides, skips, lint=False):
port = MockHost().port_factory.get('qt')
- port._filesystem.files[port._filesystem.join(port.layout_tests_dir(), 'platform/qt/Skipped')] = 'failures/expected/text.html'
- port._filesystem.files[port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html')] = 'foo'
- self.assertRaises(ParseError, TestExpectations, port, tests=['failures/expected/text.html'], expectations='BUGX : failures/expected/text.html = text\n', test_config=port.test_configuration(), is_lint_mode=True)
+ port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), 'failures/expected/text.html'), 'foo')
+ exp = TestExpectations(port, tests=['failures/expected/text.html'],
+ expectations=expectations, overrides=overrides, is_lint_mode=lint,
+ test_config=port.test_configuration(), skipped_tests=set(skips))
+
+ # Check that the expectation is for BUG_DUMMY SKIP : ... = PASS
+ self.assertEquals(exp.get_modifiers('failures/expected/text.html'),
+ [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER])
+ self.assertEquals(exp.get_expectations('failures/expected/text.html'), set([PASS]))
+
+ def test_skipped_tests_work(self):
+ self.check(expectations='', overrides=None, skips=['failures/expected/text.html'])
+
+ def test_duplicate_skipped_test_fails_lint(self):
+ self.assertRaises(ParseError, self.check, expectations='BUGX : failures/expected/text.html = text\n', overrides=None, skips=['failures/expected/text.html'], lint=True)
+
+ def test_skipped_file_overrides_expectations(self):
+ self.check(expectations='BUGX : failures/expected/text.html = TEXT\n', overrides=None,
+ skips=['failures/expected/text.html'])
+
+ def test_skipped_dir_overrides_expectations(self):
+ self.check(expectations='BUGX : failures/expected/text.html = TEXT\n', overrides=None,
+ skips=['failures/expected'])
+
+ def test_skipped_file_overrides_overrides(self):
+ self.check(expectations='', overrides='BUGX : failures/expected/text.html = TEXT\n',
+ skips=['failures/expected/text.html'])
+
+ def test_skipped_dir_overrides_overrides(self):
+ self.check(expectations='', overrides='BUGX : failures/expected/text.html = TEXT\n',
+ skips=['failures/expected'])
class ExpectationSyntaxTests(Base):
@@ -386,6 +402,53 @@ BUGX : failures/expected/text.html = TEXT
"BUG_TEST XP : passes/text.html = TEXT\n")
+class RemoveConfigurationsTest(Base):
+ def test_remove(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ expectations = TestExpectations(test_port,
+ tests=self.get_basic_tests(),
+ expectations="""BUGX LINUX WIN RELEASE : failures/expected/foo.html = TEXT
+BUGY WIN MAC DEBUG : failures/expected/foo.html = CRASH
+""",
+ test_config=test_config,
+ is_lint_mode=False,
+ overrides=None)
+
+ actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+
+ self.assertEqual("""BUGX LINUX VISTA WIN7 RELEASE : failures/expected/foo.html = TEXT
+BUGY WIN MAC DEBUG : failures/expected/foo.html = CRASH
+""", actual_expectations)
+
+ def test_remove_line(self):
+ host = MockHost()
+ test_port = host.port_factory.get('test-win-xp', None)
+ test_port.test_exists = lambda test: True
+ test_port.test_isfile = lambda test: True
+
+ test_config = test_port.test_configuration()
+ expectations = TestExpectations(test_port,
+ tests=None,
+ expectations="""BUGX WIN RELEASE : failures/expected/foo.html = TEXT
+BUGY WIN DEBUG : failures/expected/foo.html = CRASH
+""",
+ test_config=test_config,
+ is_lint_mode=False,
+ overrides=None)
+
+ actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', test_config)
+ actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-vista', None).test_configuration())
+ actual_expectations = expectations.remove_configuration_from_test('failures/expected/foo.html', host.port_factory.get('test-win-win7', None).test_configuration())
+
+ self.assertEqual("""BUGY WIN DEBUG : failures/expected/foo.html = CRASH
+""", actual_expectations)
+
+
class RebaseliningTest(Base):
"""Test rebaselining-specific functionality."""
def assertRemove(self, input_expectations, tests, expected_expectations):
@@ -525,12 +588,10 @@ class TestExpectationSerializerTests(unittest.TestCase):
expectation_line.name = 'test/name/for/realz.html'
expectation_line.parsed_expectations = set([IMAGE])
self.assertEqual(self._serializer.to_string(expectation_line), None)
- expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release', 'cpu')])
- self.assertEqual(self._serializer.to_string(expectation_line), 'BUGX XP RELEASE CPU : test/name/for/realz.html = IMAGE')
- expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release', 'cpu'), TestConfiguration('xp', 'x86', 'release', 'gpu')])
+ expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release')])
self.assertEqual(self._serializer.to_string(expectation_line), 'BUGX XP RELEASE : test/name/for/realz.html = IMAGE')
- expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release', 'cpu'), TestConfiguration('xp', 'x86', 'debug', 'gpu')])
- self.assertEqual(self._serializer.to_string(expectation_line), 'BUGX XP RELEASE CPU : test/name/for/realz.html = IMAGE\nBUGX XP DEBUG GPU : test/name/for/realz.html = IMAGE')
+ expectation_line.matching_configurations = set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')])
+ self.assertEqual(self._serializer.to_string(expectation_line), 'BUGX XP : test/name/for/realz.html = IMAGE')
def test_parsed_expectations_string(self):
expectation_line = TestExpectationLine()
@@ -610,13 +671,12 @@ class TestExpectationSerializerTests(unittest.TestCase):
if reconstitute:
reconstitute_only_these.append(expectation_line)
- add_line(set([TestConfiguration('xp', 'x86', 'release', 'cpu')]), False)
- add_line(set([TestConfiguration('xp', 'x86', 'release', 'cpu'), TestConfiguration('xp', 'x86', 'release', 'gpu')]), True)
- add_line(set([TestConfiguration('xp', 'x86', 'release', 'cpu'), TestConfiguration('xp', 'x86', 'debug', 'gpu')]), False)
+ add_line(set([TestConfiguration('xp', 'x86', 'release')]), True)
+ add_line(set([TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug')]), False)
serialized = TestExpectationSerializer.list_to_string(lines, self._converter)
- self.assertEquals(serialized, "BUGX XP RELEASE CPU : Yay = IMAGE\nBUGX XP RELEASE : Yay = IMAGE\nBUGX XP RELEASE CPU : Yay = IMAGE\nBUGX XP DEBUG GPU : Yay = IMAGE")
+ self.assertEquals(serialized, "BUGX XP RELEASE : Yay = IMAGE\nBUGX XP : Yay = IMAGE")
serialized = TestExpectationSerializer.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
- self.assertEquals(serialized, "Nay\nBUGX XP RELEASE : Yay = IMAGE\nNay")
+ self.assertEquals(serialized, "BUGX XP RELEASE : Yay = IMAGE\nNay")
def test_string_whitespace_stripping(self):
self.assert_round_trip('\n', '')
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py b/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
index f2f246e30..029094ec4 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
@@ -54,6 +54,8 @@ def determine_result_type(failure_list):
return test_expectations.CRASH
elif FailureTimeout in failure_types:
return test_expectations.TIMEOUT
+ elif FailureEarlyExit in failure_types:
+ return test_expectations.SKIP
elif (FailureMissingResult in failure_types or
FailureMissingImage in failure_types or
FailureMissingImageHash in failure_types or
@@ -85,8 +87,7 @@ class TestFailure(object):
"""Creates a TestFailure object from the specified string."""
return cPickle.loads(s)
- @staticmethod
- def message():
+ def message(self):
"""Returns a string describing the failure in more detail."""
raise NotImplementedError
@@ -113,8 +114,7 @@ class FailureTimeout(TestFailure):
def __init__(self, is_reftest=False):
self.is_reftest = is_reftest
- @staticmethod
- def message():
+ def message(self):
return "Test timed out"
def driver_needs_restart(self):
@@ -122,14 +122,16 @@ class FailureTimeout(TestFailure):
class FailureCrash(TestFailure):
- """DumpRenderTree crashed."""
- def __init__(self, is_reftest=False):
+ """DumpRenderTree/WebKitTestRunner crashed."""
+ def __init__(self, is_reftest=False, process_name='DumpRenderTree', pid=None):
+ self.process_name = process_name
+ self.pid = pid
self.is_reftest = is_reftest
- @staticmethod
- def message():
- # FIXME: This is wrong for WebKit2 (which uses WebKitTestRunner).
- return "DumpRenderTree crashed"
+ def message(self):
+ if self.pid:
+ return "%s (pid %d) crashed" % (self.process_name, self.pid)
+ return self.process_name + " crashed"
def driver_needs_restart(self):
return True
@@ -138,32 +140,28 @@ class FailureCrash(TestFailure):
class FailureMissingResult(TestFailure):
"""Expected result was missing."""
- @staticmethod
- def message():
+ def message(self):
return "No expected results found"
class FailureTextMismatch(TestFailure):
"""Text diff output failed."""
- @staticmethod
- def message():
+ def message(self):
return "Text diff mismatch"
class FailureMissingImageHash(TestFailure):
"""Actual result hash was missing."""
- @staticmethod
- def message():
+ def message(self):
return "No expected image hash found"
class FailureMissingImage(TestFailure):
"""Actual result image was missing."""
- @staticmethod
- def message():
+ def message(self):
return "No expected image found"
@@ -172,16 +170,14 @@ class FailureImageHashMismatch(TestFailure):
def __init__(self, diff_percent=0):
self.diff_percent = diff_percent
- @staticmethod
- def message():
+ def message(self):
return "Image mismatch"
class FailureImageHashIncorrect(TestFailure):
"""Actual result hash is incorrect."""
- @staticmethod
- def message():
+ def message(self):
return "Images match, expected image hash incorrect. "
@@ -190,9 +186,9 @@ class FailureReftestMismatch(TestFailure):
def __init__(self, reference_filename=None):
self.reference_filename = reference_filename
+ self.diff_percent = None
- @staticmethod
- def message():
+ def message(self):
return "Mismatch with reference"
@@ -202,8 +198,7 @@ class FailureReftestMismatchDidNotOccur(TestFailure):
def __init__(self, reference_filename=None):
self.reference_filename = reference_filename
- @staticmethod
- def message():
+ def message(self):
return "Mismatch with the reference did not occur"
@@ -213,31 +208,35 @@ class FailureReftestNoImagesGenerated(TestFailure):
def __init__(self, reference_filename=None):
self.reference_filename = reference_filename
- @staticmethod
- def message():
+ def message(self):
return "Reftest didn't generate pixel results."
class FailureMissingAudio(TestFailure):
"""Actual result image was missing."""
- @staticmethod
- def message():
+ def message(self):
return "No expected audio found"
class FailureAudioMismatch(TestFailure):
"""Audio files didn't match."""
- @staticmethod
- def message():
+ def message(self):
return "Audio mismatch"
+class FailureEarlyExit(TestFailure):
+ def message(self):
+ return "Skipped due to early exit"
+
+
# Convenient collection of all failure classes for anything that might
# need to enumerate over them all.
ALL_FAILURE_CLASSES = (FailureTimeout, FailureCrash, FailureMissingResult,
FailureTextMismatch, FailureMissingImageHash,
FailureMissingImage, FailureImageHashMismatch,
FailureImageHashIncorrect, FailureReftestMismatch,
- FailureReftestMismatchDidNotOccur, FailureReftestNoImagesGenerated)
+ FailureReftestMismatchDidNotOccur, FailureReftestNoImagesGenerated,
+ FailureMissingAudio, FailureAudioMismatch,
+ FailureEarlyExit)
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
index ed9104738..3b9ba33d0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
@@ -63,3 +63,7 @@ class TestFailuresTest(unittest.TestCase):
# The hash happens to be the name of the class, but sets still work:
crash_set = set([FailureCrash(), "FailureCrash"])
self.assertEqual(len(crash_set), 2)
+
+ def test_crashes(self):
+ self.assertEquals(FailureCrash().message(), 'DumpRenderTree crashed')
+ self.assertEquals(FailureCrash(process_name='foo', pid=1234).message(), 'foo (pid 1234) crashed')
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_input.py b/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
index 5c8b30d2e..5a016f621 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
@@ -29,24 +29,24 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-class TestInput:
+class TestInput(object):
"""Groups information about a test for easy passing of data."""
- # To save footprints since most TestInput instances don't have to have these fields.
- ref_file = None
- is_mismatch_reftest = None
-
- def __init__(self, test_name, timeout, should_run_pixel_test=True):
+ def __init__(self, test_name, timeout):
"""Holds the input parameters for a test.
Args:
test: name of test (not an absolute path!)
timeout: Timeout in msecs the driver should use while running the test
- ref_file: name of reference_filename (not an absolute path!)
- is_mismatch_test: true when the test is a mismatch reftest.
"""
self.test_name = test_name
self.timeout = timeout
- self.should_run_pixel_test = should_run_pixel_test
+
+ # TestInput objects are normally constructed by the manager and passed
+ # to the workers, but these two fields are set lazily in the workers
+ # because they require us to figure out whether the test is a reftest,
+ # and we want to be able to do that in parallel.
+ self.should_run_pixel_tests = None
+ self.reference_files = None
def __repr__(self):
- return "TestInput('%s', %d)" % (self.test_name, self.timeout)
+ return "TestInput('%s', %d, %s, %s)" % (self.test_name, self.timeout, self.should_run_pixel_tests, self.reference_files)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/__init__.py b/Tools/Scripts/webkitpy/layout_tests/port/__init__.py
index e58469826..93bda9f56 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/__init__.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/__init__.py
@@ -33,3 +33,4 @@ import builders # Why is this in port?
from base import Port # It's possible we don't need to export this virtual baseclass outside the module.
from driver import Driver, DriverInput, DriverOutput
+from factory import port_options
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/apple.py b/Tools/Scripts/webkitpy/layout_tests/port/apple.py
index ac425c96e..b6e3b1d2e 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/apple.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/apple.py
@@ -90,7 +90,6 @@ class ApplePort(WebKitPort):
version = self.FUTURE_VERSION
for build_type in self.ALL_BUILD_TYPES:
- # Win and Mac ports both happen to only exist on x86 architectures and always use cpu graphics (gpu graphics is a chromium-only hack).
# But at some later point we may need to make these configurable by the MacPort and WinPort subclasses.
- configurations.append(TestConfiguration(version=version, architecture='x86', build_type=build_type, graphics_type='cpu'))
+ configurations.append(TestConfiguration(version=version, architecture='x86', build_type=build_type))
return configurations
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index 11aa5081c..6c1203c53 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -100,7 +100,6 @@ class Port(object):
# These are default values that should be overridden in a subclasses.
self._version = ''
self._architecture = 'x86'
- self._graphics_type = 'cpu'
# FIXME: Ideally we'd have a package-wide way to get a
# well-formed options object that had all of the necessary
@@ -142,6 +141,9 @@ class Port(object):
self._reftest_list = {}
self._results_directory = None
+ def default_test_timeout_ms(self):
+ return 6 * 1000
+
def wdiff_available(self):
if self._wdiff_available is None:
self._wdiff_available = self.check_wdiff(logging=False)
@@ -152,6 +154,9 @@ class Port(object):
self._pretty_patch_available = self.check_pretty_patch(logging=False)
return self._pretty_patch_available
+ def should_retry_crashes(self):
+ return False
+
def default_child_processes(self):
"""Return the number of DumpRenderTree instances to use for this port."""
cpu_count = self._executive.cpu_count()
@@ -166,9 +171,6 @@ class Port(object):
return min(supportable_instances, cpu_count)
return cpu_count
- def default_worker_model(self):
- return 'processes'
-
def worker_startup_delay_secs(self):
# FIXME: If we start workers up too quickly, DumpRenderTree appears
# to thrash on something and time out its first few tests. Until
@@ -288,9 +290,6 @@ class Port(object):
actual_filename)
return ''.join(diff)
- def is_crash_reporter(self, process_name):
- return False
-
def check_for_leaks(self, process_name, process_pid):
# Subclasses should check for leaks in the running process
# and print any necessary warnings if leaks are found.
@@ -307,6 +306,27 @@ class Port(object):
# FIXME: Seems we should get this from the Port's Driver class.
return "DumpRenderTree"
+ def expected_baselines_by_extension(self, test_name):
+ """Returns a dict mapping baseline suffix to relative path for each baseline in
+ a test. For reftests, it returns ".==" or ".!=" instead of the suffix."""
+ # FIXME: The name similarity between this and expected_baselines() below, is unfortunate.
+ # We should probably rename them both.
+ baseline_dict = {}
+ reference_files = self.reference_files(test_name)
+ if reference_files:
+ # FIXME: How should this handle more than one type of reftest?
+ baseline_dict['.' + reference_files[0][0]] = self.relative_test_filename(reference_files[0][1])
+
+ for extension in self.baseline_extensions():
+ path = self.expected_filename(test_name, extension, return_default=False)
+ baseline_dict[extension] = self.relative_test_filename(path) if path else path
+
+ return baseline_dict
+
+ def baseline_extensions(self):
+ """Returns a tuple of all of the non-reftest baseline extensions we use. The extensions include the leading '.'."""
+ return ('.wav', '.webarchive', '.txt', '.png')
+
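
An illustrative return value (paths invented; None means expected_filename() with return_default=False found nothing):

    port.expected_baselines_by_extension('fast/t.html')
    # => {'.txt': 'platform/mac/fast/t-expected.txt', '.png': None,
    #     '.wav': None, '.webarchive': None}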
def expected_baselines(self, test_name, suffix, all_baselines=False):
"""Given a test name, finds where the baseline results are located.
@@ -356,7 +376,7 @@ class Port(object):
return [(None, baseline_filename)]
- def expected_filename(self, test_name, suffix):
+ def expected_filename(self, test_name, suffix, return_default=True):
"""Given a test name, returns an absolute path to its expected results.
If no expected results are found in any of the searched directories,
@@ -371,6 +391,8 @@ class Port(object):
platform: the most-specific directory name to use to build the
search list of directories, e.g., 'chromium-win', or
'chromium-cg-mac-leopard' (we follow the WebKit format)
+ return_default: if True, returns the path to the generic expectation if nothing
+ else is found; if False, returns None.
This routine is generic but is implemented here to live alongside
the other baseline and filename manipulation routines.
@@ -384,7 +406,9 @@ class Port(object):
if actual_test_name:
return self.expected_filename(actual_test_name, suffix)
- return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
+ if return_default:
+ return self._filesystem.join(self.layout_tests_dir(), baseline_filename)
+ return None
def expected_checksum(self, test_name):
"""Returns the checksum of the image we expect the test to produce, or None if it is a text-only test."""
@@ -570,8 +594,9 @@ class Port(object):
def webkit_base(self):
return self._filesystem.abspath(self.path_from_webkit_base('.'))
- def skipped_layout_tests(self):
- return []
+ def skipped_layout_tests(self, test_list):
+ """Returns the set of tests found in Skipped files. Does *not* include tests marked as SKIP in expectations files."""
+ return set([])
def _tests_from_skipped_file_contents(self, skipped_file_contents):
tests_to_skip = []
@@ -599,21 +624,6 @@ class Port(object):
def skipped_perf_tests(self):
return self._expectations_from_skipped_files([self.perf_tests_dir()])
- def skipped_tests(self, test_list):
- return []
-
- def skips_layout_test(self, test_name):
- """Figures out if the givent test is being skipped or not.
-
- Test categories are handled as well."""
- for test_or_category in self.skipped_layout_tests():
- if test_or_category == test_name:
- return True
- category = self._filesystem.join(self.layout_tests_dir(), test_or_category)
- if self._filesystem.isdir(category) and test_name.startswith(test_or_category):
- return True
- return False
-
def skips_perf_test(self, test_name):
for test_or_category in self.skipped_perf_tests():
if test_or_category == test_name:
@@ -625,7 +635,7 @@ class Port(object):
def name(self):
"""Returns a name that uniquely identifies this particular type of port
- (e.g., "mac-snowleopard" or "chromium-gpu-linux-x86_x64" and can be passed
+ (e.g., "mac-snowleopard" or "chromium-linux-x86_x64" and can be passed
to factory.get() to instantiate the port."""
return self._name
@@ -641,10 +651,6 @@ class Port(object):
expectations, determining search paths, and logging information."""
return self._version
- def graphics_type(self):
- """Returns whether the port uses accelerated graphics ('gpu') or not ('cpu')."""
- return self._graphics_type
-
def architecture(self):
return self._architecture
@@ -708,6 +714,10 @@ class Port(object):
"""Perform port-specific work at the beginning of a test run."""
pass
+ def clean_up_test_run(self):
+ """Perform port-specific work at the end of a test run."""
+ pass
+
# FIXME: os.environ access should be moved to onto a common/system class to be more easily mockable.
def _value_or_default_from_environ(self, name, default=None):
if name in os.environ:
@@ -729,6 +739,7 @@ class Port(object):
'LANG',
'LD_LIBRARY_PATH',
'DBUS_SESSION_BUS_ADDRESS',
+ 'XDG_DATA_DIRS',
# Darwin:
'DYLD_LIBRARY_PATH',
@@ -828,7 +839,7 @@ class Port(object):
def test_configuration(self):
"""Returns the current TestConfiguration for the port."""
if not self._test_configuration:
- self._test_configuration = TestConfiguration(self._version, self._architecture, self._options.configuration.lower(), self._graphics_type)
+ self._test_configuration = TestConfiguration(self._version, self._architecture, self._options.configuration.lower())
return self._test_configuration
# FIXME: Belongs on a Platform object.
@@ -877,7 +888,13 @@ class Port(object):
it is possible that you might need "downstream" expectations that
temporarily override the "upstream" expectations until the port can
sync up the two repos."""
- return None
+ overrides = ''
+ for path in self.get_option('additional_expectations', []):
+ if self._filesystem.exists(self._filesystem.expanduser(path)):
+ overrides += self._filesystem.read_text_file(self._filesystem.expanduser(path))
+ else:
+ _log.warning("overrides path '%s' does not exist" % path)
+ return overrides or None
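
A minimal sketch of how this base implementation gets fed (the long-form flag spelling is an assumption; paths invented):

    # run-webkit-tests --additional-expectations=/tmp/more.txt --additional-expectations=/tmp/extra.txt
    # Existing files are concatenated in the order given; a missing path only logs
    # a warning, and with no usable files the method still returns None.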
def repository_paths(self):
"""Returns a list of (repository_name, repository_path) tuples of its depending code base.
@@ -1056,6 +1073,18 @@ class Port(object):
"""Returns the port's driver implementation."""
raise NotImplementedError('Port._driver_class')
+ def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
+ name_str = name or '<unknown process name>'
+ pid_str = str(pid or '<unknown>')
+ stdout_lines = (stdout or '<empty>').decode('utf8', 'replace').splitlines()
+ stderr_lines = (stderr or '<empty>').decode('utf8', 'replace').splitlines()
+ return 'crash log for %s (pid %s):\n%s\n%s\n' % (name_str, pid_str,
+ '\n'.join(('STDOUT: ' + l) for l in stdout_lines),
+ '\n'.join(('STDERR: ' + l) for l in stderr_lines))
+
+ def sample_process(self, name, pid):
+ pass
+
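
Output shape of the generic crash-log fallback (arguments invented):

    port._get_crash_log('DumpRenderTree', 123, 'out', 'err', newer_than=None)
    # 'crash log for DumpRenderTree (pid 123):\nSTDOUT: out\nSTDERR: err\n'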
def virtual_test_suites(self):
return []
@@ -1089,7 +1118,7 @@ class Port(object):
def lookup_virtual_test_base(self, test_name):
for suite in self.populated_virtual_test_suites():
if test_name.startswith(suite.name):
- return suite.tests.get(test_name)
+ return test_name.replace(suite.name, suite.base)
return None
def lookup_virtual_test_args(self, test_name):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
index fde05fda0..d52f8819f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
@@ -208,15 +208,6 @@ class PortTest(unittest.TestCase):
port = self.make_port(config=config_mock.MockConfig(default_configuration='default'))
self.assertEqual(port.default_configuration(), 'default')
- def test_layout_tests_skipping(self):
- port = self.make_port()
- port.host.filesystem.write_text_file(port.layout_tests_dir() + '/media/video-zoom.html', '')
- port.host.filesystem.write_text_file(port.layout_tests_dir() + '/foo/bar.html', '')
- port.skipped_layout_tests = lambda: ['foo/bar.html', 'media']
- self.assertTrue(port.skips_layout_test('foo/bar.html'))
- self.assertTrue(port.skips_layout_test('media/video-zoom.html'))
- self.assertFalse(port.skips_layout_test('foo/foo.html'))
-
def test_setup_test_run(self):
port = self.make_port()
# This routine is a no-op. We just test it for coverage.
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/builders.py b/Tools/Scripts/webkitpy/layout_tests/port/builders.py
index ea6b468cd..cfab80da1 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/builders.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/builders.py
@@ -37,9 +37,9 @@ from webkitpy.common.memoized import memoized
# * specifiers -- a set of specifiers, representing configurations covered by this builder.
_exact_matches = {
# These builders are on build.chromium.org.
- "Webkit Win": {"port_name": "chromium-win-xp", "specifiers": set(["xp", "release", "cpu"])},
+ "Webkit Win": {"port_name": "chromium-win-xp", "specifiers": set(["xp", "release"])},
"Webkit Vista": {"port_name": "chromium-win-vista", "specifiers": set(["vista"])},
- "Webkit Win7": {"port_name": "chromium-win-win7", "specifiers": set(["win7", "cpu"])},
+ "Webkit Win7": {"port_name": "chromium-win-win7", "specifiers": set(["win7"])},
"Webkit Win (dbg)(1)": {"port_name": "chromium-win-xp", "specifiers": set(["win", "debug"])},
"Webkit Win (dbg)(2)": {"port_name": "chromium-win-xp", "specifiers": set(["win", "debug"])},
"Webkit Linux": {"port_name": "chromium-linux-x86_64", "specifiers": set(["linux", "x86_64", "release"])},
@@ -50,7 +50,7 @@ _exact_matches = {
"Webkit Mac10.5 (dbg)(2)": {"port_name": "chromium-mac-leopard", "specifiers": set(["leopard", "debug"])},
"Webkit Mac10.6": {"port_name": "chromium-mac-snowleopard", "specifiers": set(["snowleopard"])},
"Webkit Mac10.6 (dbg)": {"port_name": "chromium-mac-snowleopard", "specifiers": set(["snowleopard", "debug"])},
- "Webkit Mac10.7": {"port_name": "chromium-mac-lion", "specifiers": set(["lion"]), "move_overwritten_baselines_to": "chromium-mac-snowleopard"},
+ "Webkit Mac10.7": {"port_name": "chromium-mac-lion", "specifiers": set(["lion"])},
# These builders are on build.webkit.org.
"GTK Linux 32-bit Debug": {"port_name": "gtk", "specifiers": set(["gtk"])},
@@ -60,6 +60,7 @@ _exact_matches = {
"Qt Linux Release": {"port_name": "qt-linux", "specifiers": set(["win", "linux", "mac"])},
"Windows XP Debug (Tests)": {"port_name": "win-xp", "specifiers": set(["win"])},
"Windows 7 Release (WebKit2 Tests)": {"port_name": "win-future-wk2", "specifiers": set(["wk2"])},
+ "EFL Linux Release": {"port_name": "efl", "specifiers": set(["efl"])},
}
@@ -123,5 +124,5 @@ def builder_path_for_port_name(port_name):
builder_path_from_name(builder_name_for_port_name(port_name))
-def fallback_port_name_for_new_port(builder_name):
- return _exact_matches[builder_name].get("move_overwritten_baselines_to")
+def fallback_port_names_for_new_port(builder_name):
+ return _exact_matches[builder_name].get("move_overwritten_baselines_to", [])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
index f80f53bb5..7b3cb71f9 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -46,7 +46,8 @@ from webkitpy.layout_tests.controllers.manager import Manager
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port.base import Port, VirtualTestSuite
-from webkitpy.layout_tests.port.driver import Driver, DriverOutput
+from webkitpy.layout_tests.port.driver import DriverOutput
+from webkitpy.layout_tests.port.webkit import WebKitDriver
from webkitpy.layout_tests.port import builders
from webkitpy.layout_tests.servers import http_server
from webkitpy.layout_tests.servers import websocket_server
@@ -67,9 +68,9 @@ class ChromiumPort(Port):
('win7', 'x86'),
('lucid', 'x86'),
('lucid', 'x86_64'),
- ('icecreamsandwich', 'arm'))
-
- ALL_GRAPHICS_TYPES = ('cpu', 'gpu')
+ # FIXME: Technically this should be 'arm', but adding a third architecture type breaks TestConfigurationConverter.
+ # If we need this to be 'arm' in the future, then we first have to fix TestConfigurationConverter.
+ ('icecreamsandwich', 'x86'))
ALL_BASELINE_VARIANTS = [
'chromium-mac-lion', 'chromium-mac-snowleopard', 'chromium-mac-leopard',
@@ -265,9 +266,15 @@ class ChromiumPort(Port):
def stop_helper(self):
if self._helper:
_log.debug("Stopping layout test helper")
- self._helper.stdin.write("x\n")
- self._helper.stdin.close()
- self._helper.wait()
+ try:
+ self._helper.stdin.write("x\n")
+ self._helper.stdin.close()
+ self._helper.wait()
+ except IOError, e:
+ pass
+ finally:
+ self._helper = None
+
def exit_code_from_summarized_results(self, unexpected_results):
# Turn bots red for missing results.
@@ -294,8 +301,7 @@ class ChromiumPort(Port):
test_configurations = []
for version, architecture in self.ALL_SYSTEMS:
for build_type in self.ALL_BUILD_TYPES:
- for graphics_type in self.ALL_GRAPHICS_TYPES:
- test_configurations.append(TestConfiguration(version, architecture, build_type, graphics_type))
+ test_configurations.append(TestConfiguration(version, architecture, build_type))
return test_configurations
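
Net effect on the configuration matrix, as a sketch (only part of ALL_SYSTEMS is visible in this hunk):

    # before: len(ALL_SYSTEMS) * len(ALL_BUILD_TYPES) * len(ALL_GRAPHICS_TYPES)
    # after:  len(ALL_SYSTEMS) * len(ALL_BUILD_TYPES)
    # i.e. half as many TestConfigurations, since ALL_GRAPHICS_TYPES had exactly
    # two entries ('cpu', 'gpu').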
try_builder_names = frozenset([
@@ -311,36 +317,33 @@ class ChromiumPort(Port):
# FIXME: It seems bad that run_webkit_tests.py uses a hardcoded dummy
# builder string instead of just using None.
builder_name = self.get_option('builder_name', 'DUMMY_BUILDER_NAME')
+ base_overrides = super(ChromiumPort, self).test_expectations_overrides()
if builder_name != 'DUMMY_BUILDER_NAME' and not '(deps)' in builder_name and not builder_name in self.try_builder_names:
- return None
+ return base_overrides
try:
overrides_path = self.path_from_chromium_base('webkit', 'tools', 'layout_tests', 'test_expectations.txt')
- except AssertionError:
- return None
+ except AssertionError, e:
+ return base_overrides
if not self._filesystem.exists(overrides_path):
- return None
- return self._filesystem.read_text_file(overrides_path)
-
- def skipped_layout_tests(self, extra_test_files=None):
- expectations_str = self.test_expectations()
- overrides_str = self.test_expectations_overrides()
- is_debug_mode = False
-
- all_test_files = self.tests([])
- if extra_test_files:
- all_test_files.update(extra_test_files)
-
- expectations = test_expectations.TestExpectations(
- self, all_test_files, expectations_str, self.test_configuration(),
- is_lint_mode=False, overrides=overrides_str)
- return expectations.get_tests_with_result_type(test_expectations.SKIP)
+ return base_overrides
+ return self._filesystem.read_text_file(overrides_path) + (base_overrides or '')
def repository_paths(self):
repos = super(ChromiumPort, self).repository_paths()
repos.append(('chromium', self.path_from_chromium_base('build')))
return repos
+ def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
+ new_stderr = stderr
+ if stderr and 'AddressSanitizer' in stderr:
+ asan_filter_path = self.path_from_chromium_base('third_party', 'asan', 'scripts', 'asan_symbolize.py')
+ if self._filesystem.exists(asan_filter_path):
+ output = self._executive.run_command([asan_filter_path], input=stderr)
+ new_stderr = self._executive.run_command(['c++filt'], input=output)
+
+ return super(ChromiumPort, self)._get_crash_log(name, pid, stdout, new_stderr, newer_than)
+
def virtual_test_suites(self):
return [
VirtualTestSuite('platform/chromium/virtual/gpu/fast/canvas',
@@ -348,7 +351,11 @@ class ChromiumPort(Port):
['--enable-accelerated-2d-canvas']),
VirtualTestSuite('platform/chromium/virtual/gpu/canvas/philip',
'canvas/philip',
- ['--enable-accelerated-2d-canvas'])]
+ ['--enable-accelerated-2d-canvas']),
+ VirtualTestSuite('platform/chromium/virtual/threaded/compositing/visibility',
+ 'compositing/visibility',
+ ['--enable-threaded-compositing']),
+ ]
#
# PROTECTED METHODS
@@ -397,20 +404,34 @@ class ChromiumPort(Port):
return self._build_path(self.get_option('configuration'), binary_name)
-# FIXME: This should inherit from WebKitDriver now that Chromium has a DumpRenderTree process like the rest of WebKit.
-class ChromiumDriver(Driver):
+class ChromiumDriver(WebKitDriver):
+ KILL_TIMEOUT_DEFAULT = 3.0
+
def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
- Driver.__init__(self, port, worker_number, pixel_tests, no_timeout)
+ WebKitDriver.__init__(self, port, worker_number, pixel_tests, no_timeout)
self._proc = None
self._image_path = None
+ # FIXME: Delete all of this driver code once we're satisfied that it's not needed any more.
+ if port.host.platform.os_version == 'snowleopard':
+ if not hasattr(port._options, 'additional_drt_flag'):
+ port._options.additional_drt_flag = []
+ if not '--test-shell' in port._options.additional_drt_flag:
+ port._options.additional_drt_flag.append('--test-shell')
+
+ self._test_shell = '--test-shell' in port.get_option('additional_drt_flag', [])
+
def _wrapper_options(self, pixel_tests):
cmd = []
- if pixel_tests or self._pixel_tests:
- if not self._image_path:
- self._image_path = self._port._filesystem.join(self._port.results_directory(), 'png_result%s.png' % self._worker_number)
- # See note above in diff_image() for why we need _convert_path().
- cmd.append("--pixel-tests=" + self._port._convert_path(self._image_path))
+ if pixel_tests:
+ if self._test_shell:
+ if not self._image_path:
+ self._image_path = self._port._filesystem.join(self._port.results_directory(), 'png_result%s.png' % self._worker_number)
+ # See note above in diff_image() for why we need _convert_path().
+ cmd.append("--pixel-tests=" + self._port._convert_path(self._image_path))
+ else:
+ cmd.append('--pixel-tests')
+
# FIXME: This 'is not None' check shouldn't be necessary, unless --js-flags="''" changes behavior somehow.
if self._port.get_option('js_flags') is not None:
cmd.append('--js-flags="' + self._port.get_option('js_flags') + '"')
@@ -444,15 +465,18 @@ class ChromiumDriver(Driver):
def cmd_line(self, pixel_tests, per_test_args):
cmd = self._command_wrapper(self._port.get_option('wrapper'))
cmd.append(self._port._path_to_driver())
- # FIXME: Why does --test-shell exist? TestShell is dead, shouldn't this be removed?
- # It seems it's still in use in Tools/DumpRenderTree/chromium/DumpRenderTree.cpp as of 8/10/11.
- cmd.append('--test-shell')
cmd.extend(self._wrapper_options(pixel_tests))
cmd.extend(per_test_args)
+ if not self._test_shell:
+ cmd.append('-')
+
return cmd
def _start(self, pixel_tests, per_test_args):
+ if not self._test_shell:
+ return super(ChromiumDriver, self)._start(pixel_tests, per_test_args)
+
assert not self._proc
# FIXME: This should use ServerProcess like WebKitDriver does.
# FIXME: We should be reading stderr and stdout separately like how WebKitDriver does.
@@ -460,6 +484,9 @@ class ChromiumDriver(Driver):
self._proc = subprocess.Popen(self.cmd_line(pixel_tests, per_test_args), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=close_fds)
def has_crashed(self):
+ if not self._test_shell:
+ return super(ChromiumDriver, self).has_crashed()
+
if self._proc is None:
return False
return self._proc.poll() is not None
@@ -517,8 +544,11 @@ class ChromiumDriver(Driver):
self._port._filesystem.remove(self._image_path)
def run_test(self, driver_input):
+ if not self._test_shell:
+ return super(ChromiumDriver, self).run_test(driver_input)
+
if not self._proc:
- self._start(driver_input.is_reftest or self._pixel_tests, driver_input.args)
+ self._start(driver_input.should_run_pixel_test, driver_input.args)
output = []
error = []
@@ -577,6 +607,8 @@ class ChromiumDriver(Driver):
line, crash = self._write_command_and_read_line(input=None)
+ if crash and line is not None:
+ error.append(line)
run_time = time.time() - start_time
output_image = self._output_image_with_retry()
@@ -593,35 +625,44 @@ class ChromiumDriver(Driver):
text = None
error = ''.join(error)
- crashed_process_name = None
# Currently the stacktrace is in the text output, not error, so append the two together so
# that we can see stack in the output. See http://webkit.org/b/66806
# FIXME: We really should properly handle the stderr output separately.
+ crash_log = ''
+ crashed_process_name = None
+ crashed_pid = None
if crash:
- error = error + str(text)
crashed_process_name = self._port.driver_name()
+ if self._proc:
+ crashed_pid = self._proc.pid
+ crash_log = self._port._get_crash_log(crashed_process_name, crashed_pid, text, error, newer_than=start_time)
+ if text:
+ error = error + text
return DriverOutput(text, output_image, actual_checksum, audio=audio_bytes,
- crash=crash, crashed_process_name=crashed_process_name, test_time=run_time, timeout=timeout, error=error)
+ crash=crash, crashed_process_name=crashed_process_name, crashed_pid=crashed_pid, crash_log=crash_log,
+ test_time=run_time, timeout=timeout, error=error)
def start(self, pixel_tests, per_test_args):
if not self._proc:
self._start(pixel_tests, per_test_args)
def stop(self):
+ if not self._test_shell:
+ return super(ChromiumDriver, self).stop()
+
if not self._proc:
return
- # FIXME: If we used ServerProcess all this would happen for free with ServerProces.stop()
self._proc.stdin.close()
self._proc.stdout.close()
if self._proc.stderr:
self._proc.stderr.close()
time_out_ms = self._port.get_option('time_out_ms')
if time_out_ms and not self._no_timeout:
- # FIXME: Port object shouldn't be dependent on layout test manager.
- kill_timeout_seconds = 3.0 * int(time_out_ms) / Manager.DEFAULT_TEST_TIMEOUT_MS
+ timeout_ratio = float(time_out_ms) / self._port.default_test_timeout_ms()
+ kill_timeout_seconds = self.KILL_TIMEOUT_DEFAULT * timeout_ratio if timeout_ratio > 1.0 else self.KILL_TIMEOUT_DEFAULT
else:
- kill_timeout_seconds = 3.0
+ kill_timeout_seconds = self.KILL_TIMEOUT_DEFAULT
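
Worked example of the new scaling, assuming the base port's 6-second default_test_timeout_ms():

    # time_out_ms=12000 -> timeout_ratio = 12000/6000 = 2.0 -> kill timeout = 3.0 * 2.0 = 6.0s
    # time_out_ms=3000  -> timeout_ratio = 0.5, not > 1.0  -> kill timeout = KILL_TIMEOUT_DEFAULT (3.0s)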
# Closing stdin/stdout/stderr hangs sometimes on OS X,
# (see __init__(), above), and anyway we don't want to hang
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
index 2b6d53625..566afba23 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -27,7 +28,11 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
+import re
+import signal
+import time
+from webkitpy.layout_tests.port import base
from webkitpy.layout_tests.port import chromium
from webkitpy.layout_tests.port import factory
@@ -93,15 +98,11 @@ HOST_FONT_FILES = [
[MS_TRUETYPE_FONTS_DIR, 'Verdana_Bold.ttf'],
[MS_TRUETYPE_FONTS_DIR, 'Verdana_Bold_Italic.ttf'],
[MS_TRUETYPE_FONTS_DIR, 'Verdana_Italic.ttf'],
- # The Microsoft font EULA
- ['/usr/share/doc/ttf-mscorefonts-installer/', 'READ_ME!.gz'],
- ['/usr/share/fonts/truetype/ttf-dejavu/', 'DejaVuSans.ttf'],
]
# Should increase this version after changing HOST_FONT_FILES.
FONT_FILES_VERSION = 1
DEVICE_FONTS_DIR = DEVICE_DRT_DIR + 'fonts/'
-DEVICE_FIRST_FALLBACK_FONT = '/system/fonts/DroidNaskh-Regular.ttf'
# The layout tests directory on device, which has two usages:
# 1. as a virtual path in file urls that will be bridged to HTTP.
@@ -141,10 +142,11 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def __init__(self, host, port_name, **kwargs):
chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
+ # The Chromium port for Android always uses the hardware GPU path.
+ self._options.enable_hardware_gpu = True
+
self._operating_system = 'android'
self._version = 'icecreamsandwich'
- # FIXME: we may support other architectures in the future.
- self._architecture = 'arm'
self._original_governor = None
self._android_base_dir = None
@@ -154,6 +156,13 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
adb_args = self.get_option('adb_args')
if adb_args:
self._adb_command += shlex.split(adb_args)
+ self._drt_retry_after_killed = 0
+
+ def default_test_timeout_ms(self):
+ # The Android platform has less computing power than desktop platforms.
+ # Using 10 seconds allows most tests that are slow here, but not marked
+ # as slow on desktop platforms, to pass.
+ return 10 * 1000
def default_child_processes(self):
# Currently we only use one process, but it might be helpful to use
@@ -175,9 +184,6 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
return False
return True
- def default_worker_model(self):
- return 'inline'
-
def test_expectations(self):
# Automatically apply all expectation rules of chromium-linux to
# chromium-android.
@@ -204,7 +210,6 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
self._push_executable()
self._push_fonts()
- self._setup_system_font_for_test()
self._synchronize_datetime()
# Start the HTTP server so that the device can access the test cases.
@@ -214,11 +219,17 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
cmd = self._run_adb_command(['shell', '%s %s' % (DEVICE_FORWARDER_PATH, FORWARD_PORTS)])
def stop_helper(self):
- self._restore_system_font()
# Leave the forwarder and test httpd server there because they are
# useful for debugging and do no harm to subsequent tests.
self._teardown_performance()
+ def skipped_tests(self, test_list):
+ return base.Port._real_tests(self, [
+ # Canvas tests are run as virtual gpu tests.
+ 'fast/canvas',
+ 'canvas/philip',
+ ])
+
def _build_path(self, *comps):
return self._host_port._build_path(*comps)
@@ -290,20 +301,6 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
self._push_to_device(host_dir + font_file, DEVICE_FONTS_DIR + font_file)
self._update_version(DEVICE_FONTS_DIR, FONT_FILES_VERSION)
- def _setup_system_font_for_test(self):
- # The DejaVu font implicitly used by some CSS 2.1 tests should be added
- # into the font fallback list of the system. DroidNaskh-Regular.ttf is
- # the first font in Android Skia's font fallback list. Fortunately the
- # DejaVu font also contains Naskh glyphs.
- # First remount /system in read/write mode.
- self._run_adb_command(['remount'])
- self._copy_device_file(DEVICE_FONTS_DIR + 'DejaVuSans.ttf', DEVICE_FIRST_FALLBACK_FONT)
-
- def _restore_system_font(self):
- # First remount /system in read/write mode.
- self._run_adb_command(['remount'])
- self._push_to_device(os.environ['OUT'] + DEVICE_FIRST_FALLBACK_FONT, DEVICE_FIRST_FALLBACK_FONT)
-
def _push_test_resources(self):
_log.debug('Pushing test resources')
for resource in TEST_RESOURCES_TO_PUSH:
@@ -360,9 +357,10 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def get_last_stacktrace(self):
tombstones = self._run_adb_command(['shell', 'ls', '-n', '/data/tombstones'])
- tombstones = tombstones.rstrip().split('\n')
if not tombstones:
+ _log.error('DRT crashed, but no tombstone found!')
return ''
+ tombstones = tombstones.rstrip().split('\n')
last_tombstone = tombstones[0].split()
for tombstone in tombstones[1:]:
# Format of fields:
@@ -396,6 +394,7 @@ class ChromiumAndroidDriver(chromium.ChromiumDriver):
def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
chromium.ChromiumDriver.__init__(self, port, worker_number, pixel_tests, no_timeout)
self._device_image_path = None
+ self._drt_return_parser = re.compile('#DRT_RETURN (\d+)')
def _start(self, pixel_tests, per_test_args):
# Convert the original command line into to two parts:
@@ -439,7 +438,7 @@ class ChromiumAndroidDriver(chromium.ChromiumDriver):
# the process. Sleep 1 second (long enough for debuggerd to dump
# stack) before exiting the shell to ensure the process has quit,
# otherwise the exit will fail because "You have stopped jobs".
- drt_cmd = '%s %s 2>%s;sleep 1;exit\n' % (DEVICE_DRT_PATH, ' '.join(drt_args), DEVICE_DRT_STDERR)
+ drt_cmd = '%s %s 2>%s;echo "#DRT_RETURN $?";sleep 1;exit\n' % (DEVICE_DRT_PATH, ' '.join(drt_args), DEVICE_DRT_STDERR)
_log.debug('Starting DumpRenderTree: ' + drt_cmd)
# Wait until DRT echos '#READY'.
@@ -466,13 +465,30 @@ class ChromiumAndroidDriver(chromium.ChromiumDriver):
def run_test(self, driver_input):
driver_output = chromium.ChromiumDriver.run_test(self, driver_input)
+
+ drt_return = self._get_drt_return_value(driver_output.error)
+ if drt_return is not None:
+ _log.debug('DumpRenderTree return value: %d' % drt_return)
# FIXME: Retrieve stderr from the target.
if driver_output.crash:
+ # When Android runs out of memory it sends SIGKILL to DRT, which
+ # stops silently and is regarded as crashed. Re-run the test after
+ # such a crash.
+ if drt_return == 128 + signal.SIGKILL:
+ self._port._drt_retry_after_killed += 1
+ if self._port._drt_retry_after_killed > 10:
+ raise AssertionError('DumpRenderTree was killed by Android too many times!')
+ _log.error('DumpRenderTree was killed by SIGKILL. Retrying the test (%d).' % self._port._drt_retry_after_killed)
+ self.stop()
+ # Sleep 10 seconds to let the system recover.
+ time.sleep(10)
+ return self.run_test(driver_input)
# Fetch the stack trace from the tombstone file.
# FIXME: sometimes the crash doesn't really happen, so no
# tombstone is generated. In that case we fetch the wrong stack
# trace.
- driver_output.error += self._port.get_last_stacktrace()
+ driver_output.error += self._port.get_last_stacktrace().encode('ascii', 'ignore')
+ driver_output.error += self._port._run_adb_command(['logcat', '-d']).encode('ascii', 'ignore')
return driver_output
def stop(self):
@@ -522,6 +538,10 @@ class ChromiumAndroidDriver(chromium.ChromiumDriver):
# We use the Shell output as a crash hint.
return line is not None and line.find('[1] + Stopped (signal)') >= 0
+ def _get_drt_return_value(self, error):
+ return_match = self._drt_return_parser.search(error)
+ return None if (return_match is None) else int(return_match.group(1))
+
def _read_prompt(self):
last_char = ''
while True:
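
A self-contained sketch of the return-code plumbing above: the adb shell command appends `echo "#DRT_RETURN $?"` after DumpRenderTree exits, and the driver greps the marker back out of stderr. The helper below assumes a POSIX host (so signal.SIGKILL exists); its name is illustrative.

    import re
    import signal

    _DRT_RETURN_RE = re.compile(r'#DRT_RETURN (\d+)')

    def drt_return_value(stderr_text):
        # Mirrors _get_drt_return_value() above: None when the marker is absent.
        match = _DRT_RETURN_RE.search(stderr_text)
        return None if match is None else int(match.group(1))

    # A process killed by a signal exits with 128 + signal number, so
    # SIGKILL (9) shows up as 137 -- the value the retry logic checks for.
    assert drt_return_value('...#DRT_RETURN 137\n') == 128 + signal.SIGKILL
    assert drt_return_value('no marker here') is None
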
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
index dd652fe02..05598a322 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
@@ -38,4 +38,3 @@ from webkitpy.layout_tests.port import port_testcase
class ChromiumAndroidPortTest(port_testcase.PortTestCase):
port_name = 'chromium-android'
port_maker = chromium_android.ChromiumAndroidPort
- expected_default_worker_model = 'inline'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
index 1a9ba30cd..578969a01 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
@@ -46,7 +46,6 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
'chromium-linux',
'chromium-win',
'chromium',
- 'win',
'mac',
],
'x86': [
@@ -54,7 +53,6 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
'chromium-linux',
'chromium-win',
'chromium',
- 'win',
'mac',
],
}
@@ -112,11 +110,9 @@ class ChromiumLinuxPort(chromium.ChromiumPort):
def __init__(self, host, port_name, **kwargs):
chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
(base, arch) = port_name.rsplit('-', 1)
- assert base in ('chromium-linux', 'chromium-gpu-linux')
+ assert base == 'chromium-linux'
assert arch in self.SUPPORTED_ARCHITECTURES
- assert port_name in ('chromium-linux', 'chromium-gpu-linux',
- 'chromium-linux-x86', 'chromium-linux-x86_64',
- 'chromium-gpu-linux-x86', 'chromium-gpu-linux-x86_64')
+ assert port_name in ('chromium-linux', 'chromium-linux-x86', 'chromium-linux-x86_64')
self._version = 'lucid' # We only support lucid right now.
self._architecture = arch
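
The FALLBACK_PATHS tables above are ordered search lists: the first directory containing a checked-in result wins. A minimal sketch of how such a chain is typically consumed; find_baseline is a hypothetical helper, not part of the patch.

    # Sketch: return the first directory in the fallback chain that actually
    # contains the expected result for a test.
    def find_baseline(filesystem, platform_root, fallback_names, baseline_filename):
        for name in fallback_names:
            candidate = filesystem.join(platform_root, name, baseline_filename)
            if filesystem.exists(candidate):
                return candidate
        return None  # no checked-in baseline anywhere on the chain
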
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
index 3d8b09d85..647596227 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
@@ -69,7 +69,6 @@ class ChromiumLinuxPortTest(port_testcase.PortTestCase):
# FIXME: Check that, for now, these are illegal port names.
# Eventually we should be able to do the right thing here.
self.assertRaises(AssertionError, chromium_linux.ChromiumLinuxPort, MockSystemHost(), port_name='chromium-x86-linux')
- self.assertRaises(AssertionError, chromium_linux.ChromiumLinuxPort, MockSystemHost(), port_name='chromium-linux-x86-gpu')
def test_determine_architecture_fails(self):
# Test that we default to 'x86' if the driver doesn't exist.
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
index 30f4aa991..e1cafccfd 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
@@ -48,23 +48,17 @@ class ChromiumMacPort(chromium.ChromiumPort):
'chromium-mac-snowleopard',
'chromium-mac',
'chromium',
- 'mac-leopard',
- 'mac-snowleopard',
- 'mac-lion',
'mac',
],
'snowleopard': [
'chromium-mac-snowleopard',
'chromium-mac',
'chromium',
- 'mac-snowleopard',
- 'mac-lion',
'mac',
],
'lion': [
'chromium-mac',
'chromium',
- 'mac-lion',
'mac',
],
'future': [
@@ -105,6 +99,16 @@ class ChromiumMacPort(chromium.ChromiumPort):
def operating_system(self):
return 'mac'
+ def default_child_processes(self):
+ # FIXME: As a temporary workaround while we figure out what's going
+ # on with https://bugs.webkit.org/show_bug.cgi?id=83076, halve the
+ # number of workers we run by default on bigger machines.
+ default_count = super(ChromiumMacPort, self).default_child_processes()
+ if default_count >= 8:
+ cpu_count = self._executive.cpu_count()
+ return max(1, min(default_count, int(cpu_count / 2)))
+ return default_count
+
#
# PROTECTED METHODS
#
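
A sketch of the worker-halving workaround above (bug 83076), extracted into a hypothetical pure function so the arithmetic is easy to check:

    def halved_child_processes(default_count, cpu_count):
        # Big machines (a default of 8+ workers) are capped at half the CPUs.
        if default_count >= 8:
            return max(1, min(default_count, int(cpu_count / 2)))
        return default_count

    assert halved_child_processes(4, 4) == 4     # small machines unchanged
    assert halved_child_processes(8, 8) == 4     # halved
    assert halved_child_processes(16, 16) == 8   # halved
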
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
index a8a746ed9..51948eceb 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
@@ -78,11 +78,6 @@ class ChromiumMacPortTest(port_testcase.PortTestCase):
port = self.make_port(port_name='chromium-mac-lion')
self.assertEquals(port.baseline_path(), port._webkit_baseline_path('chromium-mac'))
- def test_graphics_type(self):
- self.assertEquals('cpu', self.make_port(port_name='chromium-mac').graphics_type())
- # Mac defaults to cpu graphics type.
- self.assertEquals('cpu', self.make_port().graphics_type())
-
def test_operating_system(self):
self.assertEqual('mac', self.make_port().operating_system())
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
index a46cb5070..026f81162 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
@@ -35,6 +35,7 @@ from webkitpy.common.system import logtesting
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost_mock import MockSystemHost
+from webkitpy.layout_tests.port.config_mock import MockConfig
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
@@ -51,8 +52,11 @@ from webkitpy.layout_tests.port.driver import DriverInput
class ChromiumDriverTest(unittest.TestCase):
def setUp(self):
- mock_port = Mock() # FIXME: This should use a tighter mock.
- self.driver = chromium.ChromiumDriver(mock_port, worker_number=0, pixel_tests=True)
+ host = MockSystemHost()
+ options = MockOptions(configuration='Release', additional_drt_flag=['--test-shell'])
+ config = MockConfig(filesystem=host.filesystem, default_configuration='Release')
+ self.port = chromium_mac.ChromiumMacPort(host, 'chromium-mac-snowleopard', options=options, config=config)
+ self.driver = chromium.ChromiumDriver(self.port, worker_number=0, pixel_tests=True)
def test_test_shell_command(self):
expected_command = "test.html 2 checksum\n"
@@ -91,17 +95,23 @@ class ChromiumDriverTest(unittest.TestCase):
self.driver._proc.stdout.readline = mock_readline
self._assert_write_command_and_read_line(expected_crash=True)
- def test_crashed_process_name(self):
+ def test_crash_log(self):
self.driver._proc = Mock()
# Simulate a crash by having stdout close unexpectedly.
def mock_readline():
raise IOError
self.driver._proc.stdout.readline = mock_readline
+ self.driver._proc.pid = 1234
self.driver.test_to_uri = lambda test: 'mocktesturi'
- driver_output = self.driver.run_test(DriverInput(test_name='some/test.html', timeout=1, image_hash=None, is_reftest=False))
- self.assertEqual(self.driver._port.driver_name(), driver_output.crashed_process_name)
+ self.driver._port.driver_name = lambda: 'mockdriver'
+ self.driver._port._get_crash_log = lambda name, pid, out, err, newer_than: 'mockcrashlog'
+ driver_output = self.driver.run_test(DriverInput(test_name='some/test.html', timeout=1, image_hash=None, should_run_pixel_test=False))
+ self.assertTrue(driver_output.crash)
+ self.assertEqual(driver_output.crashed_process_name, 'mockdriver')
+ self.assertEqual(driver_output.crashed_pid, 1234)
+ self.assertEqual(driver_output.crash_log, 'mockcrashlog')
def test_stop(self):
self.pid = None
@@ -125,27 +135,24 @@ class ChromiumDriverTest(unittest.TestCase):
self.driver._proc.poll = lambda: 2
self.driver._port._executive = FakeExecutive()
- # Override the kill timeout (ms) so the test runs faster.
- self.driver._port.get_option = lambda name: 1
+ self.driver.KILL_TIMEOUT_DEFAULT = 0.01
self.driver.stop()
self.assertTrue(self.wait_called)
self.assertEquals(self.pid, 1)
def test_two_drivers(self):
- mock_port = Mock()
class MockDriver(chromium.ChromiumDriver):
- def __init__(self):
- chromium.ChromiumDriver.__init__(self, mock_port, worker_number=0, pixel_tests=False)
+ def __init__(self, port):
+ chromium.ChromiumDriver.__init__(self, port, worker_number=0, pixel_tests=False)
def cmd_line(self, pixel_test, per_test_args):
return 'python'
# get_option is used to get the timeout (ms) for a process before we kill it.
- mock_port.get_option = lambda name: 60 * 1000
- driver1 = MockDriver()
+ driver1 = MockDriver(self.port)
driver1._start(False, [])
- driver2 = MockDriver()
+ driver2 = MockDriver(self.port)
driver2._start(False, [])
# It's possible for driver1 to timeout when stopping if it's sharing stdin with driver2.
start_time = time.time()
@@ -162,42 +169,24 @@ class ChromiumPortTest(port_testcase.PortTestCase):
"""Validate the complete set of configurations this port knows about."""
port = self.make_port()
self.assertEquals(set(port.all_test_configurations()), set([
- TestConfiguration('icecreamsandwich', 'arm', 'debug', 'cpu'),
- TestConfiguration('icecreamsandwich', 'arm', 'release', 'cpu'),
- TestConfiguration('icecreamsandwich', 'arm', 'debug', 'gpu'),
- TestConfiguration('icecreamsandwich', 'arm', 'release', 'gpu'),
- TestConfiguration('leopard', 'x86', 'debug', 'cpu'),
- TestConfiguration('leopard', 'x86', 'debug', 'gpu'),
- TestConfiguration('leopard', 'x86', 'release', 'cpu'),
- TestConfiguration('leopard', 'x86', 'release', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'debug', 'cpu'),
- TestConfiguration('snowleopard', 'x86', 'debug', 'gpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'cpu'),
- TestConfiguration('snowleopard', 'x86', 'release', 'gpu'),
- TestConfiguration('lion', 'x86', 'debug', 'cpu'),
- TestConfiguration('lion', 'x86', 'debug', 'gpu'),
- TestConfiguration('lion', 'x86', 'release', 'cpu'),
- TestConfiguration('lion', 'x86', 'release', 'gpu'),
- TestConfiguration('xp', 'x86', 'debug', 'cpu'),
- TestConfiguration('xp', 'x86', 'debug', 'gpu'),
- TestConfiguration('xp', 'x86', 'release', 'cpu'),
- TestConfiguration('xp', 'x86', 'release', 'gpu'),
- TestConfiguration('vista', 'x86', 'debug', 'cpu'),
- TestConfiguration('vista', 'x86', 'debug', 'gpu'),
- TestConfiguration('vista', 'x86', 'release', 'cpu'),
- TestConfiguration('vista', 'x86', 'release', 'gpu'),
- TestConfiguration('win7', 'x86', 'debug', 'cpu'),
- TestConfiguration('win7', 'x86', 'debug', 'gpu'),
- TestConfiguration('win7', 'x86', 'release', 'cpu'),
- TestConfiguration('win7', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86', 'debug', 'cpu'),
- TestConfiguration('lucid', 'x86', 'debug', 'gpu'),
- TestConfiguration('lucid', 'x86', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86', 'release', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'debug', 'cpu'),
- TestConfiguration('lucid', 'x86_64', 'debug', 'gpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'cpu'),
- TestConfiguration('lucid', 'x86_64', 'release', 'gpu'),
+ TestConfiguration('icecreamsandwich', 'x86', 'debug'),
+ TestConfiguration('icecreamsandwich', 'x86', 'release'),
+ TestConfiguration('leopard', 'x86', 'debug'),
+ TestConfiguration('leopard', 'x86', 'release'),
+ TestConfiguration('snowleopard', 'x86', 'debug'),
+ TestConfiguration('snowleopard', 'x86', 'release'),
+ TestConfiguration('lion', 'x86', 'debug'),
+ TestConfiguration('lion', 'x86', 'release'),
+ TestConfiguration('xp', 'x86', 'debug'),
+ TestConfiguration('xp', 'x86', 'release'),
+ TestConfiguration('vista', 'x86', 'debug'),
+ TestConfiguration('vista', 'x86', 'release'),
+ TestConfiguration('win7', 'x86', 'debug'),
+ TestConfiguration('win7', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86', 'debug'),
+ TestConfiguration('lucid', 'x86', 'release'),
+ TestConfiguration('lucid', 'x86_64', 'debug'),
+ TestConfiguration('lucid', 'x86_64', 'release'),
]))
def test_driver_cmd_line(self):
@@ -254,22 +243,6 @@ class ChromiumPortTest(port_testcase.PortTestCase):
self.assertTrue(ChromiumPortTest.TestMacPort()._path_to_image_diff().endswith('/xcodebuild/default/ImageDiff'))
self.assertTrue(ChromiumPortTest.TestWinPort()._path_to_image_diff().endswith('/default/ImageDiff.exe'))
- def test_skipped_layout_tests(self):
- mock_options = MockOptions()
- mock_options.configuration = 'release'
- port = ChromiumPortTest.TestLinuxPort(options=mock_options)
-
- fake_test = 'fast/js/not-good.js'
-
- port.test_expectations = lambda: """BUG_TEST SKIP : fast/js/not-good.js = TEXT
-LINUX WIN : fast/js/very-good.js = TIMEOUT PASS"""
- port.test_expectations_overrides = lambda: ''
- port.tests = lambda paths: set()
- port.test_exists = lambda test: True
-
- skipped_tests = port.skipped_layout_tests(extra_test_files=[fake_test, ])
- self.assertTrue("fast/js/not-good.js" in skipped_tests)
-
def test_default_configuration(self):
mock_options = MockOptions()
port = ChromiumPortTest.TestLinuxPort(options=mock_options)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
index 4e79fb8c8..409643321 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
@@ -51,20 +51,17 @@ class ChromiumWinPort(chromium.ChromiumPort):
'chromium-win-vista',
'chromium-win',
'chromium',
- 'win',
'mac',
],
'vista': [
'chromium-win-vista',
'chromium-win',
'chromium',
- 'win',
'mac',
],
'win7': [
'chromium-win',
'chromium',
- 'win',
'mac',
],
}
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py b/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py
index af71fa332..e50ad4f84 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/config_mock.py
@@ -29,13 +29,24 @@
"""Wrapper objects for WebKit-specific utility routines."""
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+
class MockConfig(object):
- def __init__(self, default_configuration='Release'):
+ _FLAGS_FROM_CONFIGURATIONS = {
+ "Debug": "--debug",
+ "Release": "--release",
+ }
+
+ def __init__(self, filesystem=None, default_configuration='Release'):
+ self._filesystem = filesystem or MockFileSystem()
self._default_configuration = default_configuration
+ def flag_for_configuration(self, configuration):
+ return self._FLAGS_FROM_CONFIGURATIONS[configuration]
+
def build_directory(self, configuration):
- return "/build"
+ return "/mock-build"
def build_dumprendertree(self, configuration):
return True
@@ -44,7 +55,12 @@ class MockConfig(object):
return self._default_configuration
def path_from_webkit_base(self, *comps):
- return "/" + "/".join(list(comps))
+ # FIXME: This could use self._filesystem.join, but that doesn't handle empty lists.
+ return self.webkit_base_dir() + "/" + "/".join(list(comps))
+
+ def script_path(self, script_name):
+ # This is intentionally relative. Callers should pass the checkout_root/webkit_base_dir to run_command as the cwd.
+ return self._filesystem.join("Tools", "Scripts", script_name)
def webkit_base_dir(self):
- return "/"
+ return "/mock-checkout"
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver.py b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
index fa6e81b2e..a113da3a8 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
@@ -34,11 +34,11 @@ from webkitpy.common.system import path
class DriverInput(object):
- def __init__(self, test_name, timeout, image_hash, is_reftest, args=None):
+ def __init__(self, test_name, timeout, image_hash, should_run_pixel_test, args=None):
self.test_name = test_name
self.timeout = timeout # in ms
self.image_hash = image_hash
- self.is_reftest = is_reftest
+ self.should_run_pixel_test = should_run_pixel_test
self.args = args or []
@@ -64,7 +64,8 @@ class DriverOutput(object):
strip_patterns.append((re.compile('scrollHeight [0-9]+'), 'scrollHeight'))
def __init__(self, text, image, image_hash, audio, crash=False,
- test_time=0, timeout=False, error='', crashed_process_name=None):
+ test_time=0, timeout=False, error='', crashed_process_name='??',
+ crashed_pid=None, crash_log=None):
# FIXME: Args could be renamed to better clarify what they do.
self.text = text
self.image = image # May be empty-string if the test crashes.
@@ -73,6 +74,8 @@ class DriverOutput(object):
self.audio = audio # Binary format is port-dependent.
self.crash = crash
self.crashed_process_name = crashed_process_name
+ self.crashed_pid = crashed_pid
+ self.crash_log = crash_log
self.test_time = test_time
self.timeout = timeout
self.error = error # stderr output
@@ -101,7 +104,6 @@ class Driver(object):
"""
self._port = port
self._worker_number = worker_number
- self._pixel_tests = pixel_tests
self._no_timeout = no_timeout
def run_test(self, driver_input):
@@ -178,7 +180,6 @@ class DriverProxy(object):
self._worker_number = worker_number
self._driver_instance_constructor = driver_instance_constructor
self._no_timeout = no_timeout
- self._pixel_tests = pixel_tests
# FIXME: We shouldn't need to create a driver until we actually run a test.
self._driver = self._make_driver(pixel_tests)
@@ -208,7 +209,7 @@ class DriverProxy(object):
virtual_driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
return self.run_test(virtual_driver_input)
- pixel_tests_needed = self._pixel_tests or driver_input.is_reftest
+ pixel_tests_needed = driver_input.should_run_pixel_test
cmd_line_key = self._cmd_line_as_key(pixel_tests_needed, driver_input.args)
if not cmd_line_key in self._running_drivers:
self._running_drivers[cmd_line_key] = self._make_driver(pixel_tests_needed)
@@ -223,7 +224,7 @@ class DriverProxy(object):
# The only reason we have this routine at all is so the perftestrunner
# can pause before running a test; it might be better to push that
# into run_test() directly.
- self._driver.start(self._pixel_tests, [])
+ self._driver.start(self._port.get_option('pixel_tests'), [])
def has_crashed(self):
return any(driver.has_crashed() for driver in self._running_drivers.values())
@@ -234,7 +235,7 @@ class DriverProxy(object):
# FIXME: this should be a @classmethod (or implemented on Port instead).
def cmd_line(self, pixel_tests=None, per_test_args=None):
- return self._driver.cmd_line(pixel_tests or self._pixel_tests, per_test_args or [])
+ return self._driver.cmd_line(pixel_tests, per_test_args or [])
def _cmd_line_as_key(self, pixel_tests, per_test_args):
return ' '.join(self.cmd_line(pixel_tests, per_test_args))
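
The DriverProxy change above keys live drivers by their full command line, so a test needing pixel output (or extra per-test args) gets its own driver process while plain tests share one. A minimal sketch of that pooling idea, with hypothetical names:

    class DriverPool(object):  # hypothetical stand-in for DriverProxy
        def __init__(self, make_driver):
            self._make_driver = make_driver
            self._running_drivers = {}  # command-line key -> live driver

        def get(self, cmd_line_key):
            # Lazily create one driver per distinct command line.
            if cmd_line_key not in self._running_drivers:
                self._running_drivers[cmd_line_key] = self._make_driver(cmd_line_key)
            return self._running_drivers[cmd_line_key]
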
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/efl.py b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
index 39b084951..db7be8057 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/efl.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
@@ -1,5 +1,6 @@
# Copyright (C) 2011 ProFUSION Embedded Systems. All rights reserved.
# Copyright (C) 2011 Samsung Electronics. All rights reserved.
+# Copyright (C) 2012 Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -31,20 +32,33 @@ import signal
import subprocess
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port.webkit import WebKitPort
+from webkitpy.layout_tests.port.webkit import WebKitDriver, WebKitPort
+from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
-_log = logging.getLogger(__name__)
+class EflDriver(WebKitDriver):
+ def cmd_line(self, pixel_tests, per_test_args):
+ wrapper_path = self._port.path_from_webkit_base("Tools", "efl", "run-with-jhbuild")
+ return [wrapper_path] + WebKitDriver.cmd_line(self, pixel_tests, per_test_args)
-class EflPort(WebKitPort):
+class EflPort(WebKitPort, PulseAudioSanitizer):
port_name = 'efl'
def _port_flag_for_scripts(self):
return "--efl"
+ def _driver_class(self):
+ return EflDriver
+
+ def setup_test_run(self):
+ self._unload_pulseaudio_module()
+
+ def clean_up_test_run(self):
+ self._restore_pulseaudio_module()
+
def _generate_all_test_configurations(self):
- return [TestConfiguration(version=self._version, architecture='x86', build_type=build_type, graphics_type='cpu') for build_type in self.ALL_BUILD_TYPES]
+ return [TestConfiguration(version=self._version, architecture='x86', build_type=build_type) for build_type in self.ALL_BUILD_TYPES]
def _path_to_driver(self):
return self._build_path('bin', self.driver_name())
@@ -61,10 +75,6 @@ class EflPort(WebKitPort):
dyn_path = self._build_path('WebCore', 'libwebcore_efl.so')
return static_path if self._filesystem.exists(static_path) else dyn_path
- def _runtime_feature_list(self):
- # FIXME: EFL should detect runtime features like other webkit ports do.
- return None
-
def show_results_html_file(self, results_filename):
# FIXME: We should find a way to share this implementation with Gtk,
# or teach run-launcher how to call run-safari and move this down to WebKitPort.
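
EflDriver above follows a simple wrapper pattern: prepend a launcher script to whatever command line the base driver builds, so the driver runs inside the jhbuild environment. A minimal sketch of that shape; the class and paths below are illustrative only.

    class WrappedDriver(object):  # hypothetical stand-in for EflDriver
        def __init__(self, wrapper_path, base_cmd_line):
            self._wrapper_path = wrapper_path
            self._base_cmd_line = base_cmd_line

        def cmd_line(self):
            # Prepend the wrapper so the command runs in jhbuild's environment.
            return [self._wrapper_path] + self._base_cmd_line

    driver = WrappedDriver('Tools/efl/run-with-jhbuild', ['DumpRenderTree', '-'])
    assert driver.cmd_line() == ['Tools/efl/run-with-jhbuild', 'DumpRenderTree', '-']
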
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/factory.py b/Tools/Scripts/webkitpy/layout_tests/port/factory.py
index 092106019..9f5df7c7e 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/factory.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/factory.py
@@ -29,11 +29,33 @@
"""Factory method to retrieve the appropriate port implementation."""
+import optparse
import re
from webkitpy.layout_tests.port import builders
+def port_options(**help_strings):
+ return [
+ optparse.make_option("-t", "--target", dest="configuration",
+ help="(DEPRECATED)"),
+ # FIXME: --help should display which configuration is default.
+ optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
+ help='Set the configuration to Debug'),
+ optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
+ help='Set the configuration to Release'),
+ optparse.make_option('--platform', action='store',
+ help=help_strings.get('platform', 'Platform/Port being tested (e.g., "mac-lion")')),
+ optparse.make_option("--chromium", action="store_const", const='chromium', dest='platform',
+ help='Alias for --platform=chromium'),
+ optparse.make_option('--efl', action='store_const', const='efl', dest="platform",
+ help='Alias for --platform=efl'),
+ optparse.make_option('--gtk', action='store_const', const='gtk', dest="platform",
+ help='Alias for --platform=gtk'),
+ optparse.make_option('--qt', action='store_const', const='qt', dest="platform",
+ help='Alias for --platform=qt')]
+
+
class BuilderOptions(object):
def __init__(self, builder_name):
self.configuration = "Debug" if re.search(r"[d|D](ebu|b)g", builder_name) else "Release"
@@ -64,7 +86,7 @@ class PortFactory(object):
def _default_port(self, options):
platform = self._host.platform
- if platform.is_linux():
+ if platform.is_linux() or platform.is_freebsd():
return 'chromium-linux'
elif platform.is_mac():
return 'mac'
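
The option aliases above all collapse into two fields, `platform` and `configuration`. A sketch of how a caller wires them into a plain optparse parser, assuming port_options() from the patch is importable:

    import optparse

    parser = optparse.OptionParser()
    for option in port_options():
        parser.add_option(option)

    options, args = parser.parse_args(['--gtk', '--debug'])
    assert options.platform == 'gtk'          # --gtk is an alias for --platform=gtk
    assert options.configuration == 'Debug'   # --debug stores the const 'Debug'
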
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
index 4f8c01ff2..5e4e1a2ea 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
@@ -34,49 +34,24 @@ import subprocess
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port.server_process import ServerProcess
from webkitpy.layout_tests.port.webkit import WebKitDriver, WebKitPort
+from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
+from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
+from webkitpy.common.system.executive import Executive
-
-_log = logging.getLogger(__name__)
-
-
-class GtkDriver(WebKitDriver):
- def _start(self, pixel_tests, per_test_args):
- # Use even displays for pixel tests and odd ones otherwise. When pixel tests are disabled,
- # DriverProxy creates two drivers, one for normal and the other for ref tests. Both have
- # the same worker number, so this prevents them from using the same Xvfb instance.
- display_id = self._worker_number * 2 + 1
- if self._pixel_tests:
- display_id += 1
- run_xvfb = ["Xvfb", ":%d" % (display_id), "-screen", "0", "800x600x24", "-nolisten", "tcp"]
- with open(os.devnull, 'w') as devnull:
- self._xvfb_process = subprocess.Popen(run_xvfb, stderr=devnull)
- server_name = self._port.driver_name()
- environment = self._port.setup_environ_for_server(server_name)
- # We must do this here because the DISPLAY number depends on _worker_number
- environment['DISPLAY'] = ":%d" % (display_id)
- self._server_process = ServerProcess(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
-
- def stop(self):
- WebKitDriver.stop(self)
- if getattr(self, '_xvfb_process', None):
- # FIXME: This should use Executive.kill_process
- os.kill(self._xvfb_process.pid, signal.SIGTERM)
- self._xvfb_process.wait()
- self._xvfb_process = None
-
- def cmd_line(self, pixel_tests, per_test_args):
- wrapper_path = self._port.path_from_webkit_base("Tools", "gtk", "run-with-jhbuild")
- return [wrapper_path] + WebKitDriver.cmd_line(self, pixel_tests, per_test_args)
-
-
-class GtkPort(WebKitPort):
+class GtkPort(WebKitPort, PulseAudioSanitizer):
port_name = "gtk"
def _port_flag_for_scripts(self):
return "--gtk"
def _driver_class(self):
- return GtkDriver
+ return XvfbDriver
+
+ def setup_test_run(self):
+ self._unload_pulseaudio_module()
+
+ def clean_up_test_run(self):
+ self._restore_pulseaudio_module()
def setup_environ_for_server(self, server_name=None):
environment = WebKitPort.setup_environ_for_server(self, server_name)
@@ -99,7 +74,7 @@ class GtkPort(WebKitPort):
def _generate_all_test_configurations(self):
configurations = []
for build_type in self.ALL_BUILD_TYPES:
- configurations.append(TestConfiguration(version=self._version, architecture='x86', build_type=build_type, graphics_type='cpu'))
+ configurations.append(TestConfiguration(version=self._version, architecture='x86', build_type=build_type))
return configurations
def _path_to_driver(self):
@@ -136,9 +111,6 @@ class GtkPort(WebKitPort):
return full_library
return None
- def _runtime_feature_list(self):
- return None
-
# FIXME: We should find a way to share this implementation with Gtk,
# or teach run-launcher how to call run-safari and move this down to WebKitPort.
def show_results_html_file(self, results_filename):
@@ -148,3 +120,51 @@ class GtkPort(WebKitPort):
# FIXME: old-run-webkit-tests also added ["-graphicssystem", "raster", "-style", "windows"]
# FIXME: old-run-webkit-tests converted results_filename path for cygwin.
self._run_script("run-launcher", run_launcher_args)
+
+ def _get_gdb_output(self, coredump_path):
+ cmd = ['gdb', '-ex', 'thread apply all bt', '--batch', str(self._path_to_driver()), coredump_path]
+ proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ proc.wait()
+ errors = [l.strip() for l in proc.stderr.readlines()]
+ return (proc.stdout.read(), errors)
+
+ def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
+ pid_representation = str(pid or '<unknown>')
+ log_directory = os.environ.get("WEBKIT_CORE_DUMPS_DIRECTORY")
+ errors = []
+ crash_log = ''
+ expected_crash_dump_filename = "core-pid_%s-_-process_%s" % (pid_representation, name)
+
+ def match_filename(filesystem, directory, filename):
+ if pid:
+ return filename == expected_crash_dump_filename
+ return filename.find(name) > -1
+
+ if log_directory:
+ dumps = self._filesystem.files_under(log_directory, file_filter=match_filename)
+ if dumps:
+ # Get the most recent coredump matching the pid and/or process name.
+ coredump_path = list(reversed(sorted(dumps)))[0]
+ if not newer_than or self._filesystem.mtime(coredump_path) > newer_than:
+ crash_log, errors = self._get_gdb_output(coredump_path)
+
+ stderr_lines = errors + (stderr or '<empty>').decode('utf8', 'ignore').splitlines()
+ errors_str = '\n'.join(('STDERR: ' + l) for l in stderr_lines)
+ if not crash_log:
+ if not log_directory:
+ log_directory = "/path/to/coredumps"
+ core_pattern = os.path.join(log_directory, "core-pid_%p-_-process_%e")
+ crash_log = """\
+Coredump %(expected_crash_dump_filename)s not found. To enable crash logs:
+
+- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
+- enable core dumps: ulimit -c unlimited
+- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(log_directory)s
+
+""" % locals()
+
+ return """\
+Crash log for %(name)s (pid %(pid_representation)s):
+
+%(crash_log)s
+%(errors_str)s""" % locals()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py
index 9ee074fd3..2b5c1a464 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py
@@ -27,12 +27,16 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
+import sys
+import os
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.gtk import GtkPort
from webkitpy.layout_tests.port import port_testcase
from webkitpy.common.system.executive_mock import MockExecutive
-
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.tool.mocktool import MockOptions
class GtkPortTest(port_testcase.PortTestCase):
port_name = 'gtk'
@@ -43,3 +47,36 @@ class GtkPortTest(port_testcase.PortTestCase):
port._executive = MockExecutive(should_log=True)
expected_stderr = "MOCK run_command: ['Tools/Scripts/run-launcher', '--release', '--gtk', 'file://test.html'], cwd=/mock-checkout\n"
OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_stderr=expected_stderr)
+
+ def assertLinesEqual(self, a, b):
+ if hasattr(self, 'assertMultiLineEqual'):
+ self.assertMultiLineEqual(a, b)
+ else:
+ self.assertEqual(a.splitlines(), b.splitlines())
+
+
+ def test_get_crash_log(self):
+ core_directory = os.environ.get('WEBKIT_CORE_DUMPS_DIRECTORY', '/path/to/coredumps')
+ core_pattern = os.path.join(core_directory, "core-pid_%p-_-process_%e")
+ mock_empty_crash_log = """\
+Crash log for DumpRenderTree (pid 28529):
+
+Coredump core-pid_28529-_-process_DumpRenderTree not found. To enable crash logs:
+
+- run this command as super-user: echo "%(core_pattern)s" > /proc/sys/kernel/core_pattern
+- enable core dumps: ulimit -c unlimited
+- set the WEBKIT_CORE_DUMPS_DIRECTORY environment variable: export WEBKIT_CORE_DUMPS_DIRECTORY=%(core_directory)s
+
+
+STDERR: <empty>""" % locals()
+
+ def _mock_gdb_output(coredump_path):
+ return (mock_empty_crash_log, [])
+
+ port = self.make_port()
+ port._get_gdb_output = _mock_gdb_output
+ log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
+ self.assertLinesEqual(log, mock_empty_crash_log)
+
+ log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
+ self.assertLinesEqual(log, mock_empty_crash_log)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py b/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py
index ee34491c1..c2eece3b0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py
@@ -26,7 +26,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This class helps to block NRWT threads when more NRWTs run
-http and websocket tests in a same time."""
+perf, http and websocket tests at the same time."""
import logging
import os
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py
index bff5a66d8..fbf2d9df3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py
@@ -46,10 +46,10 @@ class HttpLockTestWithRealFileSystem(unittest.TestCase):
def clean_all_lockfile(self):
if self.filesystem.exists(self.guard_lock_file):
- self.filesystem.unlink(self.guard_lock_file)
+ self.filesystem.remove(self.guard_lock_file)
lock_list = self.filesystem.glob(self.lock_file_path_prefix + '*')
for file_name in lock_list:
- self.filesystem.unlink(file_name)
+ self.filesystem.remove(file_name)
def assertEqual(self, first, second):
if first != second:
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac.py b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
index 829b896fa..9513ae5bd 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mac.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
@@ -27,8 +27,13 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
+import os
import re
+import subprocess
+import time
+from webkitpy.common.system.crashlogs import CrashLogs
+from webkitpy.common.system.executive import ScriptError
from webkitpy.layout_tests.port.apple import ApplePort
from webkitpy.layout_tests.port.leakdetector import LeakDetector
@@ -50,6 +55,22 @@ class MacPort(ApplePort):
# with MallocStackLogging enabled.
self.set_option_default("batch_size", 1000)
+ def _most_recent_version(self):
+ # This represents the most recent shipping version of the operating system.
+ return self.VERSION_FALLBACK_ORDER[-2]
+
+ def should_retry_crashes(self):
+ # On Apple Mac, we retry crashes due to https://bugs.webkit.org/show_bug.cgi?id=82233
+ return True
+
+ def baseline_path(self):
+ if self.name() == self._most_recent_version():
+ # Baselines for the most recent shipping version should go into 'mac', not 'mac-foo'.
+ if self.get_option('webkit_test_runner'):
+ return self._webkit_baseline_path('mac-wk2')
+ return self._webkit_baseline_path('mac')
+ return ApplePort.baseline_path(self)
+
def baseline_search_path(self):
fallback_index = self.VERSION_FALLBACK_ORDER.index(self._port_name_with_version())
fallback_names = list(self.VERSION_FALLBACK_ORDER[fallback_index:])
@@ -83,15 +104,19 @@ class MacPort(ApplePort):
def is_lion(self):
return self._version == "lion"
- # Belongs on a Platform object.
- def is_crash_reporter(self, process_name):
- return re.search(r'ReportCrash', process_name)
-
def default_child_processes(self):
if self.is_snowleopard():
_log.warn("Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.")
return 1
- return super(MacPort, self).default_child_processes()
+
+ # FIXME: As a temporary workaround while we figure out what's going
+ # on with https://bugs.webkit.org/show_bug.cgi?id=83076, halve the
+ # number of workers we run by default on bigger machines.
+ default_count = super(MacPort, self).default_child_processes()
+ if default_count >= 8:
+ cpu_count = self._executive.cpu_count()
+ return max(1, min(default_count, int(cpu_count / 2)))
+ return default_count
def _build_java_test_support(self):
java_tests_path = self._filesystem.join(self.layout_tests_dir(), "java")
@@ -128,7 +153,11 @@ class MacPort(ApplePort):
return self._build_path('WebCore.framework/Versions/A/WebCore')
def show_results_html_file(self, results_filename):
- self._run_script('run-safari', ['--no-saved-state', '-NSOpen', results_filename])
+ # We don't use self._run_script() because we don't want to wait for the script
+ # to exit and we want the output to show up on stdout in case there are errors
+ # launching the browser.
+ self._executive.popen([self._config.script_path('run-safari')] + self._arguments_for_configuration() + ['--no-saved-state', '-NSOpen', results_filename],
+ cwd=self._config.webkit_base_dir(), stdout=file(os.devnull), stderr=file(os.devnull))
# FIXME: The next two routines turn off the http locking in order
# to work around failures on the bots caused when the slave restarts.
@@ -146,3 +175,67 @@ class MacPort(ApplePort):
def release_http_lock(self):
pass
+
+ def _get_crash_log(self, name, pid, stdout, stderr, newer_than, time_fn=None, sleep_fn=None):
+ # Note that we slow-spin and wait here, since it appears the time
+ # ReportCrash takes to actually write and flush the file varies when
+ # there are lots of simultaneous crashes going on.
+ # FIXME: Should most of this be moved into CrashLogs()?
+ time_fn = time_fn or time.time
+ sleep_fn = sleep_fn or time.sleep
+ crash_log = ''
+ crash_logs = CrashLogs(self.host)
+ now = time_fn()
+ # FIXME: delete this after we're sure this code is working ...
+ _log.debug('looking for crash log for %s:%s' % (name, str(pid)))
+ deadline = now + 5 * int(self.get_option('child_processes', 1))
+ while not crash_log and now <= deadline:
+ crash_log = crash_logs.find_newest_log(name, pid, include_errors=True, newer_than=newer_than)
+ if not crash_log or not [line for line in crash_log.splitlines() if not line.startswith('ERROR')]:
+ sleep_fn(0.1)
+ now = time_fn()
+ if not crash_log:
+ crash_log = 'no crash log found for %s:%d' % (name, pid)
+ _log.warning(crash_log)
+ return crash_log
+
+ def sample_process(self, name, pid):
+ try:
+ hang_report = self._filesystem.join(self.results_directory(), "%s-%s.sample.txt" % (name, pid))
+ self._executive.run_command([
+ "/usr/bin/sample",
+ pid,
+ 10,
+ 10,
+ "-file",
+ hang_report,
+ ])
+ except ScriptError, e:
+ _log.warning('Unable to sample process.')
+
+ def _path_to_helper(self):
+ binary_name = 'LayoutTestHelper'
+ return self._build_path(binary_name)
+
+ def start_helper(self):
+ helper_path = self._path_to_helper()
+ if helper_path:
+ _log.debug("Starting layout helper %s" % helper_path)
+ # Note: Not thread safe: http://bugs.python.org/issue2320
+ self._helper = self._executive.popen([helper_path],
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
+ is_ready = self._helper.stdout.readline()
+ if not is_ready.startswith('ready'):
+ _log.error("LayoutTestHelper failed to be ready")
+
+ def stop_helper(self):
+ if self._helper:
+ _log.debug("Stopping LayoutTestHelper")
+ try:
+ self._helper.stdin.write("x\n")
+ self._helper.stdin.close()
+ self._helper.wait()
+ except IOError, e:
+ _log.debug("IOError raised while stopping helper: %s" % str(e))
+ pass
+ self._helper = None
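
_get_crash_log() above is a bounded polling loop with an injectable clock. A minimal sketch of that shape, plus the deterministic-clock trick the unit tests below use to expire the deadline without sleeping; poll_until is a hypothetical distillation, not the patch's code.

    import time

    def poll_until(find_log, deadline_seconds, time_fn=time.time, sleep_fn=time.sleep):
        # find_log() returns the crash log text, or an empty value while absent.
        deadline = time_fn() + deadline_seconds
        log = find_log()
        while not log and time_fn() <= deadline:
            sleep_fn(0.1)
            log = find_log()
        return log

    # Deterministic test usage: timestamps 0, 20, 40 blow past a 5s deadline
    # immediately, so the loop exits after a single check and no real sleep.
    times = [0, 20, 40]
    assert poll_until(lambda: '', 5, time_fn=lambda: times.pop(0), sleep_fn=lambda s: None) == ''
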
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
index 35e64e49f..0b9bca978 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
@@ -31,7 +31,7 @@ from webkitpy.layout_tests.port import port_testcase
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions
-from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2, MockProcess, ScriptError
from webkitpy.common.system.systemhost_mock import MockSystemHost
@@ -41,15 +41,23 @@ class MacTest(port_testcase.PortTestCase):
port_name = 'mac-leopard'
port_maker = MacPort
- def assert_skipped_file_search_paths(self, port_name, expected_paths):
- port = self.make_port(port_name=port_name)
+ def assert_skipped_file_search_paths(self, port_name, expected_paths, use_webkit2=False):
+ port = self.make_port(port_name=port_name, options=MockOptions(webkit_test_runner=use_webkit2))
self.assertEqual(port._skipped_file_search_paths(), expected_paths)
def test_skipped_file_search_paths(self):
self.assert_skipped_file_search_paths('mac-snowleopard', set(['mac-snowleopard', 'mac']))
self.assert_skipped_file_search_paths('mac-leopard', set(['mac-leopard', 'mac']))
- # We cannot test just "mac" here as the MacPort constructor automatically fills in the version from the running OS.
- # self.assert_skipped_file_search_paths('mac', ['mac'])
+ self.assert_skipped_file_search_paths('mac-lion', set(['mac-lion', 'mac']))
+
+ # Note that there is no platform/mac-future/Skipped.
+ self.assert_skipped_file_search_paths('mac-future', set(['mac']))
+
+ self.assert_skipped_file_search_paths('mac-snowleopard', set(['mac-snowleopard', 'mac', 'mac-wk2', 'wk2']), use_webkit2=True)
+ self.assert_skipped_file_search_paths('mac-leopard', set(['mac-leopard', 'mac', 'mac-wk2', 'wk2']), use_webkit2=True)
+ self.assert_skipped_file_search_paths('mac-lion', set(['mac-lion', 'mac', 'mac-wk2', 'wk2']), use_webkit2=True)
+ self.assert_skipped_file_search_paths('mac-future', set(['mac', 'mac-wk2', 'wk2']), use_webkit2=True)
+
example_skipped_file = u"""
@@ -139,29 +147,32 @@ java/
self.assertEquals(env['MallocStackLogging'], '1')
self.assertEquals(env['DYLD_INSERT_LIBRARIES'], '/usr/lib/libgmalloc.dylib')
- def _assert_search_path(self, search_paths, version, use_webkit2=False):
- # FIXME: Port constructors should not "parse" the port name, but
- # rather be passed components (directly or via setters). Once
- # we fix that, this method will need a re-write.
- port = self.make_port(port_name='mac-%s' % version, options=MockOptions(webkit_test_runner=use_webkit2))
+ def _assert_search_path(self, port_name, baseline_path, search_paths, use_webkit2=False):
+ port = self.make_port(port_name=port_name, options=MockOptions(webkit_test_runner=use_webkit2))
absolute_search_paths = map(port._webkit_baseline_path, search_paths)
+ self.assertEquals(port.baseline_path(), port._webkit_baseline_path(baseline_path))
self.assertEquals(port.baseline_search_path(), absolute_search_paths)
def test_baseline_search_path(self):
- # FIXME: Is this really right? Should mac-leopard fallback to mac-snowleopard?
- self._assert_search_path(['mac-leopard', 'mac-snowleopard', 'mac-lion', 'mac'], 'leopard')
- self._assert_search_path(['mac-snowleopard', 'mac-lion', 'mac'], 'snowleopard')
- self._assert_search_path(['mac-lion', 'mac'], 'lion')
+ self._assert_search_path('mac-leopard', 'mac-leopard', ['mac-leopard', 'mac-snowleopard', 'mac-lion', 'mac'])
+ self._assert_search_path('mac-snowleopard', 'mac-snowleopard', ['mac-snowleopard', 'mac-lion', 'mac'])
+
+ # Note that mac-lion writes baselines into mac, not mac-lion! (but it will read from mac-lion)
+ self._assert_search_path('mac-lion', 'mac', ['mac-lion', 'mac'])
- self._assert_search_path(['mac-wk2', 'mac-leopard', 'mac-snowleopard', 'mac-lion', 'mac'], 'leopard', use_webkit2=True)
- self._assert_search_path(['mac-wk2', 'mac-snowleopard', 'mac-lion', 'mac'], 'snowleopard', use_webkit2=True)
- self._assert_search_path(['mac-wk2', 'mac-lion', 'mac'], 'lion', use_webkit2=True)
+ # Note that there is no 'mac-future'; it uses the 'mac' directory as well.
+ self._assert_search_path('mac-future', 'mac', ['mac'])
+
+ self._assert_search_path('mac-leopard', 'mac-wk2', ['mac-wk2', 'mac-leopard', 'mac-snowleopard', 'mac-lion', 'mac'], use_webkit2=True)
+ self._assert_search_path('mac-snowleopard', 'mac-wk2', ['mac-wk2', 'mac-snowleopard', 'mac-lion', 'mac'], use_webkit2=True)
+ self._assert_search_path('mac-lion', 'mac-wk2', ['mac-wk2', 'mac-lion', 'mac'], use_webkit2=True)
+ self._assert_search_path('mac-future', 'mac-wk2', ['mac-wk2', 'mac'], use_webkit2=True)
def test_show_results_html_file(self):
port = self.make_port()
# Delay setting a should_log executive to avoid logging from MacPort.__init__.
port._executive = MockExecutive(should_log=True)
- expected_stderr = "MOCK run_command: ['Tools/Scripts/run-safari', '--release', '--no-saved-state', '-NSOpen', 'test.html'], cwd=/mock-checkout\n"
+ expected_stderr = "MOCK popen: ['Tools/Scripts/run-safari', '--release', '--no-saved-state', '-NSOpen', 'test.html'], cwd=/mock-checkout\n"
OutputCapture().assert_outputs(self, port.show_results_html_file, ["test.html"], expected_stderr=expected_stderr)
def test_operating_system(self):
@@ -177,3 +188,68 @@ java/
expected_logs = "Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.\n"
child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_logs=expected_logs)
self.assertEqual(child_processes, 1)
+
+ def test_get_crash_log(self):
+ # Mac crash logs are tested elsewhere, so here we just make sure we don't crash.
+ def fake_time_cb():
+ times = [0, 20, 40]
+ return lambda: times.pop(0)
+ port = self.make_port(port_name='mac-snowleopard')
+ port._get_crash_log('DumpRenderTree', 1234, '', '', 0,
+ time_fn=fake_time_cb(), sleep_fn=lambda delay: None)
+
+ def test_helper_starts(self):
+ host = MockSystemHost(MockExecutive())
+ port = self.make_port(host)
+ oc = OutputCapture()
+ oc.capture_output()
+ host.executive._proc = MockProcess('ready\n')
+ port.start_helper()
+ port.stop_helper()
+ oc.restore_output()
+
+ # make sure trying to stop the helper twice is safe.
+ port.stop_helper()
+
+ def test_helper_fails_to_start(self):
+ host = MockSystemHost(MockExecutive())
+ port = self.make_port(host)
+ oc = OutputCapture()
+ oc.capture_output()
+ port.start_helper()
+ port.stop_helper()
+ oc.restore_output()
+
+ def test_helper_fails_to_stop(self):
+ host = MockSystemHost(MockExecutive())
+ host.executive._proc = MockProcess()
+
+ def bad_waiter():
+ raise IOError('failed to wait')
+ host.executive._proc.wait = bad_waiter
+
+ port = self.make_port(host)
+ oc = OutputCapture()
+ oc.capture_output()
+ port.start_helper()
+ port.stop_helper()
+ oc.restore_output()
+
+ def test_sample_process(self):
+
+ def logging_run_command(args):
+ print args
+
+ port = self.make_port()
+ port._executive = MockExecutive2(run_command_fn=logging_run_command)
+ expected_stdout = "['/usr/bin/sample', 42, 10, 10, '-file', '/mock-build/layout-test-results/test-42.sample.txt']\n"
+ OutputCapture().assert_outputs(self, port.sample_process, args=['test', 42], expected_stdout=expected_stdout)
+
+ def test_sample_process_throws_exception(self):
+
+ def throwing_run_command(args):
+ raise ScriptError("MOCK script error")
+
+ port = self.make_port()
+ port._executive = MockExecutive2(run_command_fn=throwing_run_command)
+ OutputCapture().assert_outputs(self, port.sample_process, args=['test', 42])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
index 3d41ebdde..cc3dc7e63 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
@@ -161,7 +161,7 @@ def parse_options(argv):
else:
pixel_tests = '--pixel-tests' in argv
options = MockOptions(chromium=chromium, platform=platform, pixel_tests=pixel_tests, pixel_path=pixel_path)
- return (options, [])
+ return (options, argv)
class MockDRT(object):
@@ -184,7 +184,12 @@ class MockDRT(object):
line = self._stdin.readline()
if not line:
return 0
- self.run_one_test(self.input_from_line(line))
+ driver_input = self.input_from_line(line)
+ dirname, basename = self._port.split_test(driver_input.test_name)
+ is_reftest = (self._port.reference_files(driver_input.test_name) or
+ self._port.is_reference_html_file(self._port._filesystem, dirname, basename))
+ output = self.output_for_test(driver_input, is_reftest)
+ self.write_test_output(driver_input, output, is_reftest)
def input_from_line(self, line):
vals = line.strip().split("'")
@@ -199,24 +204,22 @@ class MockDRT(object):
else:
test_name = self._port.relative_test_filename(uri)
- is_reftest = (self._port.reference_files(test_name) or
- test_name.endswith('-expected.html') or
- test_name.endswith('-mismatch.html'))
- return DriverInput(test_name, 0, checksum, is_reftest)
+ return DriverInput(test_name, 0, checksum, self._options.pixel_tests)
- def output_for_test(self, test_input):
+ def output_for_test(self, test_input, is_reftest):
port = self._port
actual_text = port.expected_text(test_input.test_name)
actual_audio = port.expected_audio(test_input.test_name)
actual_image = None
actual_checksum = None
- if test_input.is_reftest:
+ if is_reftest:
# Make up some output for reftests.
actual_text = 'reference text\n'
- actual_checksum = 'None'
+ actual_checksum = 'mock-checksum'
actual_image = 'blank'
- if test_name.endswith('-mismatch.html'):
- actual_checksum = 'True'
+ if test_input.test_name.endswith('-mismatch.html'):
+ actual_text = 'not reference text\n'
+ actual_checksum = 'not-mock-checksum'
actual_image = 'not blank'
elif self._options.pixel_tests and test_input.image_hash:
actual_checksum = port.expected_checksum(test_input.test_name)
@@ -224,9 +227,7 @@ class MockDRT(object):
return DriverOutput(actual_text, actual_image, actual_checksum, actual_audio)
- def run_one_test(self, test_input):
- output = self.output_for_test(test_input)
-
+ def write_test_output(self, test_input, output, is_reftest):
if output.audio:
self._stdout.write('Content-Type: audio/wav\n')
self._stdout.write('Content-Transfer-Encoding: base64\n')
@@ -235,11 +236,12 @@ class MockDRT(object):
self._stdout.write('Content-Type: text/plain\n')
# FIXME: Note that we don't ensure there is a trailing newline!
# This mirrors actual (Mac) DRT behavior but is a bug.
- self._stdout.write(output.text)
+ if output.text:
+ self._stdout.write(output.text)
self._stdout.write('#EOF\n')
- if self._options.pixel_tests and (test_input.image_hash or is_reftest):
+ if self._options.pixel_tests and output.image_hash:
self._stdout.write('\n')
self._stdout.write('ActualHash: %s\n' % output.image_hash)
self._stdout.write('ExpectedHash: %s\n' % test_input.image_hash)
@@ -263,26 +265,28 @@ class MockChromiumDRT(MockDRT):
checksum = None
test_name = self._driver.uri_to_test(uri)
- is_reftest = (self._port.reference_files(test_name) or
- test_name.endswith('-expected.html') or
- test_name.endswith('-mismatch.html'))
-
- return DriverInput(test_name, timeout, checksum, is_reftest)
-
- def run_one_test(self, test_input):
- output = self.output_for_test(test_input)
-
+ return DriverInput(test_name, timeout, checksum, self._options.pixel_tests)
+
+ def output_for_test(self, test_input, is_reftest):
+ # FIXME: This is a hack to make virtual tests work. Need something more general.
+ original_test_name = test_input.test_name
+ if '--enable-accelerated-2d-canvas' in self._args and 'canvas' in test_input.test_name:
+ test_input.test_name = 'platform/chromium/virtual/gpu/' + test_input.test_name
+ output = super(MockChromiumDRT, self).output_for_test(test_input, is_reftest)
+ test_input.test_name = original_test_name
+ return output
+
+ def write_test_output(self, test_input, output, is_reftest):
self._stdout.write("#URL:%s\n" % self._driver.test_to_uri(test_input.test_name))
- if self._options.pixel_tests and (test_input.image_hash or test_input.is_reftest):
+ if self._options.pixel_tests and output.image_hash:
self._stdout.write("#MD5:%s\n" % output.image_hash)
- self._host.filesystem.maybe_make_directory(self._host.filesystem.dirname(self._options.pixel_path))
- self._host.filesystem.write_binary_file(self._options.pixel_path,
- output.image)
- self._stdout.write(output.text)
-
- # FIXME: (See above FIXME as well). Chromium DRT appears to always
- # ensure the text output has a trailing newline. Mac DRT does not.
- if not output.text.endswith('\n'):
+ if output.image:
+ self._host.filesystem.maybe_make_directory(self._host.filesystem.dirname(self._options.pixel_path))
+ self._host.filesystem.write_binary_file(self._options.pixel_path, output.image)
+ if output.text:
+ self._stdout.write(output.text)
+
+ if output.text and not output.text.endswith('\n'):
self._stdout.write('\n')
self._stdout.write('#EOF\n')
self._stdout.flush()
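
The write_test_output() changes above follow the DRT stdout framing: a text section terminated by '#EOF', an optional hash section, then a final '#EOF'. A minimal sketch of that framing; the helper name is hypothetical, the header strings match the patch.

    def write_drt_output(stdout, text, actual_hash=None, expected_hash=None):
        stdout.write('Content-Type: text/plain\n')
        if text:  # missing text results simply omit the body
            stdout.write(text)
        stdout.write('#EOF\n')
        if actual_hash:
            stdout.write('\n')
            stdout.write('ActualHash: %s\n' % actual_hash)
            stdout.write('ExpectedHash: %s\n' % expected_hash)
        stdout.write('#EOF\n')
        stdout.flush()
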
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
index 0b85579f6..605071805 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
@@ -56,10 +56,6 @@ class MockDRTPortTest(port_testcase.PortTestCase):
return mock_drt.MockDRTPort(host, port_name='mock-chromium-win', options=options)
return mock_drt.MockDRTPort(host, port_name='mock-mac', options=options)
- def test_default_worker_model(self):
- # only overridding the default test; we don't care about this one.
- pass
-
def test_port_name_in_constructor(self):
self.assertTrue(mock_drt.MockDRTPort(MockSystemHost(), port_name='mock-test'))
@@ -90,6 +86,8 @@ class MockDRTPortTest(port_testcase.PortTestCase):
def integration_test_helper(self):
pass
+ def test_get_crash_log(self):
+ pass
class MockDRTTest(unittest.TestCase):
def input_line(self, port, test_name, checksum=None):
@@ -113,13 +111,13 @@ class MockDRTTest(unittest.TestCase):
return mock_drt.MockDRT(options, args, host, stdin, stdout, stderr)
def make_input_output(self, port, test_name, pixel_tests,
- expected_checksum, drt_output, drt_input=None):
+ expected_checksum, drt_output, drt_input=None, expected_text=None):
if pixel_tests:
if not expected_checksum:
expected_checksum = port.expected_checksum(test_name)
if not drt_input:
drt_input = self.input_line(port, test_name, expected_checksum)
- text_output = port.expected_text(test_name)
+ text_output = expected_text or port.expected_text(test_name) or ''
if not drt_output:
drt_output = self.expected_output(port, test_name, pixel_tests,
@@ -127,27 +125,24 @@ class MockDRTTest(unittest.TestCase):
return (drt_input, drt_output)
def expected_output(self, port, test_name, pixel_tests, text_output, expected_checksum):
+ output = ['Content-Type: text/plain\n']
+ if text_output:
+ output.append(text_output)
+ output.append('#EOF\n')
if pixel_tests and expected_checksum:
- return ['Content-Type: text/plain\n',
- text_output,
- '#EOF\n',
- '\n',
- 'ActualHash: %s\n' % expected_checksum,
- 'ExpectedHash: %s\n' % expected_checksum,
- '#EOF\n']
- else:
- return ['Content-Type: text/plain\n',
- text_output,
- '#EOF\n',
- '#EOF\n']
-
- def assertTest(self, test_name, pixel_tests, expected_checksum=None, drt_output=None, host=None):
+ output.extend(['\n',
+ 'ActualHash: %s\n' % expected_checksum,
+ 'ExpectedHash: %s\n' % expected_checksum])
+ output.append('#EOF\n')
+ return output
+
+ def assertTest(self, test_name, pixel_tests, expected_checksum=None, drt_output=None, host=None, expected_text=None):
port_name = 'test'
host = host or MockSystemHost()
test.add_unit_tests_to_mock_filesystem(host.filesystem)
port = PortFactory(host).get(port_name)
drt_input, drt_output = self.make_input_output(port, test_name,
- pixel_tests, expected_checksum, drt_output)
+ pixel_tests, expected_checksum, drt_output, drt_input=None, expected_text=expected_text)
args = ['--platform', port_name] + self.extra_args(pixel_tests)
stdin = newstringio.StringIO(drt_input)
@@ -199,6 +194,21 @@ class MockDRTTest(unittest.TestCase):
def test_checksum_in_png(self):
self.assertTest('passes/checksum_in_image.html', True)
+ def test_missing_image(self):
+ self.assertTest('failures/expected/missing_image.html', True)
+
+ def test_missing_text(self):
+ self.assertTest('failures/expected/missing_text.html', True)
+
+ def test_reftest_match(self):
+ self.assertTest('passes/reftest.html', False, expected_checksum='mock-checksum', expected_text='reference text\n')
+ self.assertTest('passes/reftest.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
+
+ def test_reftest_mismatch(self):
+ self.assertTest('passes/mismatch.html', False, expected_checksum='mock-checksum', expected_text='reference text\n')
+ self.assertTest('passes/mismatch.html', True, expected_checksum='mock-checksum', expected_text='reference text\n')
+
+
class MockChromiumDRTTest(MockDRTTest):
def extra_args(self, pixel_tests):
@@ -224,17 +234,15 @@ class MockChromiumDRTTest(MockDRTTest):
def expected_output(self, port, test_name, pixel_tests, text_output, expected_checksum):
url = port.create_driver(0).test_to_uri(test_name)
- if pixel_tests and expected_checksum:
- return ['#URL:%s\n' % url,
- '#MD5:%s\n' % expected_checksum,
- text_output,
- '\n',
- '#EOF\n']
- else:
- return ['#URL:%s\n' % url,
- text_output,
- '\n',
- '#EOF\n']
+ output = ['#URL:%s\n' % url]
+ if expected_checksum:
+ output.append('#MD5:%s\n' % expected_checksum)
+ if text_output:
+ output.append(text_output)
+ if not text_output.endswith('\n'):
+ output.append('\n')
+ output.append('#EOF\n')
+ return output
def test_pixeltest__fails(self):
host = MockSystemHost()
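
Concretely, for a (non-Chromium) pixel test whose text output is 'result\n' and whose checksum is 'mock-checksum', the rebuilt expected_output above produces this write sequence (values illustrative):

['Content-Type: text/plain\n',
 'result\n',
 '#EOF\n',
 '\n',
 'ActualHash: mock-checksum\n',
 'ExpectedHash: mock-checksum\n',
 '#EOF\n']
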
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
index 02d6f23dc..4bdc3ed13 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
@@ -40,6 +40,7 @@ from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.servers import http_server_base
from webkitpy.layout_tests.servers import http_server_base
from webkitpy.layout_tests.port import factory
+from webkitpy.layout_tests.port.config_mock import MockConfig
from webkitpy.tool.mocktool import MockOptions
@@ -52,18 +53,14 @@ class PortTestCase(unittest.TestCase):
os_name = None
os_version = None
port_maker = None
- expected_default_worker_model = 'processes'
- def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
+ def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, config=None, **kwargs):
host = host or MockSystemHost(os_name=(os_name or self.os_name), os_version=(os_version or self.os_version))
options = options or MockOptions(configuration='Release')
+ config = config or MockConfig(filesystem=host.filesystem, default_configuration='Release')
port_name = port_name or self.port_name
port_name = self.port_maker.determine_full_port_name(host, options, port_name)
- return self.port_maker(host, port_name, options=options, **kwargs)
-
- def test_default_worker_model(self):
- port = self.make_port()
- self.assertEqual(port.default_worker_model(), self.expected_default_worker_model)
+ return self.port_maker(host, port_name, options=options, config=config, **kwargs)
def test_driver_cmd_line(self):
port = self.make_port()
@@ -312,6 +309,30 @@ class PortTestCase(unittest.TestCase):
i += 1
+ def test_get_crash_log(self):
+ port = self.make_port()
+ self.assertEquals(port._get_crash_log(None, None, None, None, newer_than=None),
+ ('crash log for <unknown process name> (pid <unknown>):\n'
+ 'STDOUT: <empty>\n'
+ 'STDERR: <empty>\n'))
+
+ self.assertEquals(port._get_crash_log('foo', 1234, 'out bar\nout baz', 'err bar\nerr baz\n', newer_than=None),
+ ('crash log for foo (pid 1234):\n'
+ 'STDOUT: out bar\n'
+ 'STDOUT: out baz\n'
+ 'STDERR: err bar\n'
+ 'STDERR: err baz\n'))
+
+ self.assertEquals(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=None),
+ (u'crash log for foo (pid 1234):\n'
+ u'STDOUT: foo\ufffdbar\n'
+ u'STDERR: foo\ufffdbar\n'))
+
+ self.assertEquals(port._get_crash_log('foo', 1234, 'foo\xa6bar', 'foo\xa6bar', newer_than=1.0),
+ (u'crash log for foo (pid 1234):\n'
+ u'STDOUT: foo\ufffdbar\n'
+ u'STDERR: foo\ufffdbar\n'))
+
# FIXME: This class and main() should be merged into test-webkitpy.
class EnhancedTestLoader(unittest.TestLoader):
integration_tests = False
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py b/Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py
new file mode 100644
index 000000000..f4574a92f
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py
@@ -0,0 +1,85 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2012 Intel Corporation
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import subprocess
+
+
+_log = logging.getLogger(__name__)
+
+
+# Shared by GTK and EFL for pulseaudio sanitizing before running tests.
+class PulseAudioSanitizer:
+ def _unload_pulseaudio_module(self):
+ # Unload pulseaudio's module-stream-restore, since it remembers
+ # volume settings from different runs, and could affect
+ # multimedia test results.
+ self._pa_module_index = -1
+ with open(os.devnull, 'w') as devnull:
+ try:
+ pactl_process = subprocess.Popen(["pactl", "list", "short", "modules"], stdout=subprocess.PIPE, stderr=devnull)
+ pactl_process.wait()
+ except OSError:
+ # pactl might not be available.
+ _log.debug('pactl not found. Please install pulseaudio-utils to avoid some potential media test failures.')
+ return
+ modules_list = pactl_process.communicate()[0]
+ for module in modules_list.splitlines():
+ if module.find("module-stream-restore") >= 0:
+ # Some pulseaudio-utils versions don't provide
+ # the index, just an empty string
+ self._pa_module_index = module.split('\t')[0] or -1
+ try:
+ # Since they could provide other stuff (not an index
+ # nor an empty string), let's make sure this is an int.
+ if int(self._pa_module_index) != -1:
+ pactl_process = subprocess.Popen(["pactl", "unload-module", self._pa_module_index])
+ pactl_process.wait()
+ if pactl_process.returncode == 0:
+ _log.debug('Unloaded module-stream-restore successfully')
+ else:
+ _log.debug('Unloading module-stream-restore failed')
+ except ValueError:
+ # pactl should have returned an index if the module is found
+ _log.debug('Unable to parse module index. Please check if your pulseaudio-utils version is too old.')
+ return
+
+ def _restore_pulseaudio_module(self):
+ # If pulseaudio's module-stream-restore was previously unloaded,
+ # restore it back. We shouldn't need extra checks here, since an
+ # index != -1 here means we successfully unloaded it previously.
+ if self._pa_module_index != -1:
+ with open(os.devnull, 'w') as devnull:
+ pactl_process = subprocess.Popen(["pactl", "load-module", "module-stream-restore"], stdout=devnull, stderr=devnull)
+ pactl_process.wait()
+ if pactl_process.returncode == 0:
+ _log.debug('Restored module-stream-restore successfully')
+ else:
+ _log.debug('Restoring module-stream-restore failed')
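
PulseAudioSanitizer is written as a mixin: a port opts in by calling _unload_pulseaudio_module() before a test run and _restore_pulseaudio_module() after it. A sketch of that wiring (the port class and hook names here are assumptions; the real GTK and EFL ports call these from their own set-up and tear-down paths):

from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
from webkitpy.layout_tests.port.webkit import WebKitPort


class SomeLinuxPort(WebKitPort, PulseAudioSanitizer):
    # Hypothetical port, for illustration only.
    def setup_test_run(self):
        self._unload_pulseaudio_module()

    def clean_up_test_run(self):
        self._restore_pulseaudio_module()
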
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/qt.py b/Tools/Scripts/webkitpy/layout_tests/port/qt.py
index 5fc60a590..de93bf47f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/qt.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/qt.py
@@ -38,7 +38,7 @@ import webkit
from webkitpy.common.memoized import memoized
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port.webkit import WebKitPort
-
+from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
_log = logging.getLogger(__name__)
@@ -74,13 +74,16 @@ class QtPort(WebKitPort):
configurations = []
for version in self.ALL_VERSIONS:
for build_type in self.ALL_BUILD_TYPES:
- configurations.append(TestConfiguration(version=version, architecture='x86', build_type=build_type, graphics_type='cpu'))
+ configurations.append(TestConfiguration(version=version, architecture='x86', build_type=build_type))
return configurations
def _build_driver(self):
# The Qt port builds DRT as part of the main build step
return True
+ def _driver_class(self):
+ return XvfbDriver
+
def _path_to_driver(self):
return self._build_path('bin/%s' % self.driver_name())
@@ -132,13 +135,14 @@ class QtPort(WebKitPort):
search_paths.add('qt-5.0-wk1')
return search_paths
- def _runtime_feature_list(self):
- return None
-
def setup_environ_for_server(self, server_name=None):
clean_env = WebKitPort.setup_environ_for_server(self, server_name)
clean_env['QTWEBKIT_PLUGIN_PATH'] = self._build_path('lib/plugins')
self._copy_value_from_environ_if_set(clean_env, 'QT_DRT_WEBVIEW_MODE')
+ self._copy_value_from_environ_if_set(clean_env, 'DYLD_IMAGE_SUFFIX')
+ self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_LOG')
+ self._copy_value_from_environ_if_set(clean_env, 'DISABLE_NI_WARNING')
+ self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_PAUSE_UI_PROCESS')
return clean_env
# FIXME: We should find a way to share this implementation with Gtk,
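
The extra _copy_value_from_environ_if_set calls forward each variable into the clean server environment only when the caller actually has it set. A standalone sketch of that effect (the helper below imitates the port method rather than importing it):

import os

def copy_value_from_environ_if_set(clean_env, name):
    # Stand-in for the QtPort method of the same name.
    if name in os.environ:
        clean_env[name] = os.environ[name]

os.environ['QT_WEBKIT_LOG'] = '1'
os.environ.pop('QT_WEBKIT_PAUSE_UI_PROCESS', None)

clean_env = {}
for name in ('QT_DRT_WEBVIEW_MODE', 'DYLD_IMAGE_SUFFIX', 'QT_WEBKIT_LOG',
             'DISABLE_NI_WARNING', 'QT_WEBKIT_PAUSE_UI_PROCESS'):
    copy_value_from_environ_if_set(clean_env, name)

print clean_env    # {'QT_WEBKIT_LOG': '1'}
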
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py
index 2679f33f1..0cc694780 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py
@@ -81,7 +81,7 @@ class QtPortTest(port_testcase.PortTestCase):
def test_setup_environ_for_server(self):
port = self.make_port()
env = port.setup_environ_for_server(port.driver_name())
- self.assertEquals(env['QTWEBKIT_PLUGIN_PATH'], 'MOCK output of child process/lib/plugins')
+ self.assertEquals(env['QTWEBKIT_PLUGIN_PATH'], '/mock-build/lib/plugins')
def test_operating_system(self):
self.assertEqual('linux', self.make_port(port_name='qt-linux', os_name='linux').operating_system())
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
index 6c48fef64..108cc5dbd 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
@@ -29,36 +29,49 @@
"""Package that implements the ServerProcess wrapper class"""
+import errno
import logging
-import os
-import select
import signal
import subprocess
import sys
import time
-if sys.platform != 'win32':
+
+# Note that although win32 python does provide an implementation of
+# the win32 select API, it only works on sockets, and not on the named pipes
+# used by subprocess, so we have to use the native APIs directly.
+if sys.platform == 'win32':
+ import msvcrt
+ import win32pipe
+ import win32file
+else:
import fcntl
+ import os
+ import select
-from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.executive import ScriptError
_log = logging.getLogger(__name__)
-class ServerProcess:
+class ServerProcess(object):
"""This class provides a wrapper around a subprocess that
implements a simple request/response usage model. The primary benefit
is that reading responses takes a deadline, so that we don't ever block
indefinitely. The class also handles transparently restarting processes
as necessary to keep issuing commands."""
- def __init__(self, port_obj, name, cmd, env=None, executive=Executive()):
+ def __init__(self, port_obj, name, cmd, env=None):
self._port = port_obj
self._name = name # Should be the command name (e.g. DumpRenderTree, ImageDiff)
self._cmd = cmd
self._env = env
+ self._host = self._port.host
self._reset()
- self._executive = executive
+
+ # See comment in imports for why we need the win32 APIs and can't just use select.
+ # FIXME: there should be a way to get win32 vs. cygwin from platforminfo.
+ self._use_win32_apis = sys.platform == 'win32'
def name(self):
return self._name
@@ -70,7 +83,7 @@ class ServerProcess:
self._proc = None
self._output = str() # bytesarray() once we require Python 2.6
self._error = str() # bytesarray() once we require Python 2.6
- self.set_crashed(False)
+ self._crashed = False
self.timed_out = False
def process_name(self):
@@ -81,35 +94,31 @@ class ServerProcess:
raise ValueError("%s already running" % self._name)
self._reset()
# close_fds is a workaround for http://bugs.python.org/issue2320
- close_fds = sys.platform not in ('win32', 'cygwin')
+ close_fds = not self._host.platform.is_win()
self._proc = subprocess.Popen(self._cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=close_fds,
env=self._env)
fd = self._proc.stdout.fileno()
- fl = fcntl.fcntl(fd, fcntl.F_GETFL)
- fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
- fd = self._proc.stderr.fileno()
- fl = fcntl.fcntl(fd, fcntl.F_GETFL)
- fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
-
- def handle_interrupt(self):
+ if not self._use_win32_apis:
+ fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+ fd = self._proc.stderr.fileno()
+ fl = fcntl.fcntl(fd, fcntl.F_GETFL)
+ fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
+
+ def _handle_possible_interrupt(self):
"""This routine checks to see if the process crashed or exited
because of a keyboard interrupt and raises KeyboardInterrupt
accordingly."""
- if self.crashed:
- # This is hex code 0xc000001d, which is used for abrupt
- # termination. This happens if we hit ctrl+c from the prompt
- # and we happen to be waiting on the DumpRenderTree.
- # sdoyon: Not sure for which OS and in what circumstances the
- # above code is valid. What works for me under Linux to detect
- # ctrl+c is for the subprocess returncode to be negative
- # SIGINT. And that agrees with the subprocess documentation.
- if (-1073741510 == self._proc.returncode or
- - signal.SIGINT == self._proc.returncode):
- raise KeyboardInterrupt
- return
+ # FIXME: Linux and Mac set the returncode to -signal.SIGINT if a
+ # subprocess is killed with a Ctrl-C. Previous comments in this
+ # routine said that supposedly Windows returns 0xc000001d, but that's not what
+ # -1073741510 evaluates to. Figure out what the right value is
+ # for win32 and cygwin here ...
+ if self._proc.returncode in (-1073741510, -signal.SIGINT):
+ raise KeyboardInterrupt
def poll(self):
"""Check to see if the underlying process is running; returns None
@@ -128,7 +137,7 @@ class ServerProcess:
except IOError, e:
self.stop()
# stop() calls _reset(), so we have to set crashed to True after calling stop().
- self.set_crashed(True)
+ self._crashed = True
def _pop_stdout_line_if_ready(self):
index_after_newline = self._output.find('\n') + 1
@@ -178,40 +187,15 @@ class ServerProcess:
return self._read(deadline, retrieve_bytes_from_stdout_buffer)
- def _check_for_crash(self, wait_for_crash_reporter=True):
- if self.poll() != None:
- self.set_crashed(True, wait_for_crash_reporter)
- self.handle_interrupt()
-
def _log(self, message):
# This is a bit of a hack, but we first log a blank line to avoid
# messing up the master process's output.
_log.info('')
_log.info(message)
- def _sample(self):
- if sys.platform != "darwin":
- return
- try:
- hang_report = os.path.join(self._port.results_directory(), "%s-%s.sample.txt" % (self._name, self._proc.pid))
- self._executive.run_command([
- "/usr/bin/sample",
- self._proc.pid,
- 10,
- 10,
- "-file",
- hang_report,
- ])
- except ScriptError, e:
- self._log('Unable to sample process.')
-
def _handle_timeout(self):
- self._executive.wait_newest(self._port.is_crash_reporter)
- self._check_for_crash(wait_for_crash_reporter=False)
- if self.crashed:
- return
self.timed_out = True
- self._sample()
+ self._port.sample_process(self._name, self._proc.pid)
def _split_string_after_index(self, string, index):
return string[:index], string[index:]
@@ -224,41 +208,86 @@ class ServerProcess:
output, self._error = self._split_string_after_index(self._error, bytes_count)
return output
- def _wait_for_data_and_update_buffers(self, deadline):
+ def _wait_for_data_and_update_buffers_using_select(self, deadline):
out_fd = self._proc.stdout.fileno()
err_fd = self._proc.stderr.fileno()
select_fds = (out_fd, err_fd)
- read_fds, _, _ = select.select(select_fds, [], select_fds, deadline - time.time())
+ try:
+ read_fds, _, _ = select.select(select_fds, [], select_fds, deadline - time.time())
+ except select.error, e:
+ # We can ignore EINVAL since it's likely the process just crashed and we'll
+ # figure that out the next time through the loop in _read().
+ if e.args[0] == errno.EINVAL:
+ return
+ raise
+
try:
if out_fd in read_fds:
self._output += self._proc.stdout.read()
if err_fd in read_fds:
self._error += self._proc.stderr.read()
except IOError, e:
- # FIXME: Why do we ignore all IOErrors here?
+ # We can ignore the IOErrors because we will detect if the subprocess crashed
+ # the next time through the loop in _read().
pass
- def _check_for_abort(self, deadline):
- self._check_for_crash()
-
- if time.time() > deadline:
- self._handle_timeout()
+ def _wait_for_data_and_update_buffers_using_win32_apis(self, deadline):
+ # See http://code.activestate.com/recipes/440554-module-to-allow-asynchronous-subprocess-use-on-win/
+ # and http://docs.activestate.com/activepython/2.6/pywin32/modules.html
+ # for documentation on all of these win32-specific modules.
+ now = time.time()
+ out_fh = msvcrt.get_osfhandle(self._proc.stdout.fileno())
+ err_fh = msvcrt.get_osfhandle(self._proc.stderr.fileno())
+ while (self._proc.poll() is None) and (now < deadline):
+ output = self._non_blocking_read_win32(out_fh)
+ error = self._non_blocking_read_win32(err_fh)
+ if output or error:
+ if output:
+ self._output += output
+ if error:
+ self._error += error
+ return
+ time.sleep(0.01)
+ now = time.time()
+ return
+
+ def _non_blocking_read_win32(self, handle):
+ try:
+ _, avail, _ = win32pipe.PeekNamedPipe(handle, 0)
+ if avail > 0:
+ _, buf = win32file.ReadFile(handle, avail, None)
+ return buf
+ except Exception, e:
+ if e[0] not in (109, errno.ESHUTDOWN): # 109 == win32 ERROR_BROKEN_PIPE
+ raise
+ return None
- return self.crashed or self.timed_out
+ def has_crashed(self):
+ if not self._crashed and self.poll():
+ self._crashed = True
+ self._handle_possible_interrupt()
+ return self._crashed
# This read function is a bit oddly-designed, as it polls both stdout and stderr, yet
# only reads/returns from one of them (buffering both in local self._output/self._error).
# It might be cleaner to pass in the file descriptor to poll instead.
def _read(self, deadline, fetch_bytes_from_buffers_callback):
while True:
- if self._check_for_abort(deadline):
+ if self.has_crashed():
+ return None
+
+ if time.time() > deadline:
+ self._handle_timeout()
return None
bytes = fetch_bytes_from_buffers_callback()
if bytes is not None:
return bytes
- self._wait_for_data_and_update_buffers(deadline)
+ if self._use_win32_apis:
+ self._wait_for_data_and_update_buffers_using_win32_apis(deadline)
+ else:
+ self._wait_for_data_and_update_buffers_using_select(deadline)
def start(self):
if not self._proc:
@@ -277,7 +306,7 @@ class ServerProcess:
self._proc.stdout.close()
if self._proc.stderr:
self._proc.stderr.close()
- if sys.platform not in ('win32', 'cygwin'):
+ if not self._host.platform.is_win():
# Closing stdin/stdout/stderr hangs sometimes on OS X,
# (see restart(), above), and anyway we don't want to hang
# the harness if DumpRenderTree is buggy, so we wait a couple
@@ -289,12 +318,13 @@ class ServerProcess:
time.sleep(0.01)
if self._proc.poll() is None:
_log.warning('stopping %s timed out, killing it' % self._name)
- self._executive.kill_process(self._proc.pid)
+ self.kill()
_log.warning('killed')
self._reset()
- def set_crashed(self, crashed, wait_for_crash_reporter=True):
- self.crashed = crashed
- if not self.crashed or not wait_for_crash_reporter:
- return
- self._executive.wait_newest(self._port.is_crash_reporter)
+ def kill(self):
+ if self._proc:
+ self._host.executive.kill_process(self._proc.pid)
+ if self._proc.poll() is not None:
+ self._proc.wait()
+ self._reset()
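
The reworked class keeps the request/response model described in its docstring: every read takes an absolute deadline and returns None instead of blocking forever, whether the data arrives via select or, on Windows, via the win32 pipe APIs. A usage sketch modeled on test_basic in the unit test below (the command is illustrative):

import sys
import time

from webkitpy.common.system.systemhost import SystemHost
from webkitpy.layout_tests.port.factory import PortFactory
from webkitpy.layout_tests.port import server_process

port = PortFactory(SystemHost()).get()
cmd = [sys.executable, '-c', 'print "hello"']
proc = server_process.ServerProcess(port, 'python', cmd)
proc.write('')                            # starts the child process
deadline = time.time() + 5.0              # reads return by this time, crash or not
line = proc.read_stdout_line(deadline)    # None if the child crashed or timed out
if line:
    print line.strip()                    # hello
proc.stop()
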
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
index cae6b02c5..4257d5455 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
@@ -27,32 +27,27 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
+import time
import unittest
+from webkitpy.layout_tests.port.factory import PortFactory
from webkitpy.layout_tests.port import server_process
-from webkitpy.common.system.executive import ScriptError
-from webkitpy.common.system.executive_mock import MockExecutive2
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.common.system.outputcapture import OutputCapture
-def _logging_run_command(args):
- print args
-
-
-def _throwing_run_command(args):
- raise ScriptError("MOCK script error")
-
class TrivialMockPort(object):
+ def __init__(self):
+ self.host = MockSystemHost()
+
def results_directory(self):
return "/mock-results"
def check_for_leaks(self, process_name, process_pid):
pass
- def is_crash_reporter(self, process_name):
- return False
-
class MockFile(object):
def __init__(self, server_process):
@@ -88,26 +83,44 @@ class FakeServerProcess(server_process.ServerProcess):
class TestServerProcess(unittest.TestCase):
+ def test_basic(self):
+ cmd = [sys.executable, '-c', 'import sys; import time; time.sleep(0.02); print "stdout"; sys.stdout.flush(); print >>sys.stderr, "stderr"']
+ host = SystemHost()
+ factory = PortFactory(host)
+ port = factory.get()
+ now = time.time()
+ proc = server_process.ServerProcess(port, 'python', cmd)
+ proc.write('')
+
+ self.assertEquals(proc.poll(), None)
+ self.assertFalse(proc.has_crashed())
+
+ # check that doing a read after an expired deadline returns
+ # nothing immediately.
+ line = proc.read_stdout_line(now - 1)
+ self.assertEquals(line, None)
+
+ line = proc.read_stdout_line(now + 1.0)
+ self.assertEquals(line.strip(), "stdout")
+
+ line = proc.read_stderr_line(now + 1.0)
+ self.assertEquals(line.strip(), "stderr")
+
+ proc.stop()
+
def test_broken_pipe(self):
- server_process = FakeServerProcess(port_obj=TrivialMockPort(), name="test", cmd=["test"])
+ port_obj = TrivialMockPort()
+
+ port_obj.host.platform.os_name = 'win'
+ server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
server_process.write("should break")
- self.assertTrue(server_process.crashed)
+ self.assertTrue(server_process.has_crashed())
self.assertEquals(server_process._proc, None)
self.assertEquals(server_process.broken_pipes, [server_process.stdin])
- def test_sample_process(self):
- # Currently, sample-on-timeout only works on Darwin.
- if sys.platform != "darwin":
- return
- server_process = FakeServerProcess(port_obj=TrivialMockPort(), name="test", cmd=["test"], executive=MockExecutive2(run_command_fn=_logging_run_command))
- server_process._proc = MockProc(server_process)
- expected_stdout = "['/usr/bin/sample', 1, 10, 10, '-file', '/mock-results/test-1.sample.txt']\n"
- OutputCapture().assert_outputs(self, server_process._sample, expected_stdout=expected_stdout)
-
- def test_sample_process_throws_exception(self):
- # Currently, sample-on-timeout only works on Darwin.
- if sys.platform != "darwin":
- return
- server_process = FakeServerProcess(port_obj=TrivialMockPort(), name="test", cmd=["test"], executive=MockExecutive2(run_command_fn=_throwing_run_command))
- server_process._proc = MockProc(server_process)
- OutputCapture().assert_outputs(self, server_process._sample)
+ port_obj.host.platform.os_name = 'mac'
+ server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
+ server_process.write("should break")
+ self.assertTrue(server_process.has_crashed())
+ self.assertEquals(server_process._proc, None)
+ self.assertEquals(server_process.broken_pipes, [server_process.stdin])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index e1132cbaa..b6b2229e7 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -35,6 +35,7 @@ from webkitpy.layout_tests.port import Port, Driver, DriverOutput
from webkitpy.layout_tests.port.base import VirtualTestSuite
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.crashlogs import CrashLogs
# This sets basic expectations for a test. Each individual expectation
@@ -371,9 +372,6 @@ class TestPort(Port):
def default_child_processes(self):
return 1
- def default_worker_model(self):
- return 'inline'
-
def worker_startup_delay_secs(self):
return 0
@@ -401,11 +399,12 @@ class TestPort(Port):
def webkit_base(self):
return '/test.checkout'
- def skipped_tests(self, test_list):
+ def skipped_layout_tests(self, test_list):
# This allows us to test the handling Skipped files, both with a test
# that actually passes, and a test that does fail.
return set(['failures/expected/skip_text.html',
- 'failures/unexpected/skip_pass.html'])
+ 'failures/unexpected/skip_pass.html',
+ 'virtual/skipped'])
def name(self):
return self._name
@@ -462,12 +461,10 @@ class TestPort(Port):
test_configurations = []
for version, architecture in self._all_systems():
for build_type in self._all_build_types():
- for graphics_type in self._all_graphics_types():
- test_configurations.append(TestConfiguration(
- version=version,
- architecture=architecture,
- build_type=build_type,
- graphics_type=graphics_type))
+ test_configurations.append(TestConfiguration(
+ version=version,
+ architecture=architecture,
+ build_type=build_type))
return test_configurations
def _all_systems(self):
@@ -482,9 +479,6 @@ class TestPort(Port):
def _all_build_types(self):
return ('debug', 'release')
- def _all_graphics_types(self):
- return ('cpu', 'gpu')
-
def configuration_specifier_macros(self):
"""To avoid surprises when introducing new macros, these are intentionally fixed in time."""
return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'vista', 'win7'], 'linux': ['lucid']}
@@ -495,6 +489,7 @@ class TestPort(Port):
def virtual_test_suites(self):
return [
VirtualTestSuite('virtual/passes', 'passes', ['--virtual-arg']),
+ VirtualTestSuite('virtual/skipped', 'failures/expected', ['--virtual-arg2']),
]
class TestDriver(Driver):
@@ -524,13 +519,22 @@ class TestDriver(Driver):
if test.actual_audio:
audio = base64.b64decode(test.actual_audio)
crashed_process_name = None
+ crashed_pid = None
if test.crash:
crashed_process_name = self._port.driver_name()
+ crashed_pid = 1
elif test.web_process_crash:
crashed_process_name = 'WebProcess'
- return DriverOutput(actual_text, test.actual_image,
- test.actual_checksum, audio, crash=test.crash or test.web_process_crash,
- crashed_process_name=crashed_process_name,
+ crashed_pid = 2
+
+ crash_log = ''
+ if crashed_process_name:
+ crash_logs = CrashLogs(self._port.host)
+ crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''
+
+ return DriverOutput(actual_text, test.actual_image, test.actual_checksum, audio,
+ crash=test.crash or test.web_process_crash, crashed_process_name=crashed_process_name,
+ crashed_pid=crashed_pid, crash_log=crash_log,
test_time=time.time() - start_time, timeout=test.timeout, error=test.error)
def start(self, pixel_tests, per_test_args):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
index cc3b0c716..a41d62e32 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
@@ -180,7 +180,8 @@ class WebKitPort(Port):
else:
tolerance = 0.1
command = [self._path_to_image_diff(), '--tolerance', str(tolerance)]
- process = server_process.ServerProcess(self, 'ImageDiff', command)
+ environment = self.setup_environ_for_server('ImageDiff')
+ process = server_process.ServerProcess(self, 'ImageDiff', command, environment)
process.write('Content-Length: %d\n%sContent-Length: %d\n%s' % (
len(actual_contents), actual_contents,
@@ -194,7 +195,7 @@ class WebKitPort(Port):
while True:
output = sp.read_stdout_line(deadline)
- if sp.timed_out or sp.crashed or not output:
+ if sp.timed_out or sp.has_crashed() or not output:
break
if output.startswith('diff'): # This is the last line ImageDiff prints.
@@ -207,9 +208,12 @@ class WebKitPort(Port):
output = sp.read_stdout_line(deadline)
break
+ stderr = sp.pop_all_buffered_stderr()
+ if stderr:
+ _log.warn("ImageDiff produced stderr output:\n" + stderr)
if sp.timed_out:
_log.error("ImageDiff timed out")
- if sp.crashed:
+ if sp.has_crashed():
_log.error("ImageDiff crashed")
# FIXME: There is no need to shut down the ImageDiff server after every diff.
sp.stop()
@@ -250,20 +254,8 @@ class WebKitPort(Port):
return dirs_to_skip
def _runtime_feature_list(self):
- """Return the supported features of DRT. If a port doesn't support
- this DRT switch, it has to override this method to return None"""
- supported_features_command = [self._path_to_driver(), '--print-supported-features']
- try:
- output = self._executive.run_command(supported_features_command, error_handler=Executive.ignore_error)
- except OSError, e:
- _log.warn("Exception running driver: %s, %s. Driver must be built before calling WebKitPort.test_expectations()." % (supported_features_command, e))
- return None
-
- # Note: win/DumpRenderTree.cpp does not print a leading space before the features_string.
- match_object = re.match("SupportedFeatures:\s*(?P<features_string>.*)\s*", output)
- if not match_object:
- return None
- return match_object.group('features_string').split(' ')
+ """If a port makes certain features available only through runtime flags, it can override this routine to indicate which ones are available."""
+ return None
def _webcore_symbols_string(self):
webcore_library_path = self._path_to_webcore_library()
@@ -345,18 +337,18 @@ class WebKitPort(Port):
return "%s-wk2" % self.port_name
def _skipped_file_search_paths(self):
- # Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] not the full casade.
+ # Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] and any directories
+ # included via --additional-platform-directory, not the full cascade.
# Note order doesn't matter since the Skipped file contents are all combined.
- #
- # FIXME: It's not correct to assume that port names map directly to
- # directory names. For example, mac-future is a port name that does
- # not have a cooresponding directory. The WebKit2 ports are another
- # example.
- search_paths = set([self.port_name, self.name()])
+ search_paths = set([self.port_name])
+ if 'future' not in self.name():
+ search_paths.add(self.name())
if self.get_option('webkit_test_runner'):
# Because nearly all of the skipped tests for WebKit 2 are due to cross-platform
# issues, all wk2 ports share a skipped list under platform/wk2.
search_paths.update([self._wk2_port_name(), "wk2"])
+ search_paths.update(self.get_option("additional_platform_directory", []))
+
return search_paths
def test_expectations(self):
@@ -369,15 +361,11 @@ class WebKitPort(Port):
return expectations
def skipped_layout_tests(self, test_list):
- # Use a set to allow duplicates
tests_to_skip = set(self._expectations_from_skipped_files(self._skipped_file_search_paths()))
tests_to_skip.update(self._tests_for_other_platforms())
tests_to_skip.update(self._skipped_tests_for_unsupported_features(test_list))
return tests_to_skip
- def skipped_tests(self, test_list):
- return self.skipped_layout_tests(test_list)
-
def _build_path(self, *comps):
# --root is used for running with a pre-built root (like from a nightly zip).
build_directory = self.get_option('root') or self.get_option('build_directory')
@@ -386,7 +374,7 @@ class WebKitPort(Port):
# Set --build-directory here Since this modifies the options object used by the worker subprocesses,
# it avoids the slow call out to build_directory in each subprocess.
self.set_option_default('build_directory', build_directory)
- return self._filesystem.join(build_directory, *comps)
+ return self._filesystem.join(self._filesystem.abspath(build_directory), *comps)
def _path_to_driver(self):
return self._build_path(self.driver_name())
@@ -449,7 +437,8 @@ class WebKitDriver(Driver):
# "#CRASHED - PROCESSNAME". Since those can happen at any time
# and ServerProcess won't be aware of them (since the actual tool
# didn't crash, just a subprocess) we record the crashed subprocess name here.
- self._crashed_subprocess_name = None
+ self._crashed_process_name = None
+ self._crashed_pid = None
# stderr reading is scoped on a per-test (not per-block) basis, so we store the accumulated
# stderr output, as well as if we've seen #EOF on this driver instance.
@@ -467,8 +456,6 @@ class WebKitDriver(Driver):
def cmd_line(self, pixel_tests, per_test_args):
cmd = self._command_wrapper(self._port.get_option('wrapper'))
cmd.append(self._port._path_to_driver())
- if self._port.get_option('skip_pixel_test_if_no_baseline'):
- cmd.append('--skip-pixel-test-if-no-baseline')
if self._port.get_option('gc_between_tests'):
cmd.append('--gc-between-tests')
if self._port.get_option('complex_text'):
@@ -481,7 +468,7 @@ class WebKitDriver(Driver):
cmd.extend(self._port.get_option('additional_drt_flag', []))
- if pixel_tests or self._pixel_tests:
+ if pixel_tests:
cmd.append('--pixel-tests')
cmd.extend(per_test_args)
@@ -491,41 +478,44 @@ class WebKitDriver(Driver):
def _start(self, pixel_tests, per_test_args):
server_name = self._port.driver_name()
environment = self._port.setup_environ_for_server(server_name)
+ environment['DYLD_LIBRARY_PATH'] = self._port._build_path()
environment['DYLD_FRAMEWORK_PATH'] = self._port._build_path()
# FIXME: We're assuming that WebKitTestRunner checks this DumpRenderTree-named environment variable.
environment['DUMPRENDERTREE_TEMP'] = str(self._driver_tempdir)
environment['LOCAL_RESOURCE_ROOT'] = self._port.layout_tests_dir()
- self._crashed_subprocess_name = None
+ self._crashed_process_name = None
+ self._crashed_pid = None
self._server_process = server_process.ServerProcess(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
def has_crashed(self):
if self._server_process is None:
return False
- return self._server_process.poll() is not None
+ if self._crashed_process_name:
+ return True
+ if self._server_process.has_crashed():
+ self._crashed_process_name = self._server_process.name()
+ self._crashed_pid = self._server_process.pid()
+ return True
+ return False
def _check_for_driver_crash(self, error_line):
if error_line == "#CRASHED\n":
# This is used on Windows to report that the process has crashed
# See http://trac.webkit.org/changeset/65537.
- self._server_process.set_crashed(True)
- elif error_line == "#CRASHED - WebProcess\n":
+ self._crashed_process_name = self._server_process.name()
+ self._crashed_pid = self._server_process.pid()
+ elif error_line.startswith("#CRASHED - WebProcess"):
# WebKitTestRunner uses this to report that the WebProcess subprocess crashed.
- self._subprocess_crashed("WebProcess")
- return self._detected_crash()
-
- def _detected_crash(self):
- # We can't just check self._server_process.crashed because WebKitTestRunner
- # can report subprocess crashes at any time by printing
- # "#CRASHED - WebProcess", we want to count those as crashes as well.
- return self._server_process.crashed or self._crashed_subprocess_name
-
- def _subprocess_crashed(self, subprocess_name):
- self._crashed_subprocess_name = subprocess_name
-
- def _crashed_process_name(self):
- if not self._detected_crash():
- return None
- return self._crashed_subprocess_name or self._server_process.process_name()
+ pid = None
+ m = re.search('pid (\d+)', error_line)
+ if m:
+ pid = int(m.group(1))
+ self._crashed_process_name = 'WebProcess'
+ self._crashed_pid = pid
+ # FIXME: delete this after we're sure this code is working :)
+ _log.debug('WebProcess crash, pid = %s, error_line = %s' % (str(pid), error_line))
+ return True
+ return self.has_crashed()
def _command_from_driver_input(self, driver_input):
if self.is_http_test(driver_input.test_name):
@@ -555,14 +545,14 @@ class WebKitDriver(Driver):
return (None, block.content_hash)
def run_test(self, driver_input):
+ start_time = time.time()
if not self._server_process:
- self._start(driver_input.is_reftest or self._pixel_tests, [])
+ self._start(driver_input.should_run_pixel_test, driver_input.args)
self.error_from_test = str()
self.err_seen_eof = False
command = self._command_from_driver_input(driver_input)
- start_time = time.time()
- deadline = time.time() + int(driver_input.timeout) / 1000.0
+ deadline = start_time + int(driver_input.timeout) / 1000.0
self._server_process.write(command)
text, audio = self._read_first_block(deadline) # First block is either text or audio
@@ -574,10 +564,22 @@ class WebKitDriver(Driver):
# FIXME: We may need to also read stderr until the process dies?
self.error_from_test += self._server_process.pop_all_buffered_stderr()
+ crash_log = ''
+ if self.has_crashed():
+ crash_log = self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, text, self.error_from_test,
+ newer_than=start_time)
+
+ timeout = self._server_process.timed_out
+ if timeout:
+ # DRT doesn't have a built-in timer to abort the test, so we might as well
+ # kill the process directly and not wait for it to shut down cleanly (since it may not).
+ self._server_process.kill()
+
return DriverOutput(text, image, actual_image_hash, audio,
- crash=self._detected_crash(), test_time=time.time() - start_time,
- timeout=self._server_process.timed_out, error=self.error_from_test,
- crashed_process_name=self._crashed_process_name())
+ crash=self.has_crashed(), test_time=time.time() - start_time,
+ timeout=timeout, error=self.error_from_test,
+ crashed_process_name=self._crashed_process_name,
+ crashed_pid=self._crashed_pid, crash_log=crash_log)
def _read_header(self, block, line, header_text, header_attr, header_filter=None):
if line.startswith(header_text) and getattr(block, header_attr) is None:
@@ -607,7 +609,7 @@ class WebKitDriver(Driver):
block = ContentBlock()
out_seen_eof = False
- while True:
+ while not self.has_crashed():
if out_seen_eof and (self.err_seen_eof or not wait_for_stderr_eof):
break
@@ -620,7 +622,7 @@ class WebKitDriver(Driver):
else:
out_line, err_line = self._server_process.read_either_stdout_or_stderr_line(deadline)
- if self._server_process.timed_out or self._detected_crash():
+ if self._server_process.timed_out or self.has_crashed():
break
if out_line:
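
The broadened error-line handling accepts both the bare form and the new pid-annotated form that WebKitTestRunner can emit; the extraction logic above can be checked in isolation:

import re

for error_line in ('#CRASHED - WebProcess\n', '#CRASHED - WebProcess (pid 8675)\n'):
    pid = None
    m = re.search('pid (\d+)', error_line)
    if m:
        pid = int(m.group(1))
    print pid    # None, then 8675
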
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py
index aadbf72fd..63502a570 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/webkit_unittest.py
@@ -35,26 +35,24 @@ from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port import port_testcase
from webkitpy.layout_tests.port.webkit import WebKitPort, WebKitDriver
+from webkitpy.layout_tests.port.config_mock import MockConfig
from webkitpy.tool.mocktool import MockOptions
class TestWebKitPort(WebKitPort):
port_name = "testwebkitport"
- def __init__(self, symbols_string=None, feature_list=None,
- expectations_file=None, skips_file=None, host=None,
+ def __init__(self, symbols_string=None,
+ expectations_file=None, skips_file=None, host=None, config=None,
**kwargs):
self.symbols_string = symbols_string # Passing "" disables all staticly-detectable features.
- self.feature_list = feature_list # Passing [] disables all runtime-detectable features.
host = host or MockSystemHost()
- WebKitPort.__init__(self, host=host, **kwargs)
+ config = config or MockConfig()
+ WebKitPort.__init__(self, host=host, config=config, **kwargs)
def all_test_configurations(self):
return [self.test_configuration()]
- def _runtime_feature_list(self):
- return self.feature_list
-
def _webcore_symbols_string(self):
return self.symbols_string
@@ -125,18 +123,12 @@ class WebKitPortTest(port_testcase.PortTestCase):
result_directories = set(TestWebKitPort(symbols_string, None)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
self.assertEqual(result_directories, expected_directories)
- def test_runtime_feature_list(self):
- port = WebKitPort(MockSystemHost())
- port._executive.run_command = lambda command, cwd=None, error_handler=None: "Nonsense"
- # runtime_features_list returns None when its results are meaningless (it couldn't run DRT or parse the output, etc.)
- self.assertEquals(port._runtime_feature_list(), None)
- port._executive.run_command = lambda command, cwd=None, error_handler=None: "SupportedFeatures:foo bar"
- self.assertEquals(port._runtime_feature_list(), ['foo', 'bar'])
-
def test_skipped_directories_for_features(self):
supported_features = ["Accelerated Compositing", "Foo Feature"]
expected_directories = set(["animations/3d", "transforms/3d"])
- result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=["animations/3d/foo.html"]))
+ port = TestWebKitPort(None, supported_features)
+ port._runtime_feature_list = lambda: supported_features
+ result_directories = set(port._skipped_tests_for_unsupported_features(test_list=["animations/3d/foo.html"]))
self.assertEqual(result_directories, expected_directories)
def test_skipped_directories_for_features_no_matching_tests_in_test_list(self):
@@ -161,6 +153,8 @@ class WebKitPortTest(port_testcase.PortTestCase):
self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version']))
port._options = MockOptions(webkit_test_runner=True)
self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version', 'testwebkitport-wk2', 'wk2']))
+ port._options = MockOptions(additional_platform_directory=["internal-testwebkitport"])
+ self.assertEqual(port._skipped_file_search_paths(), set(['testwebkitport', 'testwebkitport-version', 'internal-testwebkitport']))
def test_root_option(self):
port = TestWebKitPort()
@@ -248,8 +242,11 @@ class WebKitPortTest(port_testcase.PortTestCase):
class MockServerProcess(object):
def __init__(self, lines=None):
self.timed_out = False
- self.crashed = False
self.lines = lines or []
+ self.crashed = False
+
+ def has_crashed(self):
+ return self.crashed
def read_stdout_line(self, deadline):
return self.lines.pop(0) + "\n"
@@ -300,4 +297,49 @@ class WebKitDriverTest(unittest.TestCase):
def test_no_timeout(self):
port = TestWebKitPort()
driver = WebKitDriver(port, 0, pixel_tests=True, no_timeout=True)
- self.assertEquals(driver.cmd_line(True, []), ['MOCK output of child process/DumpRenderTree', '--no-timeout', '--pixel-tests', '-'])
+ self.assertEquals(driver.cmd_line(True, []), ['/mock-build/DumpRenderTree', '--no-timeout', '--pixel-tests', '-'])
+
+ def test_check_for_driver_crash(self):
+ port = TestWebKitPort()
+ driver = WebKitDriver(port, 0, pixel_tests=True)
+
+ class FakeServerProcess(object):
+ def __init__(self, crashed):
+ self.crashed = crashed
+
+ def pid(self):
+ return 1234
+
+ def name(self):
+ return 'FakeServerProcess'
+
+ def has_crashed(self):
+ return self.crashed
+
+ def assert_crash(driver, error_line, crashed, name, pid):
+ self.assertEquals(driver._check_for_driver_crash(error_line), crashed)
+ self.assertEquals(driver._crashed_process_name, name)
+ self.assertEquals(driver._crashed_pid, pid)
+
+ driver._server_process = FakeServerProcess(False)
+ assert_crash(driver, '', False, None, None)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(False)
+ assert_crash(driver, '#CRASHED\n', True, 'FakeServerProcess', 1234)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(False)
+ assert_crash(driver, '#CRASHED - WebProcess\n', True, 'WebProcess', None)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(False)
+ assert_crash(driver, '#CRASHED - WebProcess (pid 8675)\n', True, 'WebProcess', 8675)
+
+ driver._crashed_process_name = None
+ driver._crashed_pid = None
+ driver._server_process = FakeServerProcess(True)
+ assert_crash(driver, '', True, 'FakeServerProcess', 1234)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/win.py b/Tools/Scripts/webkitpy/layout_tests/port/win.py
index e463b02f4..da3209ead 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/win.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/win.py
@@ -30,7 +30,7 @@ import logging
import re
import sys
-from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.executive import ScriptError, Executive
from webkitpy.common.system.path import abspath_to_uri
from webkitpy.layout_tests.port.apple import ApplePort
@@ -80,3 +80,17 @@ class WinPort(ApplePort):
# FIXME: webkitperl/httpd.pm installs /usr/lib/apache/libphp4.dll on cygwin automatically
# as part of running old-run-webkit-tests. That's bad design, but we may need some similar hack.
# We might use setup_environ_for_server for such a hack (or modify apache_http_server.py).
+
+ def _runtime_feature_list(self):
+ supported_features_command = [self._path_to_driver(), '--print-supported-features']
+ try:
+ output = self._executive.run_command(supported_features_command, error_handler=Executive.ignore_error)
+ except OSError, e:
+ _log.warn("Exception running driver: %s, %s. Driver must be built before calling WebKitPort.test_expectations()." % (supported_features_command, e))
+ return None
+
+ # Note: win/DumpRenderTree.cpp does not print a leading space before the features_string.
+ match_object = re.match("SupportedFeatures:\s*(?P<features_string>.*)\s*", output)
+ if not match_object:
+ return None
+ return match_object.group('features_string').split(' ')
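
_runtime_feature_list now lives only on WinPort, since the Windows DumpRenderTree is the driver that prints a feature list. The parsing can be exercised standalone with the same sample output the new unit test below uses:

import re

output = "SupportedFeatures:foo bar"    # as emitted by DumpRenderTree --print-supported-features
match_object = re.match("SupportedFeatures:\s*(?P<features_string>.*)\s*", output)
if match_object:
    print match_object.group('features_string').split(' ')    # ['foo', 'bar']
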
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py
index fc972a4cd..c511c872e 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py
@@ -95,3 +95,11 @@ class WinPortTest(port_testcase.PortTestCase):
def test_operating_system(self):
self.assertEqual('win', self.make_port().operating_system())
+
+ def test_runtime_feature_list(self):
+ port = self.make_port()
+ port._executive.run_command = lambda command, cwd=None, error_handler=None: "Nonsense"
+ # runtime_features_list returns None when its results are meaningless (it couldn't run DRT or parse the output, etc.)
+ self.assertEquals(port._runtime_feature_list(), None)
+ port._executive.run_command = lambda command, cwd=None, error_handler=None: "SupportedFeatures:foo bar"
+ self.assertEquals(port._runtime_feature_list(), ['foo', 'bar'])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py b/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py
new file mode 100644
index 000000000..cfcb5a18c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py
@@ -0,0 +1,76 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import os
+import signal
+import subprocess
+
+from webkitpy.layout_tests.port.server_process import ServerProcess
+from webkitpy.layout_tests.port.webkit import WebKitDriver
+from webkitpy.common.system.executive import Executive
+
+_log = logging.getLogger(__name__)
+
+
+class XvfbDriver(WebKitDriver):
+ def _start(self, pixel_tests, per_test_args):
+
+ # Collect the number of X servers running already and make
+ # sure our Xvfb process doesn't clash with any of them.
+ def x_filter(process_name):
+ return process_name.find("Xorg") > -1
+
+ running_displays = len(Executive().running_pids(x_filter))
+
+ # Give the pixel-tests driver its own display (the +1 below). When pixel tests are disabled,
+ # DriverProxy creates two drivers, one for normal and the other for ref tests. Both have
+ # the same worker number, so this prevents them from using the same Xvfb instance.
+ display_id = self._worker_number * 2 + running_displays
+ if pixel_tests:
+ display_id += 1
+ run_xvfb = ["Xvfb", ":%d" % (display_id), "-screen", "0", "800x600x24", "-nolisten", "tcp"]
+ with open(os.devnull, 'w') as devnull:
+ self._xvfb_process = subprocess.Popen(run_xvfb, stderr=devnull)
+ server_name = self._port.driver_name()
+ environment = self._port.setup_environ_for_server(server_name)
+ # We must do this here because the DISPLAY number depends on _worker_number
+ environment['DISPLAY'] = ":%d" % (display_id)
+ self._crashed_process_name = None
+ self._crashed_pid = None
+ self._server_process = ServerProcess(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
+
+ def stop(self):
+ WebKitDriver.stop(self)
+ if getattr(self, '_xvfb_process', None):
+ try:
+ self._xvfb_process.terminate()
+ self._xvfb_process.wait()
+ except OSError:
+ _log.warn("The driver is already terminated.")
+ self._xvfb_process = None
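
The display allocation above is easy to check by hand. Assuming, say, two Xorg servers already running and worker number 3 (numbers illustrative):

running_displays = 2    # Xorg processes found via running_pids()
worker_number = 3
for pixel_tests in (False, True):
    display_id = worker_number * 2 + running_displays
    if pixel_tests:
        display_id += 1
    print ":%d" % display_id    # :8 for the regular driver, :9 for the pixel-tests driver
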
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 6b38a3a71..8e430cd94 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -39,6 +39,7 @@ import sys
from webkitpy.common.host import Host
from webkitpy.layout_tests.controllers.manager import Manager, WorkerException
from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.port import port_options
from webkitpy.layout_tests.views import printing
@@ -85,7 +86,7 @@ def lint(port, options, expectations_class):
def run(port, options, args, regular_output=sys.stderr, buildbot_output=sys.stdout):
warnings = _set_up_derived_options(port, options)
- printer = printing.Printer(port, options, regular_output, buildbot_output, configure_logging=True)
+ printer = printing.Printer(port, options, regular_output, buildbot_output, logger=logging.getLogger())
for warning in warnings:
_log.warning(warning)
@@ -121,11 +122,8 @@ def run(port, options, args, regular_output=sys.stderr, buildbot_output=sys.stdo
printer.print_update("Parsing expectations ...")
manager.parse_expectations()
- result_summary = manager.set_up_run()
- if result_summary:
- unexpected_result_count = manager.run(result_summary)
- manager.clean_up_run()
- _log.debug("Testing completed, Exit status: %d" % unexpected_result_count)
+ unexpected_result_count = manager.run()
+ _log.debug("Testing completed, Exit status: %d" % unexpected_result_count)
finally:
printer.cleanup()
@@ -137,13 +135,6 @@ def _set_up_derived_options(port, options):
# We return a list of warnings to print after the printer is initialized.
warnings = []
- if options.worker_model is None:
- options.worker_model = port.default_worker_model()
-
- if options.worker_model == 'inline':
- if options.child_processes and int(options.child_processes) > 1:
- warnings.append("--worker-model=inline overrides --child-processes")
- options.child_processes = "1"
if not options.child_processes:
options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
str(port.default_child_processes()))
@@ -156,9 +147,9 @@ def _set_up_derived_options(port, options):
if not options.time_out_ms:
if options.configuration == "Debug":
- options.time_out_ms = str(2 * Manager.DEFAULT_TEST_TIMEOUT_MS)
+ options.time_out_ms = str(2 * port.default_test_timeout_ms())
else:
- options.time_out_ms = str(Manager.DEFAULT_TEST_TIMEOUT_MS)
+ options.time_out_ms = str(port.default_test_timeout_ms())
options.slow_time_out_ms = str(5 * int(options.time_out_ms))
@@ -201,27 +192,7 @@ def parse_args(args=None):
option_group_definitions = []
- # FIXME: All of these options should be stored closer to the code which
- # FIXME: actually uses them. configuration_options should move
- # FIXME: to WebKitPort and be shared across all scripts.
- option_group_definitions.append(("Configuration Options", [
- optparse.make_option("-t", "--target", dest="configuration",
- help="(DEPRECATED)"),
- # FIXME: --help should display which configuration is default.
- optparse.make_option('--debug', action='store_const', const='Debug',
- dest="configuration",
- help='Set the configuration to Debug'),
- optparse.make_option('--release', action='store_const',
- const='Release', dest="configuration",
- help='Set the configuration to Release'),
- # old-run-webkit-tests also accepts -c, --configuration CONFIGURATION.
- optparse.make_option("--platform", help="Override port/platform being tested (i.e. chromium-mac)"),
- optparse.make_option("--chromium", action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
- optparse.make_option('--efl', action='store_const', const='efl', dest="platform", help='Alias for --platform=efl'),
- optparse.make_option('--gtk', action='store_const', const='gtk', dest="platform", help='Alias for --platform=gtk'),
- optparse.make_option('--qt', action='store_const', const='qt', dest="platform", help='Alias for --platform=qt'),
- ]))
-
+ option_group_definitions.append(("Configuration options", port_options()))
option_group_definitions.append(("Printing Options", printing.print_options()))
# FIXME: These options should move onto the ChromiumPort.
@@ -335,6 +306,9 @@ def parse_args(args=None):
default=[], help="Additional directory where to look for test "
"baselines (will take precendence over platform baselines). "
"Specify multiple times to add multiple search path entries."),
+ optparse.make_option("--additional-expectations", action="append", default=[],
+ help="Path to a test_expectations file that will override previous expectations. "
+ "Specify multiple times for multiple sets of overrides."),
optparse.make_option("--no-show-results", action="store_false",
default=True, dest="show_results",
help="Don't launch a browser with results after the tests "
@@ -379,6 +353,8 @@ def parse_args(args=None):
# old-run-webkit-tests:
# -i|--ignore-tests Comma-separated list of directories
# or tests to ignore
+ optparse.make_option("-i", "--ignore-tests", action="append", default=[],
+ help="directories or test to ignore (may specify multiple times)"),
optparse.make_option("--test-list", action="append",
help="read list of tests to run from file", metavar="FILE"),
# old-run-webkit-tests uses --skipped==[default|ignore|only]
@@ -408,16 +384,8 @@ def parse_args(args=None):
optparse.make_option("--child-processes",
help="Number of DumpRenderTrees to run in parallel."),
# FIXME: Display default number of child processes that will run.
- optparse.make_option("--worker-model", action="store",
- default=None, help=("controls worker model. Valid values are "
- "'inline' and 'processes'.")),
- optparse.make_option("-f", "--experimental-fully-parallel",
- action="store_true",
+ optparse.make_option("-f", "--fully-parallel", action="store_true",
help="run all tests in parallel"),
- optparse.make_option("--no-experimental-fully-parallel",
- action="store_false",
- dest="experimental_fully_parallel",
- help="do not run all tests in parallel"),
optparse.make_option("--exit-after-n-failures", type="int", default=500,
help="Exit after the first N failures instead of running all "
"tests"),
@@ -480,6 +448,7 @@ def main():
host = Host()
host._initialize_scm()
port = host.port_factory.get(options.platform, options)
+ logging.getLogger().setLevel(logging.DEBUG if options.verbose else logging.INFO)
return run(port, options, args)
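
The timeout changes above replace the hard-coded Manager.DEFAULT_TEST_TIMEOUT_MS with a per-port default. A small illustrative sketch of the derivation (the 35000 default below is a placeholder, not the real value of port.default_test_timeout_ms()):

    def derive_timeouts(configuration, default_test_timeout_ms=35000):
        # Debug builds run slower, so they get twice the regular budget.
        time_out_ms = default_test_timeout_ms * (2 if configuration == 'Debug' else 1)
        # Tests marked SLOW get five times the regular budget.
        slow_time_out_ms = 5 * time_out_ms
        return str(time_out_ms), str(slow_time_out_ms)

    assert derive_timeouts('Release') == ('35000', '175000')
    assert derive_timeouts('Debug') == ('70000', '350000')
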
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index dc2e1c2b4..ee35697d6 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -33,6 +33,7 @@ import codecs
import itertools
import json
import logging
+import platform
import Queue
import re
import StringIO
@@ -42,14 +43,9 @@ import time
import threading
import unittest
-from webkitpy.common.system import path
-
-# FIXME: remove this when we fix test-webkitpy to work properly on cygwin
-# (bug 63846).
-SHOULD_TEST_PROCESSES = sys.platform not in ('cygwin', 'win32')
-
-from webkitpy.common.system import outputcapture
+from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
+from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests import port
@@ -73,8 +69,8 @@ def parse_args(extra_args=None, record_results=False, tests_included=False, new_
if not new_results:
args.append('--no-new-test-results')
- if not '--child-processes' in extra_args and not '--worker-model' in extra_args:
- args.extend(['--worker-model', 'inline'])
+ if not '--child-processes' in extra_args:
+ args.extend(['--child-processes', '1'])
args.extend(extra_args)
if not tests_included:
# We use the glob to test that globbing works.
@@ -263,6 +259,16 @@ class LintTest(unittest.TestCase, StreamTestingMixin):
class MainTest(unittest.TestCase, StreamTestingMixin):
+ def setUp(self):
+ # A real PlatformInfo object is used here instead of a
+ # MockPlatformInfo because we need to actually check for
+ # Windows and Mac to skip some tests.
+ self._platform = SystemHost().platform
+
+ # FIXME: Remove this when we fix test-webkitpy to work
+ # properly on cygwin (bug 63846).
+ self.should_test_processes = not self._platform.is_win()
+
def test_accelerated_compositing(self):
# This just tests that we recognize the command line args
self.assertTrue(passing_run(['--accelerated-video']))
@@ -285,25 +291,16 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
for batch in batch_tests_run:
self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))
- def test_child_process_1(self):
- if SHOULD_TEST_PROCESSES:
- _, _, regular_output, _ = logging_run(
- ['--print', 'config', '--worker-model', 'processes', '--child-processes', '1'])
- self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
-
def test_child_processes_2(self):
- # This test seems to fail on win32.
- if sys.platform == 'win32':
- return
- if SHOULD_TEST_PROCESSES:
+ if self.should_test_processes:
_, _, regular_output, _ = logging_run(
- ['--print', 'config', '--worker-model', 'processes', '--child-processes', '2'])
+ ['--print', 'config', '--child-processes', '2'])
self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
def test_child_processes_min(self):
- if SHOULD_TEST_PROCESSES:
+ if self.should_test_processes:
_, _, regular_output, _ = logging_run(
- ['--print', 'config', '--worker-model', 'processes', '--child-processes', '2', 'passes'],
+ ['--print', 'config', '--child-processes', '2', 'passes'],
tests_included=True)
self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
@@ -319,18 +316,17 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
# whether they are in-process or out. inline exceptions work as normal,
# which allows us to get the full stack trace and traceback from the
# worker. The downside to this is that it could be any error, but this
- # is actually useful in testing, which is what --worker-model=inline is
- # usually used for.
+ # is actually useful in testing.
#
# Exceptions raised in a separate process are re-packaged into
# WorkerExceptions, which have a string capture of the stack which can
# be printed, but don't display properly in the unit test exception handlers.
self.assertRaises(ValueError, logging_run,
- ['failures/expected/exception.html'], tests_included=True)
+ ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
- if SHOULD_TEST_PROCESSES:
+ if self.should_test_processes:
self.assertRaises(run_webkit_tests.WorkerException, logging_run,
- ['--worker-model', 'processes', 'failures/expected/exception.html'], tests_included=True)
+ ['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True)
def test_full_results_html(self):
# FIXME: verify html?
@@ -355,13 +351,13 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
# Note that this also tests running a test marked as SKIP if
# you specify it explicitly.
self.assertRaises(KeyboardInterrupt, logging_run,
- ['failures/expected/keyboard.html'], tests_included=True)
-
- def test_keyboard_interrupt_inline_worker_model(self):
- self.assertRaises(KeyboardInterrupt, logging_run,
- ['failures/expected/keyboard.html', '--worker-model', 'inline'],
+ ['failures/expected/keyboard.html', '--child-processes', '1'],
tests_included=True)
+ if self.should_test_processes:
+ self.assertRaises(KeyboardInterrupt, logging_run,
+ ['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True)
+
def test_no_tests_found(self):
res, out, err, user = logging_run(['resources'], tests_included=True)
self.assertEqual(res, -1)
@@ -397,6 +393,20 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
tests_run = get_tests_run(['--skip-pixel-test-if-no-baseline'] + tests_to_run, tests_included=True, flatten_batches=True)
self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html'])
+ def test_ignore_tests(self):
+ def assert_ignored(args, tests_expected_to_run):
+ tests_to_run = ['failures/expected/image.html', 'passes/image.html']
+ tests_run = get_tests_run(args + tests_to_run, tests_included=True, flatten_batches=True)
+ self.assertEquals(tests_run, tests_expected_to_run)
+
+ assert_ignored(['-i', 'failures/expected/image.html'], ['passes/image.html'])
+ assert_ignored(['-i', 'passes'], ['failures/expected/image.html'])
+
+ # Note here that there is an expectation for failures/expected/image.html already, but
+ # it is overridden by the command line arg. This might be counter-intuitive.
+ # FIXME: This isn't currently working ...
+ # assert_ignored(['-i', 'failures/expected'], ['passes/image.html'])
+
def test_iterations(self):
tests_to_run = ['passes/image.html', 'passes/text.html']
tests_run = get_tests_run(['--iterations', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
@@ -569,6 +579,10 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
def test_crash_log(self):
+ # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
+ # Currently CrashLog uploading only works on Darwin.
+ if not self._platform.is_mac():
+ return
mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
host = MockHost()
host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
@@ -579,12 +593,13 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
record_results=True,
host=host)
expected_crash_log = mock_crash_report
- # Currently CrashLog uploading only works on Darwin.
- if sys.platform != "darwin":
- expected_crash_log = "mock-std-error-output"
self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)
def test_web_process_crash_log(self):
+ # FIXME: Need to rewrite these tests to not be mac-specific, or move them elsewhere.
+ # Currently CrashLog uploading only works on Darwin.
+ if not self._platform.is_mac():
+ return
mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
host = MockHost()
host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
@@ -594,11 +609,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
tests_included=True,
record_results=True,
host=host)
- expected_crash_log = mock_crash_report
- # Currently CrashLog uploading only works on Darwin.
- if sys.platform != "darwin":
- expected_crash_log = "mock-std-error-output"
- self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), expected_crash_log)
+ self.assertEquals(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)
def test_exit_after_n_failures_upload(self):
host = MockHost()
@@ -610,7 +621,22 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
tests_included=True,
record_results=True,
host=host)
- self.assertTrue('/tmp/layout-test-results/incremental_results.json' in host.filesystem.files)
+
+ # The incremental results file should have been generated during the run and then deleted, so it must no longer exist.
+ self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
+
+ # This checks that we report only the number of tests that actually failed.
+ self.assertEquals(res, 1)
+
+ # This checks that passes/text.html is considered SKIPped.
+ self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
+
+ # This checks that we told the user we bailed out.
+ self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())
+
+ # This checks that neither test ran as expected.
+ # FIXME: This log message is confusing; tests that were skipped should be called out separately.
+ self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())
def test_exit_after_n_failures(self):
# Unexpected failures should result in tests stopping.
@@ -664,17 +690,6 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
flatten_batches=True)
self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)
- def test_exit_after_n_crashes_inline_worker_model(self):
- tests_run = get_tests_run([
- 'failures/unexpected/timeout.html',
- 'passes/text.html',
- '--exit-after-n-crashes-or-timeouts', '1',
- '--worker-model', 'inline',
- ],
- tests_included=True,
- flatten_batches=True)
- self.assertEquals(['failures/unexpected/timeout.html'], tests_run)
-
def test_results_directory_absolute(self):
# We run a configuration that should fail, to generate output, then
# look for what the output results url was.
@@ -711,19 +726,17 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
# These next tests test that we run the tests in ascending alphabetical
# order per directory. HTTP tests are sharded separately from other tests,
# so we have to test both.
- def assert_run_order(self, worker_model, child_processes='1'):
- tests_run = get_tests_run(['--worker-model', worker_model,
- '--child-processes', child_processes, 'passes'],
+ def assert_run_order(self, child_processes='1'):
+ tests_run = get_tests_run(['--child-processes', child_processes, 'passes'],
tests_included=True, flatten_batches=True)
self.assertEquals(tests_run, sorted(tests_run))
- tests_run = get_tests_run(['--worker-model', worker_model,
- '--child-processes', child_processes, 'http/tests/passes'],
+ tests_run = get_tests_run(['--child-processes', child_processes, 'http/tests/passes'],
tests_included=True, flatten_batches=True)
self.assertEquals(tests_run, sorted(tests_run))
def test_run_order__inline(self):
- self.assert_run_order('inline')
+ self.assert_run_order()
def test_tolerance(self):
class ImageDiffTestPort(TestPort):
@@ -756,26 +769,6 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
'virtual/passes/text.html', 'virtual/passes/args.html']))
- def test_worker_model__inline(self):
- self.assertTrue(passing_run(['--worker-model', 'inline']))
-
- def test_worker_model__inline_with_child_processes(self):
- res, out, err, user = logging_run(['--worker-model', 'inline',
- '--child-processes', '2'])
- self.assertEqual(res, 0)
- self.assertContainsLine(err, '--worker-model=inline overrides --child-processes\n')
-
- def test_worker_model__processes(self):
- if SHOULD_TEST_PROCESSES:
- self.assertTrue(passing_run(['--worker-model', 'processes']))
-
- def test_worker_model__processes_and_dry_run(self):
- if SHOULD_TEST_PROCESSES:
- self.assertTrue(passing_run(['--worker-model', 'processes', '--dry-run']))
-
- def test_worker_model__unknown(self):
- self.assertRaises(ValueError, logging_run, ['--worker-model', 'unknown'])
-
def test_reftest_run(self):
tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True)
self.assertEquals(['passes/reftest.html'], tests_run)
@@ -819,6 +812,12 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
res, buildbot_output, regular_output, user = logging_run(['--additional-platform-directory', 'foo'])
self.assertContainsLine(regular_output, '--additional-platform-directory=foo is ignored since it is not absolute\n')
+ def test_additional_expectations(self):
+ host = MockHost()
+ host.filesystem.write_text_file('/tmp/overrides.txt', 'BUGX : failures/unexpected/mismatch.html = IMAGE\n')
+ self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
+ tests_included=True, host=host))
+
def test_no_http_and_force(self):
# See test_run_force, using --force raises an exception.
# FIXME: We would like to check the warnings generated.
@@ -841,8 +840,6 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'http'))
self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'websocket'))
-MainTest = skip_if(MainTest, sys.platform == 'cygwin' and sys.version < '2.6', 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')
-
class EndToEndTest(unittest.TestCase):
def parse_full_results(self, full_results_text):
@@ -876,7 +873,7 @@ class EndToEndTest(unittest.TestCase):
self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
- {"expected": "PASS", "ref_file": "reftests/foo/second-mismatching-ref.html", "actual": "IMAGE", 'is_reftest': True})
+ {"expected": "PASS", "ref_file": "reftests/foo/second-mismatching-ref.html", "actual": "IMAGE", "image_diff_percent": 1, 'is_reftest': True})
self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
{"expected": "PASS", "ref_file": "reftests/foo/matching-ref.html", "actual": "IMAGE", "is_mismatch_reftest": True})
self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
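
The tests above switch from inspecting sys.platform directly to asking a real PlatformInfo object. A sketch of the pattern, with SimplePlatformInfo as a hypothetical stand-in for webkitpy's PlatformInfo:

    import sys

    class SimplePlatformInfo(object):
        def is_win(self):
            return sys.platform in ('cygwin', 'win32')

        def is_mac(self):
            return sys.platform == 'darwin'

    platform_info = SimplePlatformInfo()
    # Mirrors MainTest.setUp(): process-spawning tests are skipped on
    # Windows (bug 63846), and the crash-log tests only run on Mac.
    should_test_processes = not platform_info.is_win()
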
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
index a09aa3345..fecc5ade3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
@@ -137,6 +137,12 @@ class LayoutTestApacheHttpd(http_server_base.HttpServerBase):
return int(self._filesystem.read_text_file(self._pid_file))
def _stop_running_server(self):
+ # If apache was forcefully killed, the pid file will not have been deleted, so check
+ # that the process specified by the pid_file no longer exists before deleting the file.
+ if self._pid and not self._executive.check_running_pid(self._pid):
+ self._filesystem.remove(self._pid_file)
+ return
+
retval, err = self._run(self._stop_cmd)
if retval or len(err):
raise http_server_base.ServerError('Failed to stop %s: %s' % (self._name, err))
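
A minimal sketch of the stale-pidfile check added above. The os.kill(pid, 0) probe is one conventional way to test liveness, in the spirit of Executive.check_running_pid; the paths and error handling here are illustrative only:

    import errno
    import os

    def check_running_pid(pid):
        # Signal 0 delivers nothing but still performs the existence check.
        try:
            os.kill(pid, 0)
            return True
        except OSError as e:
            # EPERM means the process exists but belongs to another user.
            return e.errno == errno.EPERM

    def remove_pid_file_if_stale(pid_file):
        with open(pid_file) as f:
            pid = int(f.read())
        if not check_running_pid(pid):
            # Apache was killed forcefully and never cleaned up after itself.
            os.remove(pid_file)
            return True
        return False
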
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py
index 8def09d0a..75ad2143c 100755
--- a/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py
@@ -31,7 +31,6 @@
import logging
import os
-import sys
import time
from webkitpy.layout_tests.servers import http_server_base
@@ -177,7 +176,7 @@ class Lighttpd(http_server_base.HttpServerBase):
# Copy liblightcomp.dylib to /tmp/lighttpd/lib to work around the
# bug that mod_alias.so loads it from the hard coded path.
- if sys.platform == 'darwin':
+ if self._port_obj.host.platform.is_mac():
tmp_module_path = '/tmp/lighttpd/lib'
if not self._filesystem.exists(tmp_module_path):
self._filesystem.maybe_make_directory(tmp_module_path)
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py
index ee5ef06d2..33131d247 100755
--- a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py
@@ -61,7 +61,7 @@ class HttpServerBase(object):
# randomly-generated directory under /var/folders and no one would ever
# look there.
tmpdir = tempfile.gettempdir()
- if sys.platform == 'darwin':
+ if port_obj.host.platform.is_mac():
tmpdir = '/tmp'
self._runtime_path = self._filesystem.join(tmpdir, "WebKit")
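
A one-liner sketch of the tmpdir choice above: the stock tempdir on Mac lives under a randomly named /var/folders path, so a fixed /tmp is preferred (is_mac stands in for port_obj.host.platform.is_mac()):

    import sys
    import tempfile

    def runtime_tmpdir(is_mac=(sys.platform == 'darwin')):
        # tempfile.gettempdir() honors TMPDIR and friends; /tmp is predictable.
        return '/tmp' if is_mac else tempfile.gettempdir()
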
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
index b5c233b10..49a507ed6 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -27,58 +27,99 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""
-Package that implements a stream wrapper that has 'meters' as well as
-regular output. A 'meter' is a single line of text that can be erased
-and rewritten repeatedly, without producing multiple lines of output. It
-can be used to produce effects like progress bars.
-
-This package should only be called by the printing module in the layout_tests
-package.
-"""
-
-
-class MeteredStream:
- """This class is a wrapper around a stream that allows you to implement
- meters (progress bars, etc.).
-
- It can be used directly as a stream, by calling write(), but also provides
- a method called update() that will overwite prior updates().
- """
-
- def __init__(self, stream):
- """
- Args:
- stream: output stream to write to
- """
- self._stream = stream
- self._dirty = False
- self._last_update = ""
-
- def write(self, txt):
- """Write to the stream, overwriting and resetting the meter."""
-
- # This routine is called by the logging infrastructure, and
- # must not call back into logging. It is not a public function.
- self._overwrite(txt)
- self._reset()
-
- def update(self, txt):
- """Write a message that will be overwritten by subsequent update() or write() calls."""
- self._overwrite(txt)
-
- def _overwrite(self, txt):
- # Print the necessary number of backspaces to erase the previous
- # message.
- if len(self._last_update):
- self._stream.write("\b" * len(self._last_update) +
- " " * len(self._last_update) +
- "\b" * len(self._last_update))
- self._stream.write(txt)
- last_newline = txt.rfind("\n")
- self._last_update = txt[(last_newline + 1):]
- self._dirty = True
-
- def _reset(self):
- self._dirty = False
- self._last_update = ''
+import logging
+import os
+import sys
+import time
+
+
+LOG_HANDLER_NAME = 'MeteredStreamLogHandler'
+
+
+class MeteredStream(object):
+ """
+ This class implements a stream wrapper that has 'meters' as well as
+ regular output. A 'meter' is a single line of text that can be erased
+ and rewritten repeatedly, without producing multiple lines of output. It
+ can be used to produce effects like progress bars.
+ """
+
+ @staticmethod
+ def _erasure(txt):
+ num_chars = len(txt)
+ return '\b' * num_chars + ' ' * num_chars + '\b' * num_chars
+
+ @staticmethod
+ def _ensure_newline(txt):
+ return txt if txt.endswith('\n') else txt + '\n'
+
+ def __init__(self, stream=None, verbose=False, logger=None, time_fn=None, pid=None):
+ self._stream = stream or sys.stderr
+ self._verbose = verbose
+ self._time_fn = time_fn or time.time
+ self._pid = pid or os.getpid()
+
+ self._isatty = self._stream.isatty()
+ self._erasing = self._isatty and not verbose
+ self._last_partial_line = ''
+ self._last_write_time = 0.0
+ self._throttle_delay_in_secs = 0.066 if self._erasing else 10.0
+
+ self._logger = logger
+ self._log_handler = None
+ if self._logger:
+ log_level = logging.DEBUG if verbose else logging.INFO
+ self._log_handler = _LogHandler(self)
+ self._log_handler.setLevel(log_level)
+ self._logger.addHandler(self._log_handler)
+
+ def __del__(self):
+ self.cleanup()
+
+ def cleanup(self):
+ if self._logger:
+ self._logger.removeHandler(self._log_handler)
+ self._log_handler = None
+
+ def write_throttled_update(self, txt):
+ now = self._time_fn()
+ if now - self._last_write_time >= self._throttle_delay_in_secs:
+ self.write_update(txt, now)
+
+ def write_update(self, txt, now=None):
+ self.write(txt, now)
+ if self._erasing:
+ self._last_partial_line = txt[txt.rfind('\n') + 1:]
+
+ def write(self, txt, now=None):
+ now = now or self._time_fn()
+ self._last_write_time = now
+ if self._last_partial_line:
+ self._erase_last_partial_line()
+ if self._verbose:
+ now_tuple = time.localtime(now)
+ msg = '%02d:%02d:%02d.%03d %d %s' % (now_tuple.tm_hour, now_tuple.tm_min, now_tuple.tm_sec, int((now * 1000) % 1000), self._pid, self._ensure_newline(txt))
+ elif self._isatty:
+ msg = txt
+ else:
+ msg = self._ensure_newline(txt)
+
+ self._stream.write(msg)
+
+ def writeln(self, txt, now=None):
+ self.write(self._ensure_newline(txt), now)
+
+ def _erase_last_partial_line(self):
+ self._stream.write(self._erasure(self._last_partial_line))
+ self._last_partial_line = ''
+
+
+class _LogHandler(logging.Handler):
+ def __init__(self, meter):
+ logging.Handler.__init__(self)
+ self._meter = meter
+ self.name = LOG_HANDLER_NAME
+
+ def emit(self, record):
+ self._meter.writeln(record.getMessage(), record.created)
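
The erasure trick at the heart of MeteredStream is worth seeing in isolation: back up over the previous partial line with backspaces, blank it with spaces, then back up again so the next update starts at the same column. A standalone sketch:

    def erasure(txt):
        num_chars = len(txt)
        return '\b' * num_chars + ' ' * num_chars + '\b' * num_chars

    previous = "Testing (42%): 10 ran as expected, 0 didn't, 14 left"
    update = erasure(previous) + "Testing (46%): 11 ran as expected, 0 didn't, 13 left"
    # On a tty, writing `update` repaints the meter line in place; on a
    # non-tty stream, MeteredStream skips erasing and emits full lines.
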
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
index 359cfcf60..b388ec608 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -27,55 +27,131 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import logging
+import re
import StringIO
import unittest
from webkitpy.layout_tests.views.metered_stream import MeteredStream
-class TestMeteredStream(unittest.TestCase):
- def test_regular(self):
- a = StringIO.StringIO()
- m = MeteredStream(a)
- self.assertFalse(a.getvalue())
-
- # basic test
- m.write("foo")
- exp = ['foo']
- self.assertEquals(a.buflist, exp)
-
- # now check that a second write() does not overwrite the first.
- m.write("bar")
- exp.append('bar')
- self.assertEquals(a.buflist, exp)
-
- m.update("batter")
- exp.append('batter')
- self.assertEquals(a.buflist, exp)
-
- # The next update() should overwrite the laste update() but not the
- # other text. Note that the cursor is effectively positioned at the
- # end of 'foo', even though we had to erase three more characters.
- m.update("foo")
- exp.append('\b\b\b\b\b\b \b\b\b\b\b\b')
- exp.append('foo')
- self.assertEquals(a.buflist, exp)
-
- # now check that a write() does overwrite the update
- m.write("foo")
- exp.append('\b\b\b \b\b\b')
- exp.append('foo')
- self.assertEquals(a.buflist, exp)
-
- # Now test that we only back up to the most recent newline.
-
- # Note also that we do not back up to erase the most recent write(),
- # i.e., write()s do not get erased.
- a = StringIO.StringIO()
- m = MeteredStream(a)
- m.update("foo\nbar")
- m.update("baz")
- self.assertEquals(a.buflist, ['foo\nbar', '\b\b\b \b\b\b', 'baz'])
+class RegularTest(unittest.TestCase):
+ verbose = False
+ isatty = False
+
+ def setUp(self):
+ self.stream = StringIO.StringIO()
+ self.buflist = self.stream.buflist
+ self.stream.isatty = lambda: self.isatty
+
+ # Configure a logger to check that log calls normally get included.
+ self.logger = logging.getLogger(__name__)
+ self.logger.setLevel(logging.DEBUG)
+ self.logger.propagate = False
+
+ # Add a fake time source with a default sequence of values.
+ self.times = range(10)
+
+ self.meter = MeteredStream(self.stream, self.verbose, self.logger, self.time_fn, 8675)
+
+ def tearDown(self):
+ if self.meter:
+ self.meter.cleanup()
+ self.meter = None
+
+ def time_fn(self):
+ return self.times.pop(0)
+
+ def test_logging_not_included(self):
+ # This tests that if we don't hand a logger to the MeteredStream,
+ # nothing is logged.
+ logging_stream = StringIO.StringIO()
+ handler = logging.StreamHandler(logging_stream)
+ root_logger = logging.getLogger()
+ orig_level = root_logger.level
+ root_logger.addHandler(handler)
+ root_logger.setLevel(logging.DEBUG)
+ try:
+ self.meter = MeteredStream(self.stream, self.verbose, None, self.time_fn, 8675)
+ self.meter.write_throttled_update('foo')
+ self.meter.write_update('bar')
+ self.meter.write('baz')
+ self.assertEquals(logging_stream.buflist, [])
+ finally:
+ root_logger.removeHandler(handler)
+ root_logger.setLevel(orig_level)
+
+ def _basic(self, times):
+ self.times = times
+ self.meter.write_update('foo')
+ self.meter.write_update('bar')
+ self.meter.write_throttled_update('baz')
+ self.meter.write_throttled_update('baz 2')
+ self.meter.writeln('done')
+ self.assertEquals(self.times, [])
+ return self.buflist
+
+ def test_basic(self):
+ buflist = self._basic([0, 1, 2, 13, 14])
+ self.assertEquals(buflist, ['foo\n', 'bar\n', 'baz 2\n', 'done\n'])
+
+ def _log_after_update(self):
+ self.meter.write_update('foo')
+ self.logger.info('bar')
+ return self.buflist
+
+ def test_log_after_update(self):
+ buflist = self._log_after_update()
+ self.assertEquals(buflist, ['foo\n', 'bar\n'])
+
+ def test_log_args(self):
+ self.logger.info('foo %s %d', 'bar', 2)
+ self.assertEquals(self.buflist, ['foo bar 2\n'])
+
+
+class TtyTest(RegularTest):
+ verbose = False
+ isatty = True
+
+ def test_basic(self):
+ buflist = self._basic([0, 1, 1.05, 1.1, 2])
+ self.assertEquals(buflist, ['foo',
+ MeteredStream._erasure('foo'), 'bar',
+ MeteredStream._erasure('bar'), 'baz 2',
+ MeteredStream._erasure('baz 2'), 'done\n'])
+
+ def test_log_after_update(self):
+ buflist = self._log_after_update()
+ self.assertEquals(buflist, ['foo',
+ MeteredStream._erasure('foo'), 'bar\n'])
+
+
+class VerboseTest(RegularTest):
+ isatty = False
+ verbose = True
+
+ def test_basic(self):
+ buflist = self._basic([0, 1, 2.1, 13, 14.1234])
+ # We don't bother to match the hours and minutes of the timestamp since
+ # the local timezone can vary and we can't set that portably and easily.
+ self.assertTrue(re.match('\d\d:\d\d:00.000 8675 foo\n', buflist[0]))
+ self.assertTrue(re.match('\d\d:\d\d:01.000 8675 bar\n', buflist[1]))
+ self.assertTrue(re.match('\d\d:\d\d:13.000 8675 baz 2\n', buflist[2]))
+ self.assertTrue(re.match('\d\d:\d\d:14.123 8675 done\n', buflist[3]))
+ self.assertEquals(len(buflist), 4)
+
+ def test_log_after_update(self):
+ buflist = self._log_after_update()
+ self.assertTrue(re.match('\d\d:\d\d:00.000 8675 foo\n', buflist[0]))
+
+ # The second argument should have a real timestamp and pid, so we just check the format.
+ self.assertTrue(re.match('\d\d:\d\d:\d\d.\d\d\d \d+ bar\n', buflist[1]))
+
+ self.assertEquals(len(buflist), 2)
+
+ def test_log_args(self):
+ self.logger.info('foo %s %d', 'bar', 2)
+ self.assertEquals(len(self.buflist), 1)
+ self.assertTrue(self.buflist[0].endswith('foo bar 2\n'))
if __name__ == '__main__':
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing.py b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
index 750c48df9..3df2956c1 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/printing.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -29,21 +29,13 @@
"""Package that handles non-debug, non-file output for run-webkit-tests."""
-import logging
import optparse
-import time
from webkitpy.common.net import resultsjsonparser
-from webkitpy.layout_tests.views.metered_stream import MeteredStream
from webkitpy.layout_tests.models.test_expectations import TestExpectations
-
-
-_log = logging.getLogger(__name__)
-
+from webkitpy.layout_tests.views.metered_stream import MeteredStream
NUM_SLOW_TESTS_TO_LOG = 10
-FAST_UPDATES_SECONDS = 0.03
-SLOW_UPDATES_SECONDS = 10.0
PRINT_DEFAULT = "misc,one-line-progress,one-line-summary,unexpected,unexpected-results,updates"
PRINT_EVERYTHING = "actual,config,expected,misc,one-line-progress,one-line-summary,slowest,timing,unexpected,unexpected-results,updates"
@@ -154,27 +146,6 @@ def parse_print_options(print_options, verbose):
return switches
-def _configure_logging(stream, verbose):
- log_fmt = '%(message)s'
- log_datefmt = '%y%m%d %H:%M:%S'
- log_level = logging.INFO
- if verbose:
- log_fmt = '%(asctime)s %(process)d %(filename)s:%(lineno)d %(levelname)s %(message)s'
- log_level = logging.DEBUG
-
- root = logging.getLogger()
- handler = logging.StreamHandler(stream)
- handler.setFormatter(logging.Formatter(log_fmt, None))
- root.addHandler(handler)
- root.setLevel(log_level)
- return handler
-
-
-def _restore_logging(handler_to_remove):
- root = logging.getLogger()
- root.handlers.remove(handler_to_remove)
-
-
class Printer(object):
"""Class handling all non-debug-logging printing done by run-webkit-tests.
@@ -187,7 +158,7 @@ class Printer(object):
By default the buildbot-parsed code gets logged to stdout, and regular
output gets logged to stderr."""
- def __init__(self, port, options, regular_output, buildbot_output, configure_logging):
+ def __init__(self, port, options, regular_output, buildbot_output, logger=None):
"""
Args
port interface to port-specific routines
@@ -196,37 +167,16 @@ class Printer(object):
should be written
buildbot_output stream to which output intended to be read by
the buildbots (and humans) should be written
- configure_loggign Whether a logging handler should be registered
-
+ logger optional logger to integrate into the stream.
"""
self._port = port
self._options = options
- self._stream = regular_output
self._buildbot_stream = buildbot_output
- self._meter = None
-
- # These are used for --print one-line-progress
- self._last_remaining = None
- self._last_update_time = None
-
+ self._meter = MeteredStream(regular_output, options.verbose, logger=logger)
self.switches = parse_print_options(options.print_options, options.verbose)
- self._logging_handler = None
- if self._stream.isatty() and not options.verbose:
- self._update_interval_seconds = FAST_UPDATES_SECONDS
- self._meter = MeteredStream(self._stream)
- if configure_logging:
- self._logging_handler = _configure_logging(self._meter, options.verbose)
- else:
- self._update_interval_seconds = SLOW_UPDATES_SECONDS
- if configure_logging:
- self._logging_handler = _configure_logging(self._stream, options.verbose)
-
def cleanup(self):
- """Restore logging configuration to its initial settings."""
- if self._logging_handler:
- _restore_logging(self._logging_handler)
- self._logging_handler = None
+ self._meter.cleanup()
def __del__(self):
self.cleanup()
@@ -350,27 +300,19 @@ class Printer(object):
if self.disabled('one-line-progress'):
return
- now = time.time()
- if self._last_update_time is None:
- self._last_update_time = now
-
- time_since_last_update = now - self._last_update_time
- if time_since_last_update <= self._update_interval_seconds:
+ if result_summary.remaining == 0:
+ self._meter.write_update('')
return
- self._last_update_time = now
-
percent_complete = 100 * (result_summary.expected +
result_summary.unexpected) / result_summary.total
action = "Testing"
if retrying:
action = "Retrying"
- self._update("%s (%d%%): %d ran as expected, %d didn't, %d left" %
- (action, percent_complete, result_summary.expected,
- result_summary.unexpected, result_summary.remaining))
- if result_summary.remaining == 0:
- self._update('')
+ self._meter.write_throttled_update("%s (%d%%): %d ran as expected, %d didn't, %d left" %
+ (action, percent_complete, result_summary.expected,
+ result_summary.unexpected, result_summary.remaining))
def print_unexpected_results(self, unexpected_results):
"""Prints a list of the unexpected results to the buildbot stream."""
@@ -458,7 +400,7 @@ class Printer(object):
def print_update(self, msg):
if self.disabled('updates'):
return
- self._update(msg)
+ self._meter.write_update(msg)
def write(self, msg, option="misc"):
if self.disabled(option):
@@ -466,10 +408,4 @@ class Printer(object):
self._write(msg)
def _write(self, msg):
- _log.info(msg)
-
- def _update(self, msg):
- if self._meter:
- self._meter.update(msg)
- else:
- self._write(msg)
+ self._meter.writeln(msg)
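
Printer now delegates throttling to MeteredStream: updates are dropped unless enough time has passed since the last write (0.066s when erasing on a tty, 10s otherwise, per metered_stream.py). A simplified stand-in to show the mechanism:

    import time

    class ThrottledWriter(object):
        def __init__(self, write, delay_in_secs, time_fn=time.time):
            self._write = write
            self._delay = delay_in_secs
            self._time_fn = time_fn
            self._last_write_time = 0.0

        def write_throttled_update(self, txt):
            now = self._time_fn()
            if now - self._last_write_time >= self._delay:
                self._last_write_time = now
                self._write(txt)

    written = []
    times = iter([100, 101, 101.05, 111.2])
    writer = ThrottledWriter(written.append, 10.0, time_fn=lambda: next(times))
    for msg in ['a', 'b', 'c', 'd']:
        writer.write_throttled_update(msg)
    assert written == ['a', 'd']
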
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
index 6899d2c5d..8777fd8d6 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
@@ -1,5 +1,5 @@
#!/usr/bin/python
-# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -29,7 +29,6 @@
"""Unit tests for printing.py."""
-import logging
import optparse
import StringIO
import time
@@ -54,35 +53,6 @@ def get_options(args):
class TestUtilityFunctions(unittest.TestCase):
- def assertEmpty(self, stream):
- self.assertFalse(stream.getvalue())
-
- def assertNotEmpty(self, stream):
- self.assertTrue(stream.getvalue())
-
- def assertWritten(self, stream, contents):
- self.assertEquals(stream.buflist, contents)
-
- def test_configure_logging(self):
- options, args = get_options([])
- stream = StringIO.StringIO()
- handler = printing._configure_logging(stream, options.verbose)
- logging.info("this should be logged")
- self.assertNotEmpty(stream)
-
- stream = StringIO.StringIO()
- logging.debug("this should not be logged")
- self.assertEmpty(stream)
-
- printing._restore_logging(handler)
-
- stream = StringIO.StringIO()
- options, args = get_options(['--verbose'])
- handler = printing._configure_logging(stream, options.verbose)
- logging.debug("this should be logged")
- self.assertNotEmpty(stream)
- printing._restore_logging(handler)
-
def test_print_options(self):
options, args = get_options([])
self.assertTrue(options is not None)
@@ -144,8 +114,7 @@ class Testprinter(unittest.TestCase):
regular_output = StringIO.StringIO()
regular_output.isatty = lambda: tty
buildbot_output = StringIO.StringIO()
- printer = printing.Printer(self._port, options, regular_output,
- buildbot_output, configure_logging=True)
+ printer = printing.Printer(self._port, options, regular_output, buildbot_output)
return printer, regular_output, buildbot_output
def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
@@ -350,7 +319,7 @@ class Testprinter(unittest.TestCase):
'failures/expected/crash.html']
paths, rs, exp = self.get_result_summary(tests, expectations)
- # First, test that we print nothing.
+ # First, test that we print nothing when we shouldn't.
printer.print_progress(rs, False, paths)
self.assertEmpty(out)
self.assertEmpty(err)
@@ -359,55 +328,28 @@ class Testprinter(unittest.TestCase):
self.assertEmpty(out)
self.assertEmpty(err)
- self.times = [1, 2, 12, 13, 14, 23, 33]
-
- def mock_time():
- return self.times.pop(0)
-
- orig_time = time.time
- try:
- time.time = mock_time
-
- # Test printing progress updates to a file.
- printer, err, out = self.get_printer(['--print', 'one-line-progress'])
- printer.print_progress(rs, False, paths)
- printer.print_progress(rs, False, paths)
- self.assertEmpty(out)
- self.assertEmpty(err)
-
- printer.print_progress(rs, False, paths)
- self.assertEmpty(out)
- self.assertNotEmpty(err)
-
- self.reset(err)
- self.reset(out)
- printer.print_progress(rs, True, paths)
- self.assertEmpty(out)
- self.assertEmpty(err)
-
- printer.print_progress(rs, True, paths)
- self.assertEmpty(out)
- self.assertNotEmpty(err)
-
- # Now reconfigure the printer to test printing to a TTY instead of a file.
- self.times = [1, 1.01, 2, 3]
- printer, err, out = self.get_printer(['--print', 'one-line-progress'], tty=True)
- printer.print_progress(rs, False, paths)
- printer.print_progress(rs, False, paths)
- self.assertEmpty(out)
- self.assertEmpty(err)
-
- printer.print_progress(rs, False, paths)
- self.assertEmpty(out)
- self.assertNotEmpty(err)
-
- self.reset(err)
- self.reset(out)
- printer.print_progress(rs, True, paths)
- self.assertEmpty(out)
- self.assertNotEmpty(err)
- finally:
- time.time = orig_time
+ # Now test that we do print things.
+ printer, err, out = self.get_printer(['--print', 'one-line-progress'])
+ printer.print_progress(rs, False, paths)
+ self.assertEmpty(out)
+ self.assertNotEmpty(err)
+
+ printer, err, out = self.get_printer(['--print', 'one-line-progress'])
+ printer.print_progress(rs, True, paths)
+ self.assertEmpty(out)
+ self.assertNotEmpty(err)
+
+ printer, err, out = self.get_printer(['--print', 'one-line-progress'])
+ rs.remaining = 0
+ printer.print_progress(rs, False, paths)
+ self.assertEmpty(out)
+ self.assertNotEmpty(err)
+
+ printer.print_progress(rs, True, paths)
+ self.assertEmpty(out)
+ self.assertNotEmpty(err)
+
def test_write_nothing(self):
printer, err, out = self.get_printer(['--print', 'nothing'])
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
new file mode 100644
index 000000000..509dd1d2b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import logging
+import math
+import re
+
+from webkitpy.layout_tests.port.driver import DriverInput
+
+
+_log = logging.getLogger(__name__)
+
+
+class PerfTest(object):
+ def __init__(self, test_name, path_or_url):
+ self._test_name = test_name
+ self._path_or_url = path_or_url
+
+ def test_name(self):
+ return self._test_name
+
+ def path_or_url(self):
+ return self._path_or_url
+
+ def run(self, driver, timeout_ms):
+ output = driver.run_test(DriverInput(self.path_or_url(), timeout_ms, None, False))
+ if self.run_failed(output):
+ return None
+ return self.parse_output(output)
+
+ def run_failed(self, output):
+ if output.text is None or output.error:
+ pass
+ elif output.timeout:
+ _log.error('timeout: %s' % self.test_name())
+ elif output.crash:
+ _log.error('crash: %s' % self.test_name())
+ else:
+ return False
+
+ if output.error:
+ _log.error('error: %s\n%s' % (self.test_name(), output.error))
+
+ return True
+
+ _lines_to_ignore_in_parser_result = [
+ re.compile(r'^Running \d+ times$'),
+ re.compile(r'^Ignoring warm-up '),
+ re.compile(r'^Info:'),
+ re.compile(r'^\d+(\.\d+)?$'),
+ # The following handle output from existing tests like Dromaeo.
+ re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
+ re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
+ re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)"""))]
+
+ _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max']
+
+ def _should_ignore_line_in_parser_test_result(self, line):
+ if not line:
+ return True
+ for regex in self._lines_to_ignore_in_parser_result:
+ if regex.search(line):
+ return True
+ return False
+
+ def parse_output(self, output):
+ test_failed = False
+ results = {}
+ score_regex = re.compile(r'^(?P<key>' + r'|'.join(self._statistics_keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
+ unit = "ms"
+
+ for line in re.split('\n', output.text):
+ score = score_regex.match(line)
+ if score:
+ results[score.group('key')] = float(score.group('value'))
+ if score.group('unit'):
+ unit = score.group('unit')
+ continue
+
+ if not self._should_ignore_line_in_parser_test_result(line):
+ test_failed = True
+ _log.error(line)
+
+ if test_failed or set(self._statistics_keys) != set(results.keys()):
+ return None
+
+ results['unit'] = unit
+
+ test_name = re.sub(r'\.\w+$', '', self._test_name)
+ self.output_statistics(test_name, results)
+
+ return {test_name: results}
+
+ def output_statistics(self, test_name, results):
+ unit = results['unit']
+ _log.info('RESULT %s= %s %s' % (test_name.replace('/', ': '), results['avg'], unit))
+ _log.info(', '.join(['%s= %s %s' % (key, results[key], unit) for key in self._statistics_keys[1:]]))
+
+
+class ChromiumStylePerfTest(PerfTest):
+ _chromium_style_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')
+
+ def __init__(self, test_name, path_or_url):
+ super(ChromiumStylePerfTest, self).__init__(test_name, path_or_url)
+
+ def parse_output(self, output):
+ test_failed = False
+ results = {}
+ for line in re.split('\n', output.text):
+ resultLine = ChromiumStylePerfTest._chromium_style_result_regex.match(line)
+ if resultLine:
+ # FIXME: Store the unit
+ results[self.test_name() + ':' + resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
+ _log.info(line)
+ elif line:
+ test_failed = True
+ _log.error(line)
+ return results if results and not test_failed else None
+
+
+class PageLoadingPerfTest(PerfTest):
+ def __init__(self, test_name, path_or_url):
+ super(PageLoadingPerfTest, self).__init__(test_name, path_or_url)
+
+ def run(self, driver, timeout_ms):
+ test_times = []
+
+ for i in range(0, 20):
+ output = driver.run_test(DriverInput(self.path_or_url(), timeout_ms, None, False))
+ if self.run_failed(output):
+ return None
+ if i == 0:
+ continue
+ test_times.append(output.test_time * 1000)
+
+ test_times = sorted(test_times)
+
+ # Compute the mean and variance using a numerically stable algorithm.
+ squareSum = 0
+ mean = 0
+ for i, test_time in enumerate(test_times):
+ delta = test_time - mean
+ sweep = i + 1.0
+ mean += delta / sweep
+ squareSum += delta * delta * (i / sweep)
+
+ middle = int(len(test_times) / 2)
+ results = {'avg': mean,
+ 'min': min(test_times),
+ 'max': max(test_times),
+ 'median': test_times[middle] if len(test_times) % 2 else (test_times[middle - 1] + test_times[middle]) / 2,
+ 'stdev': math.sqrt(squareSum),
+ 'unit': 'ms'}
+ self.output_statistics(self.test_name(), results)
+ return {self.test_name(): results}
+
+
+class PerfTestFactory(object):
+
+ _pattern_map = [
+ (re.compile('^inspector/'), ChromiumStylePerfTest),
+ (re.compile('^PageLoad/'), PageLoadingPerfTest),
+ ]
+
+ @classmethod
+ def create_perf_test(cls, test_name, path):
+ for (pattern, test_class) in cls._pattern_map:
+ if pattern.match(test_name):
+ return test_class(test_name, path)
+ return PerfTest(test_name, path)
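
PageLoadingPerfTest.run() above accumulates the mean and the sum of squared deviations in a single pass (Welford's online algorithm). A standalone sketch, checked against the naive two-pass computation; note that, like the code above, it reports sqrt(sum of squared deviations) rather than a normalized standard deviation:

    import math

    def online_stats(values):
        square_sum = 0.0
        mean = 0.0
        for i, value in enumerate(values):
            delta = value - mean
            sweep = i + 1.0
            mean += delta / sweep
            # delta * (value - new_mean) expands to delta^2 * (i / sweep).
            square_sum += delta * delta * (i / sweep)
        return mean, math.sqrt(square_sum)

    values = [2000.0 * k for k in range(1, 11)]
    mean, spread = online_stats(values)
    naive_mean = sum(values) / len(values)
    naive_spread = math.sqrt(sum((v - naive_mean) ** 2 for v in values))
    assert abs(mean - naive_mean) < 1e-9
    assert abs(spread - naive_spread) < 1e-6
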
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
new file mode 100755
index 000000000..21efd2c3c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -0,0 +1,146 @@
+#!/usr/bin/python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import math
+import unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.driver import DriverOutput
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PageLoadingPerfTest
+from webkitpy.performance_tests.perftest import PerfTest
+from webkitpy.performance_tests.perftest import PerfTestFactory
+
+
+class MainTest(unittest.TestCase):
+ def test_parse_output(self):
+ output = DriverOutput('\n'.join([
+ 'Running 20 times',
+ 'Ignoring warm-up run (1115)',
+ '',
+ 'avg 1100',
+ 'median 1101',
+ 'stdev 11',
+ 'min 1080',
+ 'max 1120']), image=None, image_hash=None, audio=None)
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ test = PerfTest('some-test', '/path/some-dir/some-test')
+ self.assertEqual(test.parse_output(output),
+ {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
+ finally:
+ actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+ self.assertEqual(actual_stdout, '')
+ self.assertEqual(actual_stderr, '')
+ self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
+
+ def test_parse_output_with_failing_line(self):
+ output = DriverOutput('\n'.join([
+ 'Running 20 times',
+ 'Ignoring warm-up run (1115)',
+ '',
+ 'some-unrecognizable-line',
+ '',
+ 'avg 1100',
+ 'median 1101',
+ 'stdev 11',
+ 'min 1080',
+ 'max 1120']), image=None, image_hash=None, audio=None)
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ test = PerfTest('some-test', '/path/some-dir/some-test')
+ self.assertEqual(test.parse_output(output), None)
+ finally:
+ actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+ self.assertEqual(actual_stdout, '')
+ self.assertEqual(actual_stderr, '')
+ self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
+
+
+class TestPageLoadingPerfTest(unittest.TestCase):
+ class MockDriver(object):
+ def __init__(self, values):
+ self._values = values
+ self._index = 0
+
+ def run_test(self, input):
+ value = self._values[self._index]
+ self._index += 1
+ if isinstance(value, str):
+ return DriverOutput('some output', image=None, image_hash=None, audio=None, error=value)
+ else:
+ return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=self._values[self._index - 1])
+
+ def test_run(self):
+ test = PageLoadingPerfTest('some-test', '/path/some-dir/some-test')
+ driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ self.assertEqual(test.run(driver, None),
+ {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': math.sqrt(570 * 1000 * 1000), 'min': 2000, 'unit': 'ms'}})
+ finally:
+ actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+ self.assertEqual(actual_stdout, '')
+ self.assertEqual(actual_stderr, '')
+ self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 23874.6727726 ms, min= 2000 ms, max= 20000 ms\n')
+
+ def test_run_with_bad_output(self):
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ test = PageLoadingPerfTest('some-test', '/path/some-dir/some-test')
+ driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20])
+ self.assertEqual(test.run(driver, None), None)
+ finally:
+ actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+ self.assertEqual(actual_stdout, '')
+ self.assertEqual(actual_stderr, '')
+ self.assertEqual(actual_logs, 'error: some-test\nsome error\n')
+
+
+class TestPerfTestFactory(unittest.TestCase):
+ def test_regular_test(self):
+ test = PerfTestFactory.create_perf_test('some-dir/some-test', '/path/some-dir/some-test')
+ self.assertEqual(test.__class__, PerfTest)
+
+ def test_inspector_test(self):
+ test = PerfTestFactory.create_perf_test('inspector/some-test', '/path/inspector/some-test')
+ self.assertEqual(test.__class__, ChromiumStylePerfTest)
+
+ def test_page_loading_test(self):
+ test = PerfTestFactory.create_perf_test('PageLoad/some-test', '/path/PageLoad/some-test')
+ self.assertEqual(test.__class__, PageLoadingPerfTest)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index 8cc0d745e..b4c29490a 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -39,21 +39,20 @@ import time
from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
-from webkitpy.layout_tests.port.driver import DriverInput
from webkitpy.layout_tests.views import printing
+from webkitpy.performance_tests.perftest import PerfTestFactory
+
_log = logging.getLogger(__name__)
class PerfTestsRunner(object):
- _test_directories_for_chromium_style_tests = ['inspector']
_default_branch = 'webkit-trunk'
_EXIT_CODE_BAD_BUILD = -1
_EXIT_CODE_BAD_JSON = -2
_EXIT_CODE_FAILED_UPLOADING = -3
- def __init__(self, regular_output=sys.stderr, buildbot_output=sys.stdout, args=None, port=None):
- self._buildbot_output = buildbot_output
+ def __init__(self, args=None, port=None):
self._options, self._args = PerfTestsRunner._parse_args(args)
if port:
self._port = port
@@ -62,7 +61,6 @@ class PerfTestsRunner(object):
self._host = Host()
self._port = self._host.port_factory.get(self._options.platform, self._options)
self._host._initialize_scm()
- self._printer = printing.Printer(self._port, self._options, regular_output, buildbot_output, configure_logging=False)
self._webkit_base_dir_len = len(self._port.webkit_base())
self._base_path = self._port.perf_tests_dir()
self._results = {}
@@ -70,8 +68,6 @@ class PerfTestsRunner(object):
@staticmethod
def _parse_args(args=None):
- print_options = printing.print_options()
-
perf_option_list = [
optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
help='Set the configuration to Debug'),
@@ -102,33 +98,35 @@ class PerfTestsRunner(object):
optparse.make_option("--webkit-test-runner", "-2", action="store_true",
help="Use WebKitTestRunner rather than DumpRenderTree."),
]
-
- option_list = (perf_option_list + print_options)
- return optparse.OptionParser(option_list=option_list).parse_args(args)
+ return optparse.OptionParser(option_list=perf_option_list).parse_args(args)
def _collect_tests(self):
"""Return the list of tests found."""
def _is_test_file(filesystem, dirname, filename):
- return filename.endswith('.html')
+ return filesystem.splitext(filename)[1] in ['.html', '.svg']
+
+ filesystem = self._host.filesystem
paths = []
for arg in self._args:
paths.append(arg)
- relpath = self._host.filesystem.relpath(arg, self._base_path)
+ relpath = filesystem.relpath(arg, self._base_path)
if relpath:
paths.append(relpath)
skipped_directories = set(['.svn', 'resources'])
- tests = find_files.find(self._host.filesystem, self._base_path, paths, skipped_directories, _is_test_file)
- return [test for test in tests if not self._port.skips_perf_test(self._port.relative_perf_test_filename(test))]
+ test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
+ tests = []
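+ # Normalize to port-relative POSIX-style paths so skipped-test matching
+ # and PerfTestFactory dispatch behave the same on Windows.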
+ for path in test_files:
+ relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
+ if self._port.skips_perf_test(relative_path):
+ continue
+ tests.append(PerfTestFactory.create_perf_test(relative_path, path))
- def run(self):
- if self._options.help_printing:
- self._printer.help_printing()
- self._printer.cleanup()
- return 0
+ return tests
+ def run(self):
if not self._port.check_build(needs_http=False):
_log.error("Build not up to date for %s" % self._port._path_to_driver())
return self._EXIT_CODE_BAD_BUILD
@@ -136,11 +134,8 @@ class PerfTestsRunner(object):
# We wrap any parts of the run that are slow or likely to raise exceptions
# in a try/finally to ensure that we clean up the logging configuration.
unexpected = -1
- try:
- tests = self._collect_tests()
- unexpected = self._run_tests_set(sorted(list(tests)), self._port)
- finally:
- self._printer.cleanup()
+ tests = self._collect_tests()
+ unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
options = self._options
if self._options.output_json_path:
@@ -200,7 +195,7 @@ class PerfTestsRunner(object):
_log.error(line)
return False
- self._printer.write("JSON file uploaded.")
+ _log.info("JSON file uploaded.")
return True
def _print_status(self, tests, expected, unexpected):
@@ -210,7 +205,7 @@ class PerfTestsRunner(object):
status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
if unexpected:
status += " (%d didn't run)" % unexpected
- self._printer.write(status)
+ _log.info(status)
def _run_tests_set(self, tests, port):
result_count = len(tests)
@@ -227,108 +222,27 @@ class PerfTestsRunner(object):
driver.stop()
return unexpected
- relative_test_path = self._host.filesystem.relpath(test, self._base_path)
- self._printer.write('Running %s (%d of %d)' % (relative_test_path, expected + unexpected + 1, len(tests)))
-
- is_chromium_style = self._host.filesystem.split(relative_test_path)[0] in self._test_directories_for_chromium_style_tests
- if self._run_single_test(test, driver, is_chromium_style):
+ _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
+ if self._run_single_test(test, driver):
expected = expected + 1
else:
unexpected = unexpected + 1
- self._printer.write('')
+ _log.info('')
driver.stop()
return unexpected
- _inspector_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')
-
- def _process_chromium_style_test_result(self, test, output):
- test_failed = False
- got_a_result = False
- for line in re.split('\n', output.text):
- resultLine = self._inspector_result_regex.match(line)
- if resultLine:
- self._results[resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
- self._buildbot_output.write("%s\n" % line)
- got_a_result = True
- elif not len(line) == 0:
- test_failed = True
- self._printer.write("%s" % line)
- return test_failed or not got_a_result
-
- _lines_to_ignore_in_parser_result = [
- re.compile(r'^Running \d+ times$'),
- re.compile(r'^Ignoring warm-up '),
- re.compile(r'^Info:'),
- re.compile(r'^\d+(.\d+)?$'),
- # Following are for handle existing test like Dromaeo
- re.compile(re.escape("""main frame - has 1 onunload handler(s)""")),
- re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
- re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)"""))]
-
- def _should_ignore_line_in_parser_test_result(self, line):
- if not line:
- return True
- for regex in self._lines_to_ignore_in_parser_result:
- if regex.search(line):
- return True
- return False
-
- def _process_parser_test_result(self, test, output):
- got_a_result = False
- test_failed = False
- filesystem = self._host.filesystem
- category, test_name = filesystem.split(filesystem.relpath(test, self._base_path))
- test_name = filesystem.splitext(test_name)[0]
- results = {}
- keys = ['avg', 'median', 'stdev', 'min', 'max']
- score_regex = re.compile(r'^(' + r'|'.join(keys) + r')\s+([0-9\.]+)')
- for line in re.split('\n', output.text):
- score = score_regex.match(line)
- if score:
- results[score.group(1)] = float(score.group(2))
- continue
-
- if not self._should_ignore_line_in_parser_test_result(line):
- test_failed = True
- self._printer.write("%s" % line)
-
- if test_failed or set(keys) != set(results.keys()):
- return True
- self._results[filesystem.join(category, test_name).replace('\\', '/')] = results
- self._buildbot_output.write('RESULT %s: %s= %s ms\n' % (category, test_name, results['avg']))
- self._buildbot_output.write(', '.join(['%s= %s ms' % (key, results[key]) for key in keys[1:]]) + '\n')
- return False
-
- def _run_single_test(self, test, driver, is_chromium_style):
- test_failed = False
+ def _run_single_test(self, test, driver):
start_time = time.time()
- output = driver.run_test(DriverInput(test, self._options.time_out_ms, None, False))
-
- if output.text == None:
- test_failed = True
- elif output.timeout:
- self._printer.write('timeout: %s' % test[self._webkit_base_dir_len + 1:])
- test_failed = True
- elif output.crash:
- self._printer.write('crash: %s' % test[self._webkit_base_dir_len + 1:])
- test_failed = True
+ new_results = test.run(driver, self._options.time_out_ms)
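+ # test.run() returns a dictionary of results on success and None on
+ # failure, so a falsy value here means the test failed.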
+ if new_results:
+ self._results.update(new_results)
else:
- if is_chromium_style:
- test_failed = self._process_chromium_style_test_result(test, output)
- else:
- test_failed = self._process_parser_test_result(test, output)
-
- if len(output.error):
- self._printer.write('error:\n%s' % output.error)
- test_failed = True
-
- if test_failed:
- self._printer.write('FAILED')
+ _log.error('FAILED')
- self._printer.write("Finished: %f s" % (time.time() - start_time))
+ _log.debug("Finished: %f s" % (time.time() - start_time))
- return not test_failed
+ return new_results is not None
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index 1a287a7cf..be925c953 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -39,6 +39,8 @@ from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.port.test import TestPort
from webkitpy.layout_tests.views import printing
+from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
+from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
@@ -109,15 +111,12 @@ max 1120
def stop(self):
"""do nothing"""
- def create_runner(self, buildbot_output=None, args=[], regular_output=None, driver_class=TestDriver):
- buildbot_output = buildbot_output or StringIO.StringIO()
- regular_output = regular_output or StringIO.StringIO()
-
+ def create_runner(self, args=None, driver_class=TestDriver):
+ args = args or []
options, parsed_args = PerfTestsRunner._parse_args(args)
test_port = TestPort(host=MockHost(), options=options)
test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
- runner = PerfTestsRunner(regular_output, buildbot_output, args=args, port=test_port)
+ runner = PerfTestsRunner(args=args, port=test_port)
runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
@@ -126,7 +125,7 @@ max 1120
def run_test(self, test_name):
runner = self.create_runner()
driver = MainTest.TestDriver()
- return runner._run_single_test(test_name, driver, is_chromium_style=True)
+ return runner._run_single_test(ChromiumStylePerfTest(test_name, runner._host.filesystem.join('some-dir', test_name)), driver)
def test_run_passing_test(self):
self.assertTrue(self.run_test('pass.html'))
@@ -146,15 +145,30 @@ max 1120
def test_run_crash_test(self):
self.assertFalse(self.run_test('crash.html'))
+ def _tests_for_runner(self, runner, test_names):
+ filesystem = runner._host.filesystem
+ tests = []
+ for test in test_names:
+ path = filesystem.join(runner._base_path, test)
+ if test.startswith('inspector/'):
+ tests.append(ChromiumStylePerfTest(test, path))
+ else:
+ tests.append(PerfTest(test, path))
+ return tests
+
def test_run_test_set(self):
- buildbot_output = StringIO.StringIO()
- runner = self.create_runner(buildbot_output)
- dirname = runner._base_path + '/inspector/'
- tests = [dirname + 'pass.html', dirname + 'silent.html', dirname + 'failed.html',
- dirname + 'tonguey.html', dirname + 'timeout.html', dirname + 'crash.html']
- unexpected_result_count = runner._run_tests_set(tests, runner._port)
+ runner = self.create_runner()
+ tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
+ 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
+ output = OutputCapture()
+ output.capture_output()
+ try:
+ unexpected_result_count = runner._run_tests_set(tests, runner._port)
+ finally:
+ stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, len(tests) - 1)
- self.assertWritten(buildbot_output, ['RESULT group_name: test_name= 42 ms\n'])
+ self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
def test_run_test_set_kills_drt_per_run(self):
@@ -164,14 +178,12 @@ max 1120
def stop(self):
TestDriverWithStopCount.stop_count += 1
- buildbot_output = StringIO.StringIO()
- runner = self.create_runner(buildbot_output, driver_class=TestDriverWithStopCount)
-
- dirname = runner._base_path + '/inspector/'
- tests = [dirname + 'pass.html', dirname + 'silent.html', dirname + 'failed.html',
- dirname + 'tonguey.html', dirname + 'timeout.html', dirname + 'crash.html']
+ runner = self.create_runner(driver_class=TestDriverWithStopCount)
+ tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
+ 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
unexpected_result_count = runner._run_tests_set(tests, runner._port)
+
self.assertEqual(TestDriverWithStopCount.stop_count, 6)
def test_run_test_pause_before_testing(self):
@@ -181,80 +193,101 @@ max 1120
def start(self):
TestDriverWithStartCount.start_count += 1
- buildbot_output = StringIO.StringIO()
- runner = self.create_runner(buildbot_output, args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
-
- dirname = runner._base_path + '/inspector/'
- tests = [dirname + 'pass.html']
+ runner = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
+ tests = self._tests_for_runner(runner, ['inspector/pass.html'])
+ output = OutputCapture()
+ output.capture_output()
try:
- output = OutputCapture()
- output.capture_output()
unexpected_result_count = runner._run_tests_set(tests, runner._port)
self.assertEqual(TestDriverWithStartCount.start_count, 1)
finally:
- _, stderr, logs = output.restore_output()
- self.assertEqual(stderr, "Ready to run test?\n")
- self.assertTrue("Running inspector/pass.html (1 of 1)" in logs)
+ stdout, stderr, log = output.restore_output()
+ self.assertEqual(stderr, "Ready to run test?\n")
+ self.assertEqual(log, "Running inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\n\n")
def test_run_test_set_for_parser_tests(self):
- buildbot_output = StringIO.StringIO()
- runner = self.create_runner(buildbot_output)
- tests = [runner._base_path + '/Bindings/event-target-wrapper.html', runner._base_path + '/Parser/some-parser.html']
- unexpected_result_count = runner._run_tests_set(tests, runner._port)
+ runner = self.create_runner()
+ tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
+ output = OutputCapture()
+ output.capture_output()
+ try:
+ unexpected_result_count = runner._run_tests_set(tests, runner._port)
+ finally:
+ stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, 0)
- self.assertWritten(buildbot_output, ['RESULT Bindings: event-target-wrapper= 1489.05 ms\n',
- 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms\n',
- 'RESULT Parser: some-parser= 1100.0 ms\n',
- 'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n'])
+ self.assertEqual(log, '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+ 'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+ 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+ '',
+ 'Running Parser/some-parser.html (2 of 2)',
+ 'RESULT Parser: some-parser= 1100.0 ms',
+ 'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
+ '', '']))
def test_run_test_set_with_json_output(self):
- buildbot_output = StringIO.StringIO()
- runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json'])
+ runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
runner._timestamp = 123456789
- self.assertEqual(runner.run(), 0)
- self.assertWritten(buildbot_output, ['RESULT Bindings: event-target-wrapper= 1489.05 ms\n',
- 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms\n',
- 'RESULT group_name: test_name= 42 ms\n'])
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ self.assertEqual(runner.run(), 0)
+ finally:
+ stdout, stderr, logs = output_capture.restore_output()
+
+ self.assertEqual(logs,
+ '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+ 'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+ 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+ '',
+ 'Running inspector/pass.html (2 of 2)',
+ 'RESULT group_name: test_name= 42 ms',
+ '', '']))
self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
"timestamp": 123456789, "results":
- {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46},
- "group_name:test_name": 42},
+ {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
+ "inspector/pass.html:group_name:test_name": 42},
"webkit-revision": 5678})
def test_run_test_set_with_json_source(self):
- buildbot_output = StringIO.StringIO()
- runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json',
- '--source-json-path=/mock-checkout/source.json'])
+ runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json', '--source-json-path=/mock-checkout/source.json'])
runner._host.filesystem.files['/mock-checkout/source.json'] = '{"key": "value"}'
runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
runner._host.filesystem.files[runner._base_path + '/Bindings/event-target-wrapper.html'] = True
runner._timestamp = 123456789
- self.assertEqual(runner.run(), 0)
- self.assertWritten(buildbot_output, ['RESULT Bindings: event-target-wrapper= 1489.05 ms\n',
- 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms\n',
- 'RESULT group_name: test_name= 42 ms\n'])
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ self.assertEqual(runner.run(), 0)
+ finally:
+ stdout, stderr, logs = output_capture.restore_output()
+
+ self.assertEqual(logs, '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
+ 'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+ 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+ '',
+ 'Running inspector/pass.html (2 of 2)',
+ 'RESULT group_name: test_name= 42 ms',
+ '', '']))
self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
"timestamp": 123456789, "results":
- {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46},
- "group_name:test_name": 42},
+ {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
+ "inspector/pass.html:group_name:test_name": 42},
"webkit-revision": 5678,
"key": "value"})
def test_run_test_set_with_multiple_repositories(self):
- buildbot_output = StringIO.StringIO()
- runner = self.create_runner(buildbot_output, args=['--output-json-path=/mock-checkout/output.json'])
+ runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
runner._host.filesystem.files[runner._base_path + '/inspector/pass.html'] = True
runner._timestamp = 123456789
runner._port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
self.assertEqual(runner.run(), 0)
-
self.assertEqual(json.loads(runner._host.filesystem.files['/mock-checkout/output.json']), {
- "timestamp": 123456789, "results": {"group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
+ "timestamp": 123456789, "results": {"inspector/pass.html:group_name:test_name": 42.0}, "webkit-revision": 5678, "some-revision": 5678})
def test_run_with_upload_json(self):
runner = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
@@ -287,8 +320,7 @@ max 1120
self.assertEqual(runner.run(), -3)
def test_upload_json(self):
- regular_output = StringIO.StringIO()
- runner = self.create_runner(regular_output=regular_output)
+ runner = self.create_runner()
runner._host.filesystem.files['/mock-checkout/some.json'] = 'some content'
called = []
@@ -333,6 +365,9 @@ max 1120
tests = runner._collect_tests()
self.assertEqual(len(tests), 1)
+ def _collect_tests_and_sort_test_name(self, runner):
+ return sorted([test.test_name() for test in runner._collect_tests()])
+
def test_collect_tests(self):
runner = self.create_runner(args=['PerformanceTests/test1.html', 'test2.html'])
@@ -343,8 +378,7 @@ max 1120
add_file('test2.html')
add_file('test3.html')
runner._host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
- tests = [runner._port.relative_perf_test_filename(test) for test in runner._collect_tests()]
- self.assertEqual(sorted(tests), ['test1.html', 'test2.html'])
+ self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
def test_collect_tests_with_skipped_list(self):
runner = self.create_runner()
@@ -360,13 +394,24 @@ max 1120
add_file('inspector/resources', 'resource_file.html')
add_file('unsupported', 'unsupported_test2.html')
runner._port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
- tests = [runner._port.relative_perf_test_filename(test) for test in runner._collect_tests()]
- self.assertEqual(sorted(tests), ['inspector/test1.html', 'inspector/test2.html'])
+ self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
+
+ def test_collect_tests_with_page_load_svg(self):
+ runner = self.create_runner()
+
+ def add_file(dirname, filename, content=True):
+ dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
+ runner._host.filesystem.maybe_make_directory(dirname)
+ runner._host.filesystem.files[runner._host.filesystem.join(dirname, filename)] = content
+
+ add_file('PageLoad', 'some-svg-test.svg')
+ tests = runner._collect_tests()
+ self.assertEqual(len(tests), 1)
+ self.assertEqual(tests[0].__class__.__name__, 'PageLoadingPerfTest')
def test_parse_args(self):
runner = self.create_runner()
options, args = PerfTestsRunner._parse_args([
- '--verbose',
'--build-directory=folder42',
'--platform=platform42',
'--builder-name', 'webkit-mac-1',
@@ -375,17 +420,14 @@ max 1120
'--output-json-path=a/output.json',
'--source-json-path=a/source.json',
'--test-results-server=somehost',
- '--debug', 'an_arg'])
+ '--debug'])
self.assertEqual(options.build, True)
- self.assertEqual(options.verbose, True)
- self.assertEqual(options.help_printing, None)
self.assertEqual(options.build_directory, 'folder42')
self.assertEqual(options.platform, 'platform42')
self.assertEqual(options.builder_name, 'webkit-mac-1')
self.assertEqual(options.build_number, '56')
self.assertEqual(options.time_out_ms, '42')
self.assertEqual(options.configuration, 'Debug')
- self.assertEqual(options.print_options, None)
self.assertEqual(options.output_json_path, 'a/output.json')
self.assertEqual(options.source_json_path, 'a/source.json')
self.assertEqual(options.test_results_server, 'somehost')
diff --git a/Tools/Scripts/webkitpy/style/checker.py b/Tools/Scripts/webkitpy/style/checker.py
index f7c82cfea..e72a025f4 100644
--- a/Tools/Scripts/webkitpy/style/checker.py
+++ b/Tools/Scripts/webkitpy/style/checker.py
@@ -40,6 +40,7 @@ from checkers.common import CarriageReturnChecker
from checkers.changelog import ChangeLogChecker
from checkers.cpp import CppChecker
from checkers.jsonchecker import JSONChecker
+from checkers.png import PNGChecker
from checkers.python import PythonChecker
from checkers.test_expectations import TestExpectationsChecker
from checkers.text import TextChecker
@@ -289,6 +290,8 @@ _XML_FILE_EXTENSIONS = [
'vsprops',
]
+_PNG_FILE_EXTENSION = 'png'
+
# Files to skip that are less obvious.
#
# Some files should be skipped when checking style. For example,
@@ -312,6 +315,7 @@ _SKIPPED_FILES_WITHOUT_WARNING = [
# Extensions of files which are allowed to contain carriage returns.
_CARRIAGE_RETURN_ALLOWED_FILE_EXTENSIONS = [
+ 'png',
'vcproj',
'vsprops',
]
@@ -330,6 +334,7 @@ def _all_categories():
categories = categories.union(JSONChecker.categories)
categories = categories.union(TestExpectationsChecker.categories)
categories = categories.union(ChangeLogChecker.categories)
+ categories = categories.union(PNGChecker.categories)
# FIXME: Consider adding all of the pep8 categories. Since they
# are not too meaningful for documentation purposes, for
@@ -471,11 +476,12 @@ class FileType:
CHANGELOG = 1
CPP = 2
JSON = 3
- PYTHON = 4
- TEXT = 5
- WATCHLIST = 6
- XML = 7
- XCODEPROJ = 8
+ PNG = 4
+ PYTHON = 5
+ TEXT = 6
+ WATCHLIST = 7
+ XML = 8
+ XCODEPROJ = 9
class CheckerDispatcher(object):
@@ -487,6 +493,9 @@ class CheckerDispatcher(object):
return os.path.splitext(file_path)[1].lstrip(".")
def _should_skip_file_path(self, file_path, skip_array_entry):
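+ # PNG files are never skipped: their svn:mime-type property needs to be
+ # checked even when they live in otherwise-skipped directories.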
+ match = re.search("\.png$", file_path)
+ if match:
+ return False
if isinstance(skip_array_entry, str):
if file_path.find(skip_array_entry) >= 0:
return True
@@ -550,6 +559,8 @@ class CheckerDispatcher(object):
return FileType.WATCHLIST
elif file_extension == _XCODEPROJ_FILE_EXTENSION:
return FileType.XCODEPROJ
+ elif file_extension == _PNG_FILE_EXTENSION:
+ return FileType.PNG
elif ((not file_extension and os.path.join("Tools", "Scripts") in file_path) or
file_extension in _TEXT_FILE_EXTENSIONS):
return FileType.TEXT
@@ -578,6 +589,8 @@ class CheckerDispatcher(object):
checker = XMLChecker(file_path, handle_style_error)
elif file_type == FileType.XCODEPROJ:
checker = XcodeProjectFileChecker(file_path, handle_style_error)
+ elif file_type == FileType.PNG:
+ checker = PNGChecker(file_path, handle_style_error)
elif file_type == FileType.TEXT:
basename = os.path.basename(file_path)
if basename == 'test_expectations.txt' or basename == 'drt_expectations.txt':
diff --git a/Tools/Scripts/webkitpy/style/checker_unittest.py b/Tools/Scripts/webkitpy/style/checker_unittest.py
index 60a959faf..d834fd557 100755
--- a/Tools/Scripts/webkitpy/style/checker_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checker_unittest.py
@@ -590,7 +590,6 @@ class CheckerDispatcherDispatchTest(unittest.TestCase):
paths = [
"Makefile",
"foo.asdf", # Non-sensical file extension.
- "foo.png",
"foo.exe",
]
diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp.py b/Tools/Scripts/webkitpy/style/checkers/cpp.py
index f29361766..1eea4973f 100644
--- a/Tools/Scripts/webkitpy/style/checkers/cpp.py
+++ b/Tools/Scripts/webkitpy/style/checkers/cpp.py
@@ -2433,8 +2433,8 @@ def check_for_null(clean_lines, line_number, file_state, error):
if search(r'\bgdk_pixbuf_save_to\w+\b', line):
return
- # Don't warn about NULL usage in gtk_widget_style_get(). See Bug 51758.
- if search(r'\bgtk_widget_style_get\(\w+\b', line):
+ # Don't warn about NULL usage in gtk_widget_style_get() or gtk_style_context_get_style(). See Bug 51758.
+ if search(r'\bgtk_widget_style_get\(\w+\b', line) or search(r'\bgtk_style_context_get_style\(\w+\b', line):
return
# Don't warn about NULL usage in soup_server_new(). See Bug 77890.
@@ -2776,8 +2776,11 @@ def check_include_line(filename, file_extension, clean_lines, line_number, inclu
if previous_match:
previous_header_type = include_state.header_types[previous_line_number]
if previous_header_type == _OTHER_HEADER and previous_line.strip() > line.strip():
- error(line_number, 'build/include_order', 4,
- 'Alphabetical sorting problem.')
+ # This type of error is potentially a problem with this line or the previous one,
+ # so if the error is filtered for one line, report it for the next. This is so that
+ # we properly handle patches, for which only modified lines produce errors.
+ if not error(line_number - 1, 'build/include_order', 4, 'Alphabetical sorting problem.'):
+ error(line_number, 'build/include_order', 4, 'Alphabetical sorting problem.')
if error_message:
if file_extension == 'h':
@@ -3106,6 +3109,7 @@ def check_identifier_name_in_declaration(filename, line_number, line, file_state
and not (filename.find('gtk') >= 0 and modified_identifier.startswith('webkit_') >= 0)
and not modified_identifier.startswith('tst_')
and not modified_identifier.startswith('webkit_dom_object_')
+ and not modified_identifier.startswith('webkit_soup')
and not modified_identifier.startswith('NPN_')
and not modified_identifier.startswith('NPP_')
and not modified_identifier.startswith('NP_')
diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
index 044f46b19..cba917108 100644
--- a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
@@ -53,22 +53,28 @@ class ErrorCollector:
# This is a list including all categories seen in any unit test.
_seen_style_categories = {}
- def __init__(self, assert_fn, filter=None):
+ def __init__(self, assert_fn, filter=None, lines_to_check=None):
"""assert_fn: a function to call when we notice a problem.
filter: filters the errors that we are concerned about."""
self._assert_fn = assert_fn
self._errors = []
+ self._lines_to_check = lines_to_check
if not filter:
filter = FilterConfiguration()
self._filter = filter
- def __call__(self, unused_linenum, category, confidence, message):
+ def __call__(self, line_number, category, confidence, message):
self._assert_fn(category in self._all_style_categories,
'Message "%s" has category "%s",'
' which is not in STYLE_CATEGORIES' % (message, category))
+
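+ # Report whether the error was actually recorded, mirroring
+ # DefaultStyleErrorHandler, so callers can retry on an adjacent line.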
+ if self._lines_to_check and line_number not in self._lines_to_check:
+ return False
+
if self._filter.should_check(category, ""):
self._seen_style_categories[category] = 1
self._errors.append('%s [%s] [%d]' % (message, category, confidence))
+ return True
def results(self):
if len(self._errors) < 2:
@@ -246,8 +252,8 @@ class CppStyleTestBase(unittest.TestCase):
return cpp_style.process_file_data(filename, file_extension, lines,
error, self.min_confidence, unit_test_config)
- def perform_lint(self, code, filename, basic_error_rules, unit_test_config={}):
- error_collector = ErrorCollector(self.assert_, FilterConfiguration(basic_error_rules))
+ def perform_lint(self, code, filename, basic_error_rules, unit_test_config={}, lines_to_check=None):
+ error_collector = ErrorCollector(self.assert_, FilterConfiguration(basic_error_rules), lines_to_check)
lines = code.split('\n')
extension = filename.split('.')[1]
self.process_file_data(filename, extension, lines, error_collector, unit_test_config)
@@ -272,13 +278,13 @@ class CppStyleTestBase(unittest.TestCase):
return self.perform_lint(code, 'test.' + file_extension, basic_error_rules)
# Only keep some errors related to includes, namespaces and rtti.
- def perform_language_rules_check(self, filename, code):
+ def perform_language_rules_check(self, filename, code, lines_to_check=None):
basic_error_rules = ('-',
'+build/include',
'+build/include_order',
'+build/namespaces',
'+runtime/rtti')
- return self.perform_lint(code, filename, basic_error_rules)
+ return self.perform_lint(code, filename, basic_error_rules, lines_to_check=lines_to_check)
# Only keep function length errors.
def perform_function_lengths_check(self, code):
@@ -327,9 +333,9 @@ class CppStyleTestBase(unittest.TestCase):
if not re.search(expected_message_re, message):
self.fail('Message was:\n' + message + 'Expected match to "' + expected_message_re + '"')
- def assert_language_rules_check(self, file_name, code, expected_message):
+ def assert_language_rules_check(self, file_name, code, expected_message, lines_to_check=None):
self.assertEquals(expected_message,
- self.perform_language_rules_check(file_name, code))
+ self.perform_language_rules_check(file_name, code, lines_to_check))
def assert_include_what_you_use(self, code, expected_message):
self.assertEquals(expected_message,
@@ -2568,6 +2574,30 @@ class OrderOfIncludesTest(CppStyleTestBase):
'#include <assert.h>\n',
'')
+ def test_check_alphabetical_include_order_errors_reported_for_both_lines(self):
+ # If one of the two lines of out of order headers are filtered, the error should be
+ # reported on the other line.
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "c.h"\n'
+ '#include "b.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]',
+ lines_to_check=[2])
+
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "c.h"\n'
+ '#include "b.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]',
+ lines_to_check=[3])
+
+ # If no lines are filtered, the error should be reported only once.
+ self.assert_language_rules_check('foo.h',
+ '#include "a.h"\n'
+ '#include "c.h"\n'
+ '#include "b.h"\n',
+ 'Alphabetical sorting problem. [build/include_order] [4]')
+
def test_check_line_break_after_own_header(self):
self.assert_language_rules_check('foo.cpp',
'#include "config.h"\n'
@@ -4238,6 +4268,9 @@ class WebKitStyleTest(CppStyleTestBase):
'gtk_widget_style_get(style, "propertyName", &value, "otherName", &otherValue, NULL);',
'')
self.assert_lint(
+ 'gtk_style_context_get_style(context, "propertyName", &value, "otherName", &otherValue, NULL);',
+ '')
+ self.assert_lint(
'gtk_widget_style_get_property(style, NULL, NULL);',
'Use 0 instead of NULL. [readability/null] [5]',
'foo.cpp')
diff --git a/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py b/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
index 503f72fa4..973c67384 100755
--- a/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
@@ -39,6 +39,7 @@ class MockErrorHandler(object):
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
+ return True
class JSONCheckerTest(unittest.TestCase):
diff --git a/Tools/Scripts/webkitpy/style/checkers/png.py b/Tools/Scripts/webkitpy/style/checkers/png.py
new file mode 100644
index 000000000..e0b79e27e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/png.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Supports checking WebKit style in png files."""
+
+import os
+import re
+
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.checkout.scm.detection import SCMDetector
+
+
+class PNGChecker(object):
+ """Check svn:mime-type for checking style"""
+
+ categories = set(['image/png'])
+
+ def __init__(self, file_path, handle_style_error, scm=None, host=None):
+ self._file_path = file_path
+ self._handle_style_error = handle_style_error
+ self._host = host or SystemHost()
+ self._fs = self._host.filesystem
+ self._detector = scm or SCMDetector(self._fs, self._host.executive).detect_scm_system(self._fs.getcwd())
+
+ def check(self, inline=None):
+ errorstr = ""
+ config_file_path = ""
+ detection = self._detector.display_name()
+
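+ # With git there is no svn:mime-type property to query, so the best we
+ # can do is check that the user's Subversion config enables auto-props
+ # for *.png files; with svn we can query the property directly.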
+ if detection == "git":
+ config_file_path = self._config_file_path()
+ there_is_enable_line = False
+ there_is_png_line = False
+
+ try:
+ config_file = self._fs.read_text_file(config_file_path)
+ except IOError:
+ errorstr = "There is no " + config_file_path
+ self._handle_style_error(0, 'image/png', 5, errorstr)
+ return
+
+ errorstr_autoprop = "You have to enable auto-props in the Subversion config file (" + config_file_path + " \"enable-auto-props = yes\"). "
+ errorstr_png = "You have to set the svn:mime-type in the Subversion config file (" + config_file_path + " \"*.png = svn:mime-type=image/png\")."
+
+ for line in config_file.split('\n'):
+ if not there_is_enable_line:
+ match = re.search("^\s*enable-auto-props\s*=\s*yes", line)
+ if match:
+ there_is_enable_line = True
+ errorstr_autoprop = ""
+ continue
+
+ if not there_is_png_line:
+ match = re.search("^\s*\*\.png\s*=\s*svn:mime-type=image/png", line)
+ if match:
+ there_is_png_line = True
+ errorstr_png = ""
+ continue
+
+ errorstr = errorstr_autoprop + errorstr_png
+ if errorstr:
+ self._handle_style_error(0, 'image/png', 5, errorstr)
+
+ elif detection == "svn":
+ prop_get = self._detector.propget("svn:mime-type", self._file_path)
+ if prop_get != "image/png":
+ errorstr = "Set the svn:mime-type property (svn propset svn:mime-type image/png " + self._file_path + ")."
+ self._handle_style_error(0, 'image/png', 5, errorstr)
+
+ def _config_file_path(self):
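+ # The Subversion config file lives under %APPDATA% on Windows and
+ # under ~/.subversion elsewhere.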
+ config_file = ""
+ if self._host.platform.is_win():
+ config_file_path = self._fs.join(os.environ['APPDATA'], "Subversion\config")
+ else:
+ config_file_path = self._fs.join(self._fs.expanduser("~"), ".subversion/config")
+ return config_file_path
diff --git a/Tools/Scripts/webkitpy/style/checkers/png_unittest.py b/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
new file mode 100644
index 000000000..ae46641a2
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
@@ -0,0 +1,118 @@
+# Copyright (C) 2012 Balazs Ankes (bank@inf.u-szeged.hu) University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for png.py."""
+
+import unittest
+from png import PNGChecker
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.systemhost_mock import MockSystemHost
+
+
+class MockSCMDetector(object):
+
+ def __init__(self, scm, prop=None):
+ self._scm = scm
+ self._prop = prop
+
+ def display_name(self):
+ return self._scm
+
+ def propget(self, pname, path):
+ return self._prop
+
+
+class PNGCheckerTest(unittest.TestCase):
+ """Tests PNGChecker class."""
+
+ def test_init(self):
+ """Test __init__() method."""
+
+ def mock_handle_style_error(self):
+ pass
+
+ checker = PNGChecker("test/config", mock_handle_style_error, MockSCMDetector('git'), MockSystemHost())
+ self.assertEquals(checker._file_path, "test/config")
+ self.assertEquals(checker._handle_style_error, mock_handle_style_error)
+
+ def test_check(self):
+ errors = []
+
+ def mock_handle_style_error(line_number, category, confidence, message):
+ error = (line_number, category, confidence, message)
+ errors.append(error)
+
+ file_path = ''
+
+ fs = MockFileSystem()
+
+ scm = MockSCMDetector('svn')
+ checker = PNGChecker(file_path, mock_handle_style_error, scm, MockSystemHost(filesystem=fs))
+ checker.check()
+ self.assertEquals(len(errors), 1)
+ self.assertEquals(errors[0],
+ (0, 'image/png', 5, 'Set the svn:mime-type property (svn propset svn:mime-type image/png ).'))
+
+ files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEquals(len(errors), 0)
+
+ files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEquals(len(errors), 1)
+
+ files = {'/Users/mock/.subversion/config': 'enable-auto-props = yes\n#enable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEquals(len(errors), 0)
+
+ files = {'/Users/mock/.subversion/config': '#enable-auto-props = yes\nenable-auto-props = yes\n*.png = svn:mime-type=image/png'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEquals(len(errors), 0)
+
+ files = {'/Users/mock/.subversion/config': 'enable-auto-props = no'}
+ fs = MockFileSystem(files)
+ scm = MockSCMDetector('git')
+ errors = []
+ checker = PNGChecker("config", mock_handle_style_error, scm, MockSystemHost(os_name='linux', filesystem=fs))
+ checker.check()
+ self.assertEquals(len(errors), 1)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
index 77bdf8289..b6e3595fa 100644
--- a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
@@ -47,6 +47,7 @@ class ErrorCollector(object):
def __call__(self, lineno, category, confidence, message):
self._errors.append('%s [%s] [%d]' % (message, category, confidence))
+ return True
def get_errors(self):
return ''.join(self._errors)
diff --git a/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py b/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
index f8ece354c..c8d29db02 100644
--- a/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
@@ -49,6 +49,7 @@ class MockErrorHandler(object):
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
+ return True
class WatchListTest(unittest.TestCase):
diff --git a/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py b/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
index 91d9192e9..9799ec016 100644
--- a/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
@@ -39,6 +39,7 @@ class TestErrorHandler(object):
def __call__(self, line_number, category, confidence, message):
self.handler(self, line_number, category, confidence, message)
+ return True
class XcodeProjectFileCheckerTest(unittest.TestCase):
diff --git a/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py b/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
index 9bcf97f0a..e486f5fe4 100644
--- a/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
@@ -39,6 +39,7 @@ class MockErrorHandler(object):
def __call__(self, line_number, category, confidence, message):
self._handle_style_error(self, line_number, category, confidence, message)
+ return True
class XMLCheckerTest(unittest.TestCase):
diff --git a/Tools/Scripts/webkitpy/style/error_handlers.py b/Tools/Scripts/webkitpy/style/error_handlers.py
index 89d1c8a3d..99d5cb33b 100644
--- a/Tools/Scripts/webkitpy/style/error_handlers.py
+++ b/Tools/Scripts/webkitpy/style/error_handlers.py
@@ -138,12 +138,12 @@ class DefaultStyleErrorHandler(object):
"""
if not self.should_line_be_checked(line_number):
- return
+ return False
if not self._configuration.is_reportable(category=category,
confidence_in_error=confidence,
file_path=self._file_path):
- return
+ return False
category_total = self._add_reportable_error(category)
@@ -151,14 +151,14 @@ class DefaultStyleErrorHandler(object):
if (max_reports is not None) and (category_total > max_reports):
# Then suppress displaying the error.
- return
+ return False
self._configuration.write_style_error(category=category,
confidence_in_error=confidence,
file_path=self._file_path,
line_number=line_number,
message=message)
-
if category_total == max_reports:
self._configuration.stderr_write("Suppressing further [%s] reports "
"for this file.\n" % category)
+ return True
diff --git a/Tools/Scripts/webkitpy/style/patchreader.py b/Tools/Scripts/webkitpy/style/patchreader.py
index 6ac32ded7..8495cd054 100644
--- a/Tools/Scripts/webkitpy/style/patchreader.py
+++ b/Tools/Scripts/webkitpy/style/patchreader.py
@@ -29,8 +29,12 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
+import re
from webkitpy.common.checkout.diff_parser import DiffParser
+from webkitpy.common.system.executive import Executive
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.common.checkout.scm.detection import SCMDetector
_log = logging.getLogger(__name__)
@@ -48,15 +52,28 @@ class PatchReader(object):
"""
self._text_file_reader = text_file_reader
- def check(self, patch_string):
+ def check(self, patch_string, fs=None):
"""Check style in the given patch."""
+ fs = fs or FileSystem()
patch_files = DiffParser(patch_string.splitlines()).files
+ # With git, the Subversion config file only needs to be checked once.
+ call_only_once = True
+
for path, diff_file in patch_files.iteritems():
line_numbers = diff_file.added_or_modified_line_numbers()
_log.debug('Found %s new or modified lines in: %s' % (len(line_numbers), path))
if not line_numbers:
+ match = re.search("\.png$", path)
+ if match and fs.exists(path):
+ if call_only_once:
+ self._text_file_reader.process_file(file_path=path, line_numbers=None)
+ cwd = fs.getcwd()
+ detection = SCMDetector(fs, Executive()).detect_scm_system(cwd)
+ if detection.display_name() == "git":
+ call_only_once = False
+ continue
# Don't check files which contain only deleted lines
# as they can never add style errors. However, mark them as
# processed so that we count up number of such files.
diff --git a/Tools/Scripts/webkitpy/style/patchreader_unittest.py b/Tools/Scripts/webkitpy/style/patchreader_unittest.py
index b1210825b..eb26d4761 100644
--- a/Tools/Scripts/webkitpy/style/patchreader_unittest.py
+++ b/Tools/Scripts/webkitpy/style/patchreader_unittest.py
@@ -33,6 +33,7 @@
import unittest
+from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.style.patchreader import PatchReader
@@ -90,3 +91,13 @@ index ef65bee..e3db70e 100644
""")
# _mock_check_file should not be called for the deletion patch.
self._assert_checked([], 1)
+
+ def test_check_patch_with_png_deletion(self):
+ fs = MockFileSystem()
+ diff_text = """Index: LayoutTests/platform/mac/foo-expected.png
+===================================================================
+Cannot display: file marked as a binary type.
+svn:mime-type = image/png
+"""
+ self._patch_checker.check(diff_text, fs)
+ self._assert_checked([], 1)
diff --git a/Tools/Scripts/webkitpy/test/main.py b/Tools/Scripts/webkitpy/test/main.py
index 76dac0e3b..af3123a01 100644
--- a/Tools/Scripts/webkitpy/test/main.py
+++ b/Tools/Scripts/webkitpy/test/main.py
@@ -1,3 +1,4 @@
+# Copyright (C) 2012 Google, Inc.
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
@@ -24,34 +25,27 @@
import logging
import optparse
-import os
import StringIO
import sys
import traceback
import unittest
-# NOTE: We intentionally do not depend on anything else in webkitpy here to avoid breaking test-webkitpy.
+from webkitpy.common.system.filesystem import FileSystem
+from webkitpy.test.test_finder import TestFinder
+from webkitpy.test.runner import TestRunner
_log = logging.getLogger(__name__)
class Tester(object):
- @staticmethod
- def clean_packages(dirs):
- """Delete all .pyc files under dirs that have no .py file."""
- for dir_to_clean in dirs:
- _log.debug("Cleaning orphaned *.pyc files from: %s" % dir_to_clean)
- for dir_path, dir_names, file_names in os.walk(dir_to_clean):
- for file_name in file_names:
- if file_name.endswith(".pyc") and file_name[:-1] not in file_names:
- file_path = os.path.join(dir_path, file_name)
- _log.info("Deleting orphan *.pyc file: %s" % file_path)
- os.remove(file_path)
-
- def __init__(self):
- self._verbosity = 1
-
- def parse_args(self, argv):
+ def __init__(self, filesystem=None):
+ self.finder = TestFinder(filesystem or FileSystem())
+ self.stream = sys.stderr
+
+ def add_tree(self, top_directory, starting_subdirectory=None):
+ self.finder.add_tree(top_directory, starting_subdirectory)
+
+ def _parse_args(self):
parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')
parser.add_option('-a', '--all', action='store_true', default=False,
help='run all the tests'),
@@ -59,10 +53,8 @@ class Tester(object):
help='generate code coverage info (requires http://pypi.python.org/pypi/coverage)'),
parser.add_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)'),
- parser.add_option('-s', '--silent', action='store_true', default=False,
- help='run silently (errors and warnings only)'),
- parser.add_option('-x', '--xml', action='store_true', default=False,
- help='output xUnit-style XML output')
+ parser.add_option('-t', '--timing', action='store_true', default=False,
+ help='display per-test execution time (implies --verbose)'),
parser.add_option('-v', '--verbose', action='count', default=0,
help='verbose output (specify once for individual test results, twice for debug messages)')
parser.add_option('--skip-integrationtests', action='store_true', default=False,
@@ -71,27 +63,21 @@ class Tester(object):
parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. '
'If no args are given, all the tests will be run.')
- self.progName = os.path.basename(argv[0])
- return parser.parse_args(argv[1:])
+ return parser.parse_args()
- def configure(self, options):
+ def _configure(self, options):
self._options = options
- if options.silent:
- self._verbosity = 0
- self._configure_logging(logging.WARNING)
- elif options.quiet:
- self._verbosity = 1
- self._configure_logging(logging.WARNING)
- elif options.verbose == 0:
- self._verbosity = 1
- self._configure_logging(logging.INFO)
- elif options.verbose == 1:
- self._verbosity = 2
- self._configure_logging(logging.INFO)
+ if options.timing:
+ # --timing implies --verbose
+ options.verbose = max(options.verbose, 1)
+
+ log_level = logging.INFO
+ if options.quiet:
+ log_level = logging.WARNING
elif options.verbose == 2:
- self._verbosity = 2
- self._configure_logging(logging.DEBUG)
+ log_level = logging.DEBUG
+ self._configure_logging(log_level)
def _configure_logging(self, log_level):
"""Configure the root logger.
@@ -100,7 +86,7 @@ class Tester(object):
except for messages from the autoinstall module. Also set the
logging level as described below.
"""
- handler = logging.StreamHandler(sys.stderr)
+ handler = logging.StreamHandler(self.stream)
# We constrain the level on the handler rather than on the root
# logger itself. This is probably better because the handler is
# configured and known only to this module, whereas the root logger
@@ -108,6 +94,7 @@ class Tester(object):
# Modifying the handler, then, is less intrusive and less likely to
# interfere with modifications made by other modules (e.g. in unit
# tests).
+ handler.name = __name__
handler.setLevel(log_level)
formatter = logging.Formatter("%(message)s")
handler.setFormatter(formatter)
@@ -141,61 +128,16 @@ class Tester(object):
_log.info("Suppressing most webkitpy logging while running unit tests.")
handler.addFilter(testing_filter)
- def run(self, dirs, args):
- args = args or self._find_modules(dirs)
- return self._run_tests(dirs, args)
-
- def _find_modules(self, dirs):
- modules = []
- for dir_to_search in dirs:
- modules.extend(self._find_modules_under(dir_to_search, '_unittest.py'))
- if not self._options.skip_integrationtests:
- modules.extend(self._find_modules_under(dir_to_search, '_integrationtest.py'))
- modules.sort()
-
- for module in modules:
- _log.debug("Found: %s" % module)
-
- # FIXME: Figure out how to move this to test-webkitpy in order to to make this file more generic.
- if not self._options.all:
- slow_tests = ('webkitpy.common.checkout.scm.scm_unittest',)
- self._exclude(modules, slow_tests, 'are really, really slow', 31818)
-
- if sys.platform == 'win32':
- win32_blacklist = ('webkitpy.common.checkout',
- 'webkitpy.common.config',
- 'webkitpy.tool')
- self._exclude(modules, win32_blacklist, 'fail horribly on win32', 54526)
-
- return modules
-
- def _exclude(self, modules, module_prefixes, reason, bugid):
- _log.info('Skipping tests in the following modules or packages because they %s:' % reason)
- for prefix in module_prefixes:
- _log.info(' %s' % prefix)
- modules_to_exclude = filter(lambda m: m.startswith(prefix), modules)
- for m in modules_to_exclude:
- if len(modules_to_exclude) > 1:
- _log.debug(' %s' % m)
- modules.remove(m)
- _log.info(' (https://bugs.webkit.org/show_bug.cgi?id=%d; use --all to include)' % bugid)
- _log.info('')
-
- def _find_modules_under(self, dir_to_search, suffix):
-
- def to_package(dir_path):
- return dir_path.replace(dir_to_search + os.sep, '').replace(os.sep, '.')
-
- def to_module(filename, package):
- return package + '.' + filename.replace('.py', '')
-
- modules = []
- for dir_path, _, filenames in os.walk(dir_to_search):
- package = to_package(dir_path)
- modules.extend(to_module(f, package) for f in filenames if f.endswith(suffix))
- return modules
-
- def _run_tests(self, dirs, args):
+ def run(self):
+ options, args = self._parse_args()
+ self._configure(options)
+
+ self.finder.clean_trees()
+
+ names = self.finder.find_names(args, self._options.skip_integrationtests, self._options.all)
+ return self._run_tests(names)
+
+ def _run_tests(self, names):
if self._options.coverage:
try:
import webkitpy.thirdparty.autoinstalled.coverage as coverage
@@ -205,28 +147,29 @@ class Tester(object):
cov = coverage.coverage()
cov.start()
+ # Make sure PYTHONPATH is set up properly.
+ sys.path = self.finder.additional_paths(sys.path) + sys.path
+
_log.debug("Loading the tests...")
loader = unittest.defaultTestLoader
suites = []
- for name in args:
- if self._is_module(dirs, name):
- # import modules explicitly before loading their tests because
- # loadTestsFromName() produces lousy error messages for bad modules.
+ for name in names:
+ if self.finder.is_module(name):
+ # Import a name that looks like a module explicitly first,
+ # because loadTestsFromName() produces lousy error messages
+ # for bad modules.
try:
__import__(name)
except ImportError, e:
_log.fatal('Failed to import %s:' % name)
self._log_exception()
return False
+
suites.append(loader.loadTestsFromName(name, None))
test_suite = unittest.TestSuite(suites)
- if self._options.xml:
- from webkitpy.thirdparty.autoinstalled.xmlrunner import XMLTestRunner
- test_runner = XMLTestRunner(output='test-webkitpy-xml-reports')
- else:
- test_runner = unittest.TextTestRunner(verbosity=self._verbosity)
+ test_runner = TestRunner(self.stream, self._options, loader)
_log.debug("Running the tests.")
result = test_runner.run(test_suite)
@@ -236,10 +179,6 @@ class Tester(object):
cov.report(show_missing=False)
return result.wasSuccessful()
- def _is_module(self, dirs, name):
- relpath = name.replace('.', os.sep) + '.py'
- return any(os.path.exists(os.path.join(d, relpath)) for d in dirs)
-
def _log_exception(self):
s = StringIO.StringIO()
traceback.print_exc(file=s)
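
One detail of the new run() worth calling out is the sys.path fix-up: the finder's tree roots are prepended only when they are not already present, so module names discovered on disk can actually be imported. A self-contained sketch of the idiom, with a hypothetical tree root:

    import sys

    def prepend_missing_paths(tree_roots, path_list):
        # Mirrors finder.additional_paths(): prepend only the roots that
        # are not already present, preserving their order.
        return [p for p in tree_roots if p not in path_list] + path_list

    sys.path = prepend_missing_paths(['/src/Tools/Scripts'], sys.path)  # hypothetical root
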
diff --git a/Tools/Scripts/webkitpy/test/runner.py b/Tools/Scripts/webkitpy/test/runner.py
new file mode 100644
index 000000000..e190f2cd4
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/runner.py
@@ -0,0 +1,132 @@
+# Copyright (C) 2012 Google, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""code to actually run a list of python tests."""
+
+import logging
+import re
+import time
+import unittest
+
+
+_log = logging.getLogger(__name__)
+
+
+class TestRunner(object):
+ def __init__(self, stream, options, loader):
+ self.options = options
+ self.stream = stream
+ self.loader = loader
+ self.test_description = re.compile(r"(\w+) \(([\w.]+)\)")
+
+ def test_name(self, test):
+ m = self.test_description.match(str(test))
+ return "%s.%s" % (m.group(2), m.group(1))
+
+ def all_test_names(self, suite):
+ names = []
+ if hasattr(suite, '_tests'):
+ for t in suite._tests:
+ names.extend(self.all_test_names(t))
+ else:
+ names.append(self.test_name(suite))
+ return names
+
+ def run(self, suite):
+ run_start_time = time.time()
+ all_test_names = self.all_test_names(suite)
+ result = unittest.TestResult()
+ stop = run_start_time
+ for test_name in all_test_names:
+ if self.options.verbose:
+ self.stream.write(test_name)
+ num_failures = len(result.failures)
+ num_errors = len(result.errors)
+
+ start = time.time()
+ # FIXME: it's kinda lame that we re-load the test suites for each
+ # test, and this may slow things down, but this makes implementing
+ # the logging easy and will also allow us to parallelize nicely.
+ self.loader.loadTestsFromName(test_name, None).run(result)
+ stop = time.time()
+
+ err = None
+ failure = None
+ if len(result.failures) > num_failures:
+ failure = result.failures[num_failures][1]
+ elif len(result.errors) > num_errors:
+ err = result.errors[num_errors][1]
+ self.write_result(result, test_name, stop - start, failure, err)
+
+ self.write_summary(result, stop - run_start_time)
+
+ return result
+
+ def write_result(self, result, test_name, test_time, failure=None, err=None):
+ timing = ''
+ if self.options.timing:
+ timing = ' %.4fs' % test_time
+ if self.options.verbose:
+ if failure:
+ msg = ' failed'
+ elif err:
+ msg = ' erred'
+ else:
+ msg = ' passed'
+ self.stream.write(msg + timing + '\n')
+ else:
+ if failure:
+ msg = 'F'
+ elif err:
+ msg = 'E'
+ else:
+ msg = '.'
+ self.stream.write(msg)
+
+ def write_summary(self, result, run_time):
+ self.stream.write('\n')
+
+ for (test, err) in result.errors:
+ self.stream.write("=" * 80 + '\n')
+ self.stream.write("ERROR: " + self.test_name(test) + '\n')
+ self.stream.write("-" * 80 + '\n')
+ for line in err.splitlines():
+ self.stream.write(line + '\n')
+ self.stream.write('\n')
+
+ for (test, failure) in result.failures:
+ self.stream.write("=" * 80 + '\n')
+ self.stream.write("FAILURE: " + self.test_name(test) + '\n')
+ self.stream.write("-" * 80 + '\n')
+ for line in failure.splitlines():
+ self.stream.write(line + '\n')
+ self.stream.write('\n')
+
+ self.stream.write('-' * 80 + '\n')
+ self.stream.write('Ran %d test%s in %.3fs\n' %
+ (result.testsRun, result.testsRun != 1 and "s" or "", run_time))
+
+ if result.wasSuccessful():
+ self.stream.write('\nOK\n')
+ else:
+ self.stream.write('FAILED (failures=%d, errors=%d)\n' %
+ (len(result.failures), len(result.errors)))
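
Because TestRunner only needs a writable stream, an options object with verbose and timing attributes, and a loader that answers loadTestsFromName(), it can be exercised directly. A minimal usage sketch, assuming webkitpy is importable (e.g. with Tools/Scripts on sys.path):

    import StringIO
    import unittest

    from webkitpy.test.runner import TestRunner

    class _Options(object):
        verbose = 0
        timing = False

    class SampleTest(unittest.TestCase):
        def test_passes(self):
            self.assertTrue(True)

    loader = unittest.defaultTestLoader
    suite = loader.loadTestsFromTestCase(SampleTest)
    stream = StringIO.StringIO()
    result = TestRunner(stream, _Options(), loader).run(suite)
    assert result.wasSuccessful()  # stream now holds the '.' progress output
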
diff --git a/Tools/Scripts/webkitpy/test/runner_unittest.py b/Tools/Scripts/webkitpy/test/runner_unittest.py
new file mode 100644
index 000000000..e2ea31aa1
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/runner_unittest.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2012 Google, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+import StringIO
+import unittest
+
+from webkitpy.tool.mocktool import MockOptions
+from webkitpy.test.runner import TestRunner
+
+
+class FakeModuleSuite(object):
+ def __init__(self, name, result, msg):
+ self.name = name
+ self.result = result
+ self.msg = msg
+
+ def __str__(self):
+ return self.name
+
+ def run(self, result):
+ result.testsRun += 1
+ if self.result == 'F':
+ result.failures.append((self.name, self.msg))
+ elif self.result == 'E':
+ result.errors.append((self.name, self.msg))
+
+
+class FakeTopSuite(object):
+ def __init__(self, tests):
+ self._tests = tests
+
+
+class FakeLoader(object):
+ def __init__(self, *test_triples):
+ self.triples = test_triples
+ self._tests = []
+ self._results = {}
+ for test_name, result, msg in self.triples:
+ self._tests.append(test_name)
+ m = re.match(r"(\w+) \(([\w.]+)\)", test_name)
+ self._results['%s.%s' % (m.group(2), m.group(1))] = tuple([test_name, result, msg])
+
+ def top_suite(self):
+ return FakeTopSuite(self._tests)
+
+ def loadTestsFromName(self, name, dummy):
+ return FakeModuleSuite(*self._results[name])
+
+
+class RunnerTest(unittest.TestCase):
+ def test_regular(self):
+ options = MockOptions(verbose=0, timing=False)
+ stream = StringIO.StringIO()
+ loader = FakeLoader(('test1 (Foo)', '.', ''),
+ ('test2 (Foo)', 'F', 'test2\nfailed'),
+ ('test3 (Foo)', 'E', 'test3\nerred'))
+ result = TestRunner(stream, options, loader).run(loader.top_suite())
+ self.assertFalse(result.wasSuccessful())
+ self.assertEquals(result.testsRun, 3)
+ self.assertEquals(len(result.failures), 1)
+ self.assertEquals(len(result.errors), 1)
+ # FIXME: check the output from the test
+
+ def test_verbose(self):
+ options = MockOptions(verbose=1, timing=False)
+ stream = StringIO.StringIO()
+ loader = FakeLoader(('test1 (Foo)', '.', ''),
+ ('test2 (Foo)', 'F', 'test2\nfailed'),
+ ('test3 (Foo)', 'E', 'test3\nerred'))
+ result = TestRunner(stream, options, loader).run(loader.top_suite())
+ self.assertFalse(result.wasSuccessful())
+ self.assertEquals(result.testsRun, 3)
+ self.assertEquals(len(result.failures), 1)
+ self.assertEquals(len(result.errors), 1)
+ # FIXME: check the output from the test
+
+ def test_timing(self):
+ options = MockOptions(verbose=0, timing=True)
+ stream = StringIO.StringIO()
+ loader = FakeLoader(('test1 (Foo)', '.', ''),
+ ('test2 (Foo)', 'F', 'test2\nfailed'),
+ ('test3 (Foo)', 'E', 'test3\nerred'))
+ result = TestRunner(stream, options, loader).run(loader.top_suite())
+ self.assertFalse(result.wasSuccessful())
+ self.assertEquals(result.testsRun, 3)
+ self.assertEquals(len(result.failures), 1)
+ self.assertEquals(len(result.errors), 1)
+ # FIXME: check the output from the test
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/test/test_finder.py b/Tools/Scripts/webkitpy/test/test_finder.py
new file mode 100644
index 000000000..b2671e917
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/test_finder.py
@@ -0,0 +1,184 @@
+# Copyright (C) 2012 Google, Inc.
+# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""this module is responsible for finding python tests."""
+
+import logging
+import re
+import sys
+
+
+_log = logging.getLogger(__name__)
+
+
+class TestDirectoryTree(object):
+ def __init__(self, filesystem, top_directory, starting_subdirectory):
+ self.filesystem = filesystem
+ self.top_directory = filesystem.realpath(top_directory)
+ self.search_directory = self.top_directory
+ self.top_package = ''
+ if starting_subdirectory:
+ self.top_package = starting_subdirectory.replace(filesystem.sep, '.') + '.'
+ self.search_directory = filesystem.join(self.top_directory, starting_subdirectory)
+
+ def find_modules(self, suffixes, sub_directory=None):
+ if sub_directory:
+ search_directory = self.filesystem.join(self.top_directory, sub_directory)
+ else:
+ search_directory = self.search_directory
+
+ def file_filter(filesystem, dirname, basename):
+ return any(basename.endswith(suffix) for suffix in suffixes)
+
+ filenames = self.filesystem.files_under(search_directory, file_filter=file_filter)
+ return [self.to_module(filename) for filename in filenames]
+
+ def to_module(self, path):
+ return path.replace(self.top_directory + self.filesystem.sep, '').replace(self.filesystem.sep, '.')[:-3]
+
+ def subpath(self, path):
+ """Returns the relative path from the top of the tree to the path, or None if the path is not under the top of the tree."""
+ realpath = self.filesystem.realpath(self.filesystem.join(self.top_directory, path))
+ if realpath.startswith(self.top_directory + self.filesystem.sep):
+ return realpath.replace(self.top_directory + self.filesystem.sep, '')
+ return None
+
+ def clean(self):
+ """Delete all .pyc files in the tree that have no matching .py file."""
+ _log.debug("Cleaning orphaned *.pyc files from: %s" % self.search_directory)
+ filenames = self.filesystem.files_under(self.search_directory)
+ for filename in filenames:
+ if filename.endswith(".pyc") and filename[:-1] not in filenames:
+ _log.info("Deleting orphan *.pyc file: %s" % filename)
+ self.filesystem.remove(filename)
+
+
+class TestFinder(object):
+ def __init__(self, filesystem):
+ self.filesystem = filesystem
+ self.trees = []
+
+ def add_tree(self, top_directory, starting_subdirectory=None):
+ self.trees.append(TestDirectoryTree(self.filesystem, top_directory, starting_subdirectory))
+
+ def additional_paths(self, paths):
+ return [tree.top_directory for tree in self.trees if tree.top_directory not in paths]
+
+ def clean_trees(self):
+ for tree in self.trees:
+ tree.clean()
+
+ def is_module(self, name):
+ relpath = name.replace('.', self.filesystem.sep) + '.py'
+ return any(self.filesystem.exists(self.filesystem.join(tree.top_directory, relpath)) for tree in self.trees)
+
+ def is_dotted_name(self, name):
+ return re.match(r'[a-zA-Z.][a-zA-Z0-9_.]*', name)
+
+ def to_module(self, path):
+ for tree in self.trees:
+ if path.startswith(tree.top_directory):
+ return tree.to_module(path)
+ return None
+
+ def find_names(self, args, skip_integrationtests, find_all):
+ suffixes = ['_unittest.py']
+ if not skip_integrationtests:
+ suffixes.append('_integrationtest.py')
+
+ if args:
+ names = []
+ for arg in args:
+ names.extend(self._find_names_for_arg(arg, suffixes))
+ return names
+
+ return self._default_names(suffixes, find_all)
+
+ def _find_names_for_arg(self, arg, suffixes):
+ realpath = self.filesystem.realpath(arg)
+ if self.filesystem.exists(realpath):
+ names = self._find_in_trees(realpath, suffixes)
+ if not names:
+ _log.error("%s is not in one of the test trees." % arg)
+ return names
+
+ # See if it's a python package in a tree (or a relative path from the top of a tree).
+ names = self._find_in_trees(arg.replace('.', self.filesystem.sep), suffixes)
+ if names:
+ return names
+
+ if self.is_dotted_name(arg):
+ # The name may not exist, but that's okay; we'll find out later.
+ return [arg]
+
+ _log.error("%s is not a python name or an existing file or directory." % arg)
+ return []
+
+ def _find_in_trees(self, path, suffixes):
+ for tree in self.trees:
+ relpath = tree.subpath(path)
+ if not relpath:
+ continue
+ if self.filesystem.isfile(path):
+ return [tree.to_module(path)]
+ else:
+ return tree.find_modules(suffixes, path)
+ return []
+
+ def _default_names(self, suffixes, find_all):
+ modules = []
+ for tree in self.trees:
+ modules.extend(tree.find_modules(suffixes))
+ modules.sort()
+
+ for module in modules:
+ _log.debug("Found: %s" % module)
+
+ # FIXME: Figure out how to move this to test-webkitpy in order to make this file more generic.
+ if not find_all:
+ slow_tests = ('webkitpy.common.checkout.scm.scm_unittest',)
+ self._exclude(modules, slow_tests, 'are really, really slow', 31818)
+
+ if sys.platform == 'win32':
+ win32_blacklist = ('webkitpy.common.checkout',
+ 'webkitpy.common.config',
+ 'webkitpy.tool')
+ self._exclude(modules, win32_blacklist, 'fail horribly on win32', 54526)
+
+ win32_blacklist_84726 = ('webkitpy.layout_tests.servers.http_server_unittest.TestHttpServer.test_start_cmd',)
+ self._exclude(modules, win32_blacklist_84726, 'fails on win32', 84726)
+
+ return modules
+
+ def _exclude(self, modules, module_prefixes, reason, bugid):
+ _log.info('Skipping tests in the following modules or packages because they %s:' % reason)
+ for prefix in module_prefixes:
+ _log.info(' %s' % prefix)
+ modules_to_exclude = filter(lambda m: m.startswith(prefix), modules)
+ for m in modules_to_exclude:
+ if len(modules_to_exclude) > 1:
+ _log.debug(' %s' % m)
+ modules.remove(m)
+ _log.info(' (https://bugs.webkit.org/show_bug.cgi?id=%d; use --all to include)' % bugid)
+ _log.info('')
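
TestFinder is deliberately filesystem-agnostic: it only calls realpath, join, exists, isfile, sep, and files_under on whatever filesystem object it is handed, which is exactly what lets the unit tests below swap in a MockFileSystem. A short usage sketch mirroring the setUp() that follows:

    from webkitpy.common.system.filesystem_mock import MockFileSystem
    from webkitpy.test.test_finder import TestFinder

    fs = MockFileSystem({'/foo/bar/baz_unittest.py': ''})
    finder = TestFinder(fs)
    finder.add_tree('/foo', 'bar')  # search under /foo/bar, package prefix 'bar'

    assert finder.is_module('bar.baz_unittest')
    assert finder.find_names([], skip_integrationtests=True, find_all=True) == ['bar.baz_unittest']
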
diff --git a/Tools/Scripts/webkitpy/test/test_finder_unittest.py b/Tools/Scripts/webkitpy/test/test_finder_unittest.py
new file mode 100644
index 000000000..5b6b3b030
--- /dev/null
+++ b/Tools/Scripts/webkitpy/test/test_finder_unittest.py
@@ -0,0 +1,134 @@
+# Copyright (C) 2012 Google, Inc.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import unittest
+
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.test.test_finder import TestFinder
+
+
+class TestFinderTest(unittest.TestCase):
+ def setUp(self):
+ files = {
+ '/foo/bar/baz.py': '',
+ '/foo/bar/baz_unittest.py': '',
+ '/foo2/bar2/baz2.py': '',
+ '/foo2/bar2/baz2.pyc': '',
+ '/foo2/bar2/baz2_integrationtest.py': '',
+ '/foo2/bar2/missing.pyc': '',
+ '/tmp/another_unittest.py': '',
+ }
+ self.fs = MockFileSystem(files)
+ self.finder = TestFinder(self.fs)
+ self.finder.add_tree('/foo', 'bar')
+ self.finder.add_tree('/foo2')
+
+ # Here we have to jump through a hoop to make sure test-webkitpy doesn't log
+ # any messages from these tests :(.
+ self.root_logger = logging.getLogger()
+ self.log_handler = None
+ for h in self.root_logger.handlers:
+ if getattr(h, 'name', None) == 'webkitpy.test.main':
+ self.log_handler = h
+ break
+ if self.log_handler:
+ self.log_level = self.log_handler.level
+ self.log_handler.level = logging.CRITICAL
+
+ def tearDown(self):
+ if self.log_handler:
+ self.log_handler.setLevel(self.log_level)
+
+ def test_additional_system_paths(self):
+ self.assertEquals(self.finder.additional_paths(['/usr']),
+ ['/foo', '/foo2'])
+
+ def test_is_module(self):
+ self.assertTrue(self.finder.is_module('bar.baz'))
+ self.assertTrue(self.finder.is_module('bar2.baz2'))
+ self.assertTrue(self.finder.is_module('bar2.baz2_integrationtest'))
+
+ # Missing the proper namespace.
+ self.assertFalse(self.finder.is_module('baz'))
+
+ def test_to_module(self):
+ self.assertEquals(self.finder.to_module('/foo/test.py'), 'test')
+ self.assertEquals(self.finder.to_module('/foo/bar/test.py'), 'bar.test')
+ self.assertEquals(self.finder.to_module('/foo/bar/pytest.py'), 'bar.pytest')
+
+ def test_clean(self):
+ self.assertTrue(self.fs.exists('/foo2/bar2/missing.pyc'))
+ self.finder.clean_trees()
+ self.assertFalse(self.fs.exists('/foo2/bar2/missing.pyc'))
+
+ def check_names(self, names, expected_names, skip_integrationtests=False, find_all=False):
+ self.assertEquals(self.finder.find_names(names, skip_integrationtests, find_all),
+ expected_names)
+
+ def test_default_names(self):
+ self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'])
+ self.check_names([], ['bar.baz_unittest'], skip_integrationtests=True, find_all=True)
+ self.check_names([], ['bar.baz_unittest'], skip_integrationtests=True, find_all=False)
+
+ # Should return the names given it, even if they don't exist.
+ self.check_names(['foobar'], ['foobar'], skip_integrationtests=True, find_all=False)
+
+ def test_paths(self):
+ self.fs.chdir('/foo/bar')
+ self.check_names(['baz_unittest.py'], ['bar.baz_unittest'])
+ self.check_names(['./baz_unittest.py'], ['bar.baz_unittest'])
+ self.check_names(['/foo/bar/baz_unittest.py'], ['bar.baz_unittest'])
+ self.check_names(['.'], ['bar.baz_unittest'])
+ self.check_names(['../../foo2/bar2'], ['bar2.baz2_integrationtest'])
+
+ self.fs.chdir('/')
+ self.check_names(['bar'], ['bar.baz_unittest'])
+ self.check_names(['/foo/bar/'], ['bar.baz_unittest'])
+
+ # This works 'by accident' since it maps onto a package.
+ self.check_names(['bar/'], ['bar.baz_unittest'])
+
+ # This should log an error, since it's outside the trees.
+ oc = OutputCapture()
+ oc.set_log_level(logging.ERROR)
+ oc.capture_output()
+ try:
+ self.check_names(['/tmp/another_unittest.py'], [])
+ finally:
+ _, _, logs = oc.restore_output()
+ self.assertTrue('another_unittest.py' in logs)
+
+ # Paths that don't exist are errors.
+ oc.capture_output()
+ try:
+ self.check_names(['/foo/bar/notexist_unittest.py'], [])
+ finally:
+ _, _, logs = oc.restore_output()
+ self.assertTrue('notexist_unittest.py' in logs)
+
+ # Names that don't exist are caught later, at load time.
+ self.check_names(['bar.notexist_unittest'], ['bar.notexist_unittest'])
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/Tools/Scripts/webkitpy/thirdparty/__init__.py b/Tools/Scripts/webkitpy/thirdparty/__init__.py
index 2c39de6cc..a0cf0f4d4 100644
--- a/Tools/Scripts/webkitpy/thirdparty/__init__.py
+++ b/Tools/Scripts/webkitpy/thirdparty/__init__.py
@@ -84,8 +84,6 @@ class AutoinstallImportHook(object):
self._install_pywebsocket()
elif '.buildbot' in fullname:
self._install_buildbot()
- elif '.xmlrunner' in fullname:
- self._install_xmlrunner()
def _install_mechanize(self):
self._install("http://pypi.python.org/packages/source/m/mechanize/mechanize-0.2.5.tar.gz",
@@ -133,12 +131,8 @@ class AutoinstallImportHook(object):
def _install_pywebsocket(self):
pywebsocket_dir = self._fs.join(_AUTOINSTALLED_DIR, "pywebsocket")
installer = AutoInstaller(target_dir=pywebsocket_dir)
- installer.install(url="http://pywebsocket.googlecode.com/files/mod_pywebsocket-0.7.2.tar.gz",
- url_subpath="pywebsocket-0.7.2/src/mod_pywebsocket")
-
- def _install_xmlrunner(self):
- self._install("http://pypi.python.org/packages/source/u/unittest-xml-reporting/unittest-xml-reporting-1.0.3.tar.gz#md5=cebf83281b0753b5d42bad38c91fd4d6",
- "unittest-xml-reporting-1.0.3/src/xmlrunner")
+ installer.install(url="http://pywebsocket.googlecode.com/files/mod_pywebsocket-0.7.4.tar.gz",
+ url_subpath="pywebsocket-0.7.4/src/mod_pywebsocket")
def _install(self, url, url_subpath):
installer = AutoInstaller(target_dir=_AUTOINSTALLED_DIR)
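
For context, AutoinstallImportHook watches imports under webkitpy.thirdparty.autoinstalled and downloads the matching package on first use, so dropping the '.xmlrunner' branch is all it takes to stop auto-installing a dependency once nothing imports it. A stripped-down sketch of the general mechanism, a PEP 302 meta-path hook (names and wiring here are hypothetical):

    import sys

    class LazyInstallHook(object):
        """Watch imports under a prefix and run an install step the
        first time each interesting name fragment is seen."""

        def __init__(self, prefix, installers):
            self._prefix = prefix          # e.g. 'thirdparty.autoinstalled'
            self._installers = installers  # fragment -> zero-argument callable
            self._seen = set()

        def find_module(self, fullname, path=None):
            if self._prefix in fullname:
                for fragment, install in self._installers.items():
                    if fragment in fullname and fragment not in self._seen:
                        self._seen.add(fragment)
                        install()
            return None  # never claim the module; normal import proceeds

    # Hypothetical wiring:
    # sys.meta_path.append(LazyInstallHook('thirdparty.autoinstalled',
    #                                      {'.mechanize': install_mechanize}))
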
diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
index 8b2d5c2c2..491ba79da 100644
--- a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
+++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
@@ -33,6 +33,9 @@ class CommitQueueTaskDelegate(PatchAnalysisTaskDelegate):
def parent_command(self):
return "commit-queue"
+ def did_pass_testing_ews(self, patch):
+ raise NotImplementedError("subclasses must implement")
+
class CommitQueueTask(PatchAnalysisTask):
def validate(self):
@@ -58,6 +61,11 @@ class CommitQueueTask(PatchAnalysisTask):
"ChangeLog validated",
"ChangeLog did not pass validation")
+ def _did_pass_tests_recently(self):
+ if self._delegate.did_pass_testing_ews(self._patch):
+ return True
+ return self._test_patch()
+
def run(self):
if not self.validate():
return False
@@ -74,7 +82,7 @@ class CommitQueueTask(PatchAnalysisTask):
if not self._build_without_patch():
return False
return self.report_failure()
- if not self._test_patch():
+ if not self._did_pass_tests_recently():
return False
# Make sure the patch is still valid before landing (e.g., make sure
# no one has set commit-queue- since we started working on the patch.)
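
The new _did_pass_tests_recently() is a pure short-circuit: a green result from the testing EWS for this exact patch lets the commit queue skip its own slow test run and go straight to landing, which is the behavior test_fast_success_case asserts below. A tiny sketch of the guard, with a stub standing in for the delegate:

    class StubDelegate(object):
        def __init__(self, ews_green):
            self._ews_green = ews_green

        def did_pass_testing_ews(self, patch):
            return self._ews_green

    def did_pass_tests_recently(delegate, patch, test_patch):
        # Mirrors CommitQueueTask._did_pass_tests_recently: trust a green
        # testing-EWS result, otherwise fall back to running the tests here.
        return delegate.did_pass_testing_ews(patch) or test_patch()

    assert did_pass_tests_recently(StubDelegate(True), None, lambda: False)
    assert not did_pass_tests_recently(StubDelegate(False), None, lambda: False)
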
diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
index 67d11e7c0..0cef8c867 100644
--- a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
@@ -68,15 +68,15 @@ class MockCommitQueue(CommitQueueTaskDelegate):
def expected_failures(self):
return ExpectedFailures()
- def layout_test_results(self):
+ def test_results(self):
return None
def report_flaky_tests(self, patch, flaky_results, results_archive):
flaky_tests = [result.filename for result in flaky_results]
log("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), flaky_tests, results_archive.filename))
- def archive_last_layout_test_results(self, patch):
- log("archive_last_layout_test_results: patch='%s'" % patch.id())
+ def archive_last_test_results(self, patch):
+ log("archive_last_test_results: patch='%s'" % patch.id())
archive = Mock()
archive.filename = "mock-archive-%s.zip" % patch.id()
return archive
@@ -84,6 +84,9 @@ class MockCommitQueue(CommitQueueTaskDelegate):
def build_style(self):
return "both"
+ def did_pass_testing_ews(self, patch):
+ return False
+
class FailingTestCommitQueue(MockCommitQueue):
def __init__(self, error_plan, test_failure_plan):
@@ -99,8 +102,8 @@ class FailingTestCommitQueue(MockCommitQueue):
def _mock_test_result(self, testname):
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
- def layout_test_results(self):
- # Doesn't make sense to ask for the layout_test_results until the tests have run at least once.
+ def test_results(self):
+ # Doesn't make sense to ask for the test_results until the tests have run at least once.
assert(self._test_run_counter >= 0)
failures_for_run = self._test_failure_plan[self._test_run_counter]
results = LayoutTestResults(map(self._mock_test_result, failures_for_run))
@@ -144,6 +147,24 @@ command_passed: success_message='Landed patch' patch='10000'
"""
self._run_through_task(commit_queue, expected_stderr)
+ def test_fast_success_case(self):
+ commit_queue = MockCommitQueue([])
+ commit_queue.did_pass_testing_ews = lambda patch: True
+ expected_stderr = """run_webkit_patch: ['clean']
+command_passed: success_message='Cleaned working directory' patch='10000'
+run_webkit_patch: ['update']
+command_passed: success_message='Updated working directory' patch='10000'
+run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
+command_passed: success_message='Applied patch' patch='10000'
+run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+command_passed: success_message='ChangeLog validated' patch='10000'
+run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
+command_passed: success_message='Built patch' patch='10000'
+run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
+command_passed: success_message='Landed patch' patch='10000'
+"""
+ self._run_through_task(commit_queue, expected_stderr)
+
def test_clean_failure(self):
commit_queue = MockCommitQueue([
ScriptError("MOCK clean failure"),
@@ -256,7 +277,7 @@ command_failed: failure_message='Unable to build without patch' script_error='MO
])
# CommitQueueTask will only report flaky tests if we successfully parsed
# results.html and returned a LayoutTestResults object, so we fake one.
- commit_queue.layout_test_results = lambda: LayoutTestResults([])
+ commit_queue.test_results = lambda: LayoutTestResults([])
expected_stderr = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
@@ -269,7 +290,7 @@ run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
report_flaky_tests: patch='10000' flaky_tests='[]' archive='mock-archive-10000.zip'
@@ -287,10 +308,10 @@ command_passed: success_message='Landed patch' patch='10000'
None,
ScriptError("MOCK tests failure"),
])
- commit_queue.layout_test_results = lambda: LayoutTestResults([])
+ commit_queue.test_results = lambda: LayoutTestResults([])
# It's possible for the delegate to fail to archive layout tests; don't try to report
# flaky tests when that happens.
- commit_queue.archive_last_layout_test_results = lambda patch: None
+ commit_queue.archive_last_test_results = lambda patch: None
expected_stderr = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
@@ -339,7 +360,7 @@ run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
"""
@@ -371,10 +392,10 @@ run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_passed: success_message='Able to pass tests without patch' patch='10000'
"""
@@ -410,10 +431,10 @@ run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
@@ -453,10 +474,10 @@ run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
"""
@@ -492,10 +513,10 @@ run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
-archive_last_layout_test_results: patch='10000'
+archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
"""
diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
index 3f2c1fca6..7be4a4a30 100644
--- a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
+++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
@@ -28,7 +28,6 @@
import codecs
import logging
-import platform
import os.path
from webkitpy.common.net.layouttestresults import path_for_layout_test, LayoutTestResults
diff --git a/Tools/Scripts/webkitpy/tool/bot/irc_command.py b/Tools/Scripts/webkitpy/tool/bot/irc_command.py
index 19af969c1..7d3a6fd5e 100644
--- a/Tools/Scripts/webkitpy/tool/bot/irc_command.py
+++ b/Tools/Scripts/webkitpy/tool/bot/irc_command.py
@@ -33,7 +33,6 @@ import re
from webkitpy.common.config import irc as config_irc
from webkitpy.common.config import urls
from webkitpy.common.config.committers import CommitterList
-from webkitpy.common.checkout.changelog import parse_bug_id
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.queueengine import TerminateQueue
from webkitpy.tool.grammar import join_with_separators
@@ -41,7 +40,7 @@ from webkitpy.tool.grammar import join_with_separators
def _post_error_and_check_for_bug_url(tool, nicks_string, exception):
tool.irc().post("%s" % exception)
- bug_id = parse_bug_id(exception.output)
+ bug_id = urls.parse_bug_id(exception.output)
if bug_id:
bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
tool.irc().post("%s: Ugg... Might have created %s" % (nicks_string, bug_url))
@@ -53,16 +52,6 @@ class IRCCommand(object):
raise NotImplementedError, "subclasses must implement"
-class LastGreenRevision(IRCCommand):
- def execute(self, nick, args, tool, sheriff):
- if not args:
- return "%s: Usage: last-green-revision BUILDER_NAME" % nick
- result = tool.buildbot.last_green_revision(' '.join(args))
- for line in result.split('\n'):
- if line:
- tool.irc().post("%s: %s" % (nick, line))
-
-
class Restart(IRCCommand):
def execute(self, nick, args, tool, sheriff):
tool.irc().post("Restarting...")
@@ -258,7 +247,6 @@ class CreateBug(IRCCommand):
visible_commands = {
"help": Help,
"hi": Hi,
- "last-green-revision": LastGreenRevision,
"restart": Restart,
"rollout": Rollout,
"whois": Whois,
diff --git a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py
index 1a0366e25..94a70b2bc 100644
--- a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py
+++ b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py
@@ -27,6 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.net.layouttestresults import LayoutTestResults
+from webkitpy.common.net.unittestresults import UnitTestResults
from webkitpy.common.system.deprecated_logging import error, log
from webkitpy.tool.steps.runtests import RunTests
@@ -52,15 +53,27 @@ class LayoutTestResultsReader(object):
return None
return LayoutTestResults.results_from_string(results_html)
+ def _create_unit_test_results(self):
+ results_path = self._tool.port().unit_tests_results_path()
+ if not results_path:
+ return None
+ results_xml = self._read_file_contents(results_path)
+ if not results_xml:
+ return None
+ return UnitTestResults.results_from_string(results_xml)
+
def results(self):
- results = self._create_layout_test_results()
- # FIXME: We should not have to set failure_limit_count, but we
- # do until run-webkit-tests can be updated save off the value
- # of --exit-after-N-failures in results.html/results.json.
- # https://bugs.webkit.org/show_bug.cgi?id=58481
- if results:
- results.set_failure_limit_count(RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT)
- return results
+ layout_test_results = self._create_layout_test_results()
+ unit_test_results = self._create_unit_test_results()
+ if layout_test_results:
+ # FIXME: We should not have to set failure_limit_count, but we
+ # do until run-webkit-tests can be updated to save off the value
+ # of --exit-after-N-failures in results.html/results.json.
+ # https://bugs.webkit.org/show_bug.cgi?id=58481
+ layout_test_results.set_failure_limit_count(RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT)
+ if unit_test_results:
+ layout_test_results.add_unit_test_failures(unit_test_results)
+ return layout_test_results
def _results_directory(self):
results_path = self._tool.port().layout_tests_results_path()
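
results() now folds unit-test failures into the layout-test results so downstream consumers see a single combined failure list; the XML it reads is the gtest-style report shown in the unit test further down. As a hedged sketch of what a parser for that format might look like (the real UnitTestResults implementation is not part of this hunk), assuming failed cases carry a <failure> child element:

    import xml.etree.ElementTree as ElementTree

    def failed_test_names(results_xml):
        # Collect 'Suite.test' names for every <testcase> recording a failure.
        root = ElementTree.fromstring(results_xml)
        names = []
        for testcase in root.iter('testcase'):
            if testcase.find('failure') is not None:
                names.append('%s.%s' % (testcase.get('classname'), testcase.get('name')))
        return names
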
diff --git a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py
index 78845dcd7..0eb348297 100644
--- a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py
@@ -39,13 +39,41 @@ class LayoutTestResultsReaderTest(unittest.TestCase):
def test_missing_layout_test_results(self):
tool = MockTool()
reader = LayoutTestResultsReader(tool, "/var/logs")
- results_path = '/mock-results/full_results.json'
- tool.filesystem = MockFileSystem({results_path: None})
+ layout_tests_results_path = '/mock-results/full_results.json'
+ unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
+ tool.filesystem = MockFileSystem({layout_tests_results_path: None,
+ unit_tests_results_path: None})
# Make sure that our filesystem mock functions as we expect.
- self.assertRaises(IOError, tool.filesystem.read_text_file, results_path)
+ self.assertRaises(IOError, tool.filesystem.read_text_file, layout_tests_results_path)
+ self.assertRaises(IOError, tool.filesystem.read_text_file, unit_tests_results_path)
# layout_test_results shouldn't raise even if the results.html file is missing.
self.assertEquals(reader.results(), None)
+ def test_create_unit_test_results(self):
+ tool = MockTool()
+ reader = LayoutTestResultsReader(tool, "/var/logs")
+ unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
+ no_failures_xml = """<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="3" failures="0" disabled="0" errors="0" time="11.35" name="AllTests">
+ <testsuite name="RenderTableCellDeathTest" tests="3" failures="0" disabled="0" errors="0" time="0.677">
+ <testcase name="CanSetColumn" status="run" time="0.168" classname="RenderTableCellDeathTest" />
+ <testcase name="CrashIfSettingUnsetColumnIndex" status="run" time="0.129" classname="RenderTableCellDeathTest" />
+ <testcase name="CrashIfSettingUnsetRowIndex" status="run" time="0.123" classname="RenderTableCellDeathTest" />
+ </testsuite>
+</testsuites>"""
+ tool.filesystem = MockFileSystem({unit_tests_results_path: no_failures_xml})
+ self.assertEquals(reader._create_unit_test_results(), [])
+
+ def test_missing_unit_test_results_path(self):
+ tool = MockTool()
+ tool.port().unit_tests_results_path = lambda: None
+ reader = LayoutTestResultsReader(tool, "/var/logs")
+ reader._create_layout_test_results = lambda: LayoutTestResults([])
+ # layout_test_results shouldn't raise even if the unit-test results XML file is missing.
+ self.assertNotEquals(reader.results(), None)
+ self.assertEquals(reader.results().failing_tests(), [])
+
def test_layout_test_results(self):
reader = LayoutTestResultsReader(MockTool(), "/var/logs")
reader._read_file_contents = lambda path: None
diff --git a/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py b/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
index bcd3d304b..96518c69e 100644
--- a/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
+++ b/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
@@ -55,10 +55,10 @@ class PatchAnalysisTaskDelegate(object):
def expected_failures(self):
raise NotImplementedError("subclasses must implement")
- def layout_test_results(self):
+ def test_results(self):
raise NotImplementedError("subclasses must implement")
- def archive_last_layout_test_results(self, patch):
+ def archive_last_test_results(self, patch):
raise NotImplementedError("subclasses must implement")
def build_style(self):
@@ -145,7 +145,7 @@ class PatchAnalysisTask(object):
"Passed tests",
"Patch does not pass tests")
- self._expected_failures.shrink_expected_failures(self._delegate.layout_test_results(), success)
+ self._expected_failures.shrink_expected_failures(self._delegate.test_results(), success)
return success
def _build_and_test_without_patch(self):
@@ -160,7 +160,7 @@ class PatchAnalysisTask(object):
"Able to pass tests without patch",
"Unable to pass tests without patch (tree is red?)")
- self._expected_failures.shrink_expected_failures(self._delegate.layout_test_results(), success)
+ self._expected_failures.shrink_expected_failures(self._delegate.test_results(), success)
return success
def _land(self):
@@ -187,10 +187,10 @@ class PatchAnalysisTask(object):
if self._test():
return True
- # Note: archive_last_layout_test_results deletes the results directory, making these calls order-sensitve.
- # We could remove this dependency by building the layout_test_results from the archive.
- first_results = self._delegate.layout_test_results()
- first_results_archive = self._delegate.archive_last_layout_test_results(self._patch)
+ # Note: archive_last_test_results deletes the results directory, making these calls order-sensitive.
+ # We could remove this dependency by building the test_results from the archive.
+ first_results = self._delegate.test_results()
+ first_results_archive = self._delegate.archive_last_test_results(self._patch)
first_script_error = self._script_error
if self._expected_failures.failures_were_expected(first_results):
@@ -202,7 +202,7 @@ class PatchAnalysisTask(object):
self._report_flaky_tests(first_results.failing_test_results(), first_results_archive)
return True
- second_results = self._delegate.layout_test_results()
+ second_results = self._delegate.test_results()
if self._results_failed_different_tests(first_results, second_results):
# We could report flaky tests here, but we would need to be careful
# to use similar checks to ExpectedFailures._can_trust_results
@@ -211,15 +211,15 @@ class PatchAnalysisTask(object):
# See https://bugs.webkit.org/show_bug.cgi?id=51272
return False
- # Archive (and remove) second results so layout_test_results() after
+ # Archive (and remove) second results so test_results() after
# build_and_test_without_patch won't use second results instead of the clean-tree results.
- second_results_archive = self._delegate.archive_last_layout_test_results(self._patch)
+ second_results_archive = self._delegate.archive_last_test_results(self._patch)
if self._build_and_test_without_patch():
# The error from the previous ._test() run is real, report it.
return self.report_failure(first_results_archive, first_results, first_script_error)
- clean_tree_results = self._delegate.layout_test_results()
+ clean_tree_results = self._delegate.test_results()
self._expected_failures.grow_expected_failures(clean_tree_results)
# Re-check if the original results are now to be expected to avoid a full re-try.
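
The retry flow above turns on one comparison: if the two runs failed different tests, the failures look flaky (or the tree is red) and the clean-tree run arbitrates; if they failed the same tests, the regression is real. A sketch of that check, assuming the results objects expose failing_tests() as they do elsewhere in this patch:

    def failed_different_tests(first_results, second_results):
        # Both runs produced parseable results, and the sets of failing
        # test names differ between them.
        if not first_results or not second_results:
            return False
        return set(first_results.failing_tests()) != set(second_results.failing_tests())
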
diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriff.py b/Tools/Scripts/webkitpy/tool/bot/sheriff.py
index df2686803..a8c928c9b 100644
--- a/Tools/Scripts/webkitpy/tool/bot/sheriff.py
+++ b/Tools/Scripts/webkitpy/tool/bot/sheriff.py
@@ -27,7 +27,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.config import urls
-from webkitpy.common.checkout.changelog import parse_bug_id
from webkitpy.common.system.deprecated_logging import log
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.grammar import join_with_separators
@@ -88,7 +87,7 @@ class Sheriff(object):
svn_revisions,
rollout_reason,
])
- return parse_bug_id(output)
+ return urls.parse_bug_id(output)
def post_chromium_deps_roll(self, revision, revision_name):
args = [
@@ -100,7 +99,7 @@ class Sheriff(object):
# revision can be None, but revision_name is always something meaningful.
args += [revision, revision_name]
output = self._sheriffbot.run_webkit_patch(args)
- return parse_bug_id(output)
+ return urls.parse_bug_id(output)
def post_blame_comment_on_bug(self, commit_info, builders, tests):
if not commit_info.bug_id():
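
Moving parse_bug_id from changelog to webkitpy.common.config.urls puts the URL scraping next to the other bug-URL helpers, where it belongs. For readers following along, a hedged sketch of what such a parser typically does; the regex below is illustrative, not the actual implementation:

    import re

    _BUG_URL_RE = re.compile(r'https?://bugs\.webkit\.org/show_bug\.cgi\?id=(\d+)')

    def parse_bug_id(text):
        # Illustrative only: return the first bug number referenced in text.
        match = _BUG_URL_RE.search(text or '')
        if match:
            return int(match.group(1))
        return None

    assert parse_bug_id('See https://bugs.webkit.org/show_bug.cgi?id=84726.') == 84726
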
diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py b/Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py
index 312e2a29b..018f1f733 100644
--- a/Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py
@@ -82,13 +82,9 @@ class SheriffIRCBotTest(unittest.TestCase):
OutputCapture().assert_outputs(self, run, args=["hi"], expected_stderr=expected_stderr)
def test_help(self):
- expected_stderr = "MOCK: irc.post: mock_nick: Available commands: create-bug, help, hi, last-green-revision, restart, roll-chromium-deps, rollout, whois\n"
+ expected_stderr = "MOCK: irc.post: mock_nick: Available commands: create-bug, help, hi, restart, roll-chromium-deps, rollout, whois\n"
OutputCapture().assert_outputs(self, run, args=["help"], expected_stderr=expected_stderr)
- def test_lgr(self):
- expected_stderr = "MOCK: irc.post: mock_nick: Some Builder 1: 9479\nMOCK: irc.post: mock_nick: Some Builder 2: 9400\n"
- OutputCapture().assert_outputs(self, run, args=["last-green-revision Some Builder"], expected_stderr=expected_stderr)
-
def test_restart(self):
expected_stderr = "MOCK: irc.post: Restarting...\n"
OutputCapture().assert_outputs(self, run, args=["restart"], expected_stderr=expected_stderr, expected_exception=TerminateQueue)
diff --git a/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py b/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
index 269cf25cf..6c54da25e 100644
--- a/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
+++ b/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
@@ -35,6 +35,7 @@ class AbstractLocalServerCommand(AbstractDeclarativeCommand):
def __init__(self):
options = [
make_option("--httpd-port", action="store", type="int", default=8127, help="Port to use for the HTTP server"),
+ make_option("--no-show-results", action="store_false", default=True, dest="show_results", help="Don't launch a browser with the rebaseline server"),
]
AbstractDeclarativeCommand.__init__(self, options=options)
@@ -48,8 +49,9 @@ class AbstractLocalServerCommand(AbstractDeclarativeCommand):
print "Starting server at %s" % server_url
print "Use the 'Exit' link in the UI, %squitquitquit or Ctrl-C to stop" % server_url
- # FIXME: This seems racy.
- threading.Timer(0.1, lambda: self._tool.user.open_url(server_url)).start()
+ if options.show_results:
+ # FIXME: This seems racy.
+ threading.Timer(0.1, lambda: self._tool.user.open_url(server_url)).start()
httpd = self.server(httpd_port=options.httpd_port, config=config)
httpd.serve_forever()
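
Two things are happening in this hunk: automation can now pass --no-show-results to keep a browser from popping up, and the FIXME is right that the 0.1-second timer is a race, since it merely hopes the server is accepting connections before the browser connects. One conventional mitigation relies on the fact that a TCP server's socket is bound and listening as soon as the server object is constructed, before serve_forever() runs; a standard-library sketch (not the project's HTTP server class):

    import threading
    import webbrowser
    import BaseHTTPServer

    httpd = BaseHTTPServer.HTTPServer(('127.0.0.1', 8127),
                                      BaseHTTPServer.BaseHTTPRequestHandler)
    # The socket is already listening here, so a connection made now is
    # queued by the kernel until serve_forever() starts handling requests.
    threading.Timer(0.1, lambda: webbrowser.open('http://127.0.0.1:8127/')).start()
    httpd.serve_forever()
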
diff --git a/Tools/Scripts/webkitpy/tool/commands/download.py b/Tools/Scripts/webkitpy/tool/commands/download.py
index 611ca9254..4f6b7370e 100644
--- a/Tools/Scripts/webkitpy/tool/commands/download.py
+++ b/Tools/Scripts/webkitpy/tool/commands/download.py
@@ -202,6 +202,23 @@ class ProcessBugsMixin(object):
return all_patches
+class ProcessURLsMixin(object):
+ def _fetch_list_of_patches_to_process(self, options, args, tool):
+ all_patches = []
+ for url in args:
+ bug_id = urls.parse_bug_id(url)
+ if bug_id:
+ patches = tool.bugs.fetch_bug(bug_id).patches()
+ log("%s found on bug %s." % (pluralize("patch", len(patches)), bug_id))
+ all_patches += patches
+
+ attachment_id = urls.parse_attachment_id(url)
+ if attachment_id:
+ all_patches.append(tool.bugs.fetch_attachment(attachment_id))
+
+ return all_patches
+
+
class CheckStyle(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
name = "check-style"
help_text = "Run check-webkit-style on the specified attachments"
@@ -317,6 +334,12 @@ class LandFromBug(AbstractPatchLandingCommand, ProcessBugsMixin):
show_in_main_help = True
+class LandFromURL(AbstractPatchLandingCommand, ProcessURLsMixin):
+ name = "land-from-url"
+ help_text = "Land all patches on the given URLs, optionally building and testing them first"
+ argument_names = "URL [URLS]"
+
+
class ValidateChangelog(AbstractSequencedCommand):
name = "validate-changelog"
help_text = "Validate that the ChangeLogs and reviewers look reasonable"
diff --git a/Tools/Scripts/webkitpy/tool/commands/download_unittest.py b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py
index eab8461b9..f936a9673 100644
--- a/Tools/Scripts/webkitpy/tool/commands/download_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py
@@ -193,7 +193,7 @@ Not closing bug 50000 as attachment 10000 has review=+. Assuming there are more
"""
self.assert_execute_outputs(LandAttachment(), [10000], options=self._default_options(), expected_stderr=expected_stderr)
- def test_land_patches(self):
+ def test_land_from_bug(self):
# FIXME: This expected result is imperfect, notice how it's seeing the same patch as still there after it thought it would have cleared the flags.
expected_stderr = """2 reviewed patches found on bug 50000.
Processing 2 patches from 1 bug.
@@ -220,6 +220,33 @@ Not closing bug 50000 as attachment 10000 has review=+. Assuming there are more
"""
self.assert_execute_outputs(LandFromBug(), [50000], options=self._default_options(), expected_stderr=expected_stderr)
+ def test_land_from_url(self):
+ # FIXME: This expected result is imperfect; notice how it still sees the same patch after it thought it had cleared the flags.
+ expected_stderr = """2 patches found on bug 50000.
+Processing 2 patches from 1 bug.
+Updating working directory
+Processing patch 10000 from bug 50000.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running WebKit unit tests
+Running run-webkit-tests
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+Not closing bug 50000 as attachment 10000 has review=+. Assuming there are more patches to land from this bug.
+Updating working directory
+Processing patch 10001 from bug 50000.
+Building WebKit
+Running Python unit tests
+Running Perl unit tests
+Running JavaScriptCore tests
+Running WebKit unit tests
+Running run-webkit-tests
+Committed r49824: <http://trac.webkit.org/changeset/49824>
+Not closing bug 50000 as attachment 10000 has review=+. Assuming there are more patches to land from this bug.
+"""
+ self.assert_execute_outputs(LandFromURL(), ["https://bugs.webkit.org/show_bug.cgi?id=50000"], options=self._default_options(), expected_stderr=expected_stderr)
+
def test_prepare_rollout(self):
expected_stderr = "Preparing rollout for bug 50000.\nUpdating working directory\n"
self.assert_execute_outputs(PrepareRollout(), [852, "Reason"], options=self._default_options(), expected_stderr=expected_stderr)
@@ -234,6 +261,7 @@ Reason
component: MOCK component
cc: MOCK cc
blocked: 50000
+MOCK reopen_bug 50000 with comment 'Re-opened since this is blocked by 50004'
MOCK add_patch_to_bug: bug_id=50004, description=ROLLOUT of r852, mark_for_review=False, mark_for_commit_queue=True, mark_for_landing=False
-- Begin comment --
Any committer can land this patch automatically by marking it commit-queue+. The commit-queue will build and test the patch before landing to ensure that the rollout will be successful. This process takes approximately 15 minutes.
diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
index 37bc4b72e..639f4d8fc 100644
--- a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
+++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
@@ -112,10 +112,10 @@ class AbstractEarlyWarningSystem(AbstractReviewQueue, EarlyWarningSystemTaskDele
def expected_failures(self):
return self._expected_failures
- def layout_test_results(self):
+ def test_results(self):
return self._layout_test_results_reader.results()
- def archive_last_layout_test_results(self, patch):
+ def archive_last_test_results(self, patch):
return self._layout_test_results_reader.archive(patch)
def build_style(self):
diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
index 3adad4c58..9dbb398ee 100644
--- a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
@@ -73,7 +73,7 @@ class EarlyWarningSytemTest(QueuesTest):
self.assert_queue_outputs(ews, expected_stderr=self._default_expected_stderr(ews), options=options)
def _test_testing_ews(self, ews):
- ews.layout_test_results = lambda: None
+ ews.test_results = lambda: None
ews.bind_to_tool(MockTool())
expected_stderr = self._default_expected_stderr(ews)
expected_stderr["handle_script_error"] = "ScriptError error message\n\nMOCK output\n"
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries.py b/Tools/Scripts/webkitpy/tool/commands/queries.py
index cf7eb9daa..ee476f953 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queries.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queries.py
@@ -4,7 +4,7 @@
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
-#
+#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
@@ -14,7 +14,7 @@
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -27,6 +27,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import fnmatch
+import re
from optparse import make_option
@@ -42,7 +44,8 @@ from webkitpy.common.system.user import User
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
from webkitpy.common.system.deprecated_logging import log
-from webkitpy.layout_tests import port
+from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationSerializer
+from webkitpy.layout_tests.port import port_options
class SuggestReviewers(AbstractDeclarativeCommand):
@@ -127,15 +130,6 @@ class PatchesToReview(AbstractDeclarativeCommand):
print patch_id
-class LastGreenRevision(AbstractDeclarativeCommand):
- name = "last-green-revision"
- help_text = "Prints the last known good revision"
- argument_names = "BUILDER_NAME"
-
- def execute(self, options, args, tool):
- print self._tool.buildbot.last_green_revision(args[0])
-
-
class WhatBroke(AbstractDeclarativeCommand):
name = "what-broke"
help_text = "Print failing buildbots (%s) and what revisions broke them" % config_urls.buildbot_url
@@ -379,30 +373,158 @@ and PID and prints it to stdout."""
argument_names = "PROCESS_NAME [PID]"
def execute(self, options, args, tool):
- crash_logs = CrashLogs(tool.filesystem)
+ crash_logs = CrashLogs(tool)
pid = None
if len(args) > 1:
pid = int(args[1])
print crash_logs.find_newest_log(args[0], pid)
-class SkippedPorts(AbstractDeclarativeCommand):
- name = "skipped-ports"
- help_text = "Print the list of ports skipping the given layout test(s)"
- long_help = """Scans the the Skipped file of each port and figure
-out what ports are skipping the test(s). Categories are taken in account too."""
- argument_names = "TEST_NAME"
+class PrintExpectations(AbstractDeclarativeCommand):
+ name = 'print-expectations'
+ help_text = 'Print the expected result for the given test(s) on the given port(s)'
+
+ def __init__(self):
+ options = [
+ make_option('--all', action='store_true', default=False,
+ help='display the expectations for *all* tests'),
+ make_option('-x', '--exclude-keyword', action='append', default=[],
+                        help='limit to tests not matching the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
+ make_option('-i', '--include-keyword', action='append', default=[],
+                        help='limit to tests with the given keyword (for example, "skip", "slow", or "crash"). May specify multiple times'),
+ make_option('--csv', action='store_true', default=False,
+ help='Print a CSV-style report that includes the port name, modifiers, tests, and expectations'),
+ make_option('-f', '--full', action='store_true', default=False,
+ help='Print a full test_expectations.txt-style line for every match'),
+ ] + port_options(platform='port/platform to use. Use glob-style wildcards for multiple ports (implies --csv)')
+
+ AbstractDeclarativeCommand.__init__(self, options=options)
+ self._expectation_models = {}
def execute(self, options, args, tool):
- results = dict([(test_name, []) for test_name in args])
- for port_name in tool.port_factory.all_port_names():
- port_object = tool.port_factory.get(port_name)
- for test_name in args:
- if port_object.skips_layout_test(test_name):
- results[test_name].append(port_name)
-
- for test_name, ports in results.iteritems():
- if ports:
- print "Ports skipping test %r: %s" % (test_name, ', '.join(ports))
- else:
- print "Test %r is not skipped by any port." % test_name
+ if not args and not options.all:
+ print "You must either specify one or more test paths or --all."
+ return
+
+ default_port = tool.port_factory.get(options=options)
+ if options.platform:
+ port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
+ if not port_names:
+ default_port = tool.port_factory.get(options.platform)
+ if default_port:
+ port_names = [default_port.name()]
+ else:
+ print "No port names match '%s'" % options.platform
+ return
+ else:
+ port_names = [default_port.name()]
+
+ serializer = TestExpectationSerializer()
+ tests = default_port.tests(args)
+ for port_name in port_names:
+ model = self._model(options, port_name, tests)
+ tests_to_print = self._filter_tests(options, model, tests)
+ lines = [model.get_expectation_line(test) for test in sorted(tests_to_print)]
+ print '\n'.join(self._format_lines(options, port_name, serializer, lines))
+
+ def _filter_tests(self, options, model, tests):
+ filtered_tests = set()
+ if options.include_keyword:
+ for keyword in options.include_keyword:
+ filtered_tests.update(model.get_test_set_for_keyword(keyword))
+ else:
+ filtered_tests = tests
+
+ for keyword in options.exclude_keyword:
+ filtered_tests.difference_update(model.get_test_set_for_keyword(keyword))
+ return filtered_tests
+
+ def _format_lines(self, options, port_name, serializer, lines):
+ output = []
+ if options.csv:
+ for line in lines:
+ output.append("%s,%s" % (port_name, serializer.to_csv(line)))
+ elif lines:
+ include_modifiers = options.full
+ include_expectations = options.full or len(options.include_keyword) != 1 or len(options.exclude_keyword)
+ output.append("// For %s" % port_name)
+ for line in lines:
+ output.append("%s" % serializer.to_string(line, include_modifiers, include_expectations, include_comment=False))
+ return output
+
+ def _model(self, options, port_name, tests):
+ port = self._tool.port_factory.get(port_name, options)
+ expectations_path = port.path_to_test_expectations_file()
+        if expectations_path not in self._expectation_models:
+ lint_mode = False
+ self._expectation_models[expectations_path] = TestExpectations(port, tests,
+ port.test_expectations(),
+ port.test_configuration(),
+ lint_mode,
+ port.test_expectations_overrides(),
+ port.skipped_layout_tests(tests)).model()
+ return self._expectation_models[expectations_path]
+
+
+class PrintBaselines(AbstractDeclarativeCommand):
+ name = 'print-baselines'
+ help_text = 'Prints the baseline locations for given test(s) on the given port(s)'
+
+ def __init__(self):
+ options = [
+ make_option('--all', action='store_true', default=False,
+ help='display the baselines for *all* tests'),
+ make_option('--csv', action='store_true', default=False,
+ help='Print a CSV-style report that includes the port name, test_name, test platform, baseline type, baseline location, and baseline platform'),
+ make_option('--include-virtual-tests', action='store_true',
+ help='Include virtual tests'),
+ ] + port_options(platform='port/platform to use. Use glob-style wildcards for multiple ports (implies --csv)')
+ AbstractDeclarativeCommand.__init__(self, options=options)
+        self._platform_regexp = re.compile('platform/([^/]+)/(.+)')
+
+ def execute(self, options, args, tool):
+ if not args and not options.all:
+ print "You must either specify one or more test paths or --all."
+ return
+
+ default_port = tool.port_factory.get()
+ if options.platform:
+ port_names = fnmatch.filter(tool.port_factory.all_port_names(), options.platform)
+ if not port_names:
+                print "No port names match '%s'" % options.platform
+                return
+ else:
+ port_names = [default_port.name()]
+
+ if len(port_names) > 1:
+ options.csv = True
+
+ if options.include_virtual_tests:
+ tests = sorted(default_port.tests(args))
+ else:
+ # FIXME: make real_tests() a public method.
+ tests = sorted(default_port._real_tests(args))
+
+ if not options.csv:
+ print "// For %s" % port_names[0]
+
+ for port_name in port_names:
+ port = tool.port_factory.get(port_name)
+ for test_name in tests:
+ self._print_baselines(options, port_name, test_name, port.expected_baselines_by_extension(test_name))
+
+ def _print_baselines(self, options, port_name, test_name, baselines):
+ for extension in sorted(baselines.keys()):
+ baseline_location = baselines[extension]
+ if baseline_location:
+ if options.csv:
+ print "%s,%s,%s,%s,%s,%s" % (port_name, test_name, self._platform_for_path(test_name),
+ extension[1:], baseline_location, self._platform_for_path(baseline_location))
+ else:
+ print baseline_location
+
+ def _platform_for_path(self, relpath):
+ platform_matchobj = self._platform_regexp.match(relpath)
+ if platform_matchobj:
+ return platform_matchobj.group(1)
+ return None
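
PrintExpectations unions the include-keyword sets, then subtracts each exclude-keyword set, as _filter_tests above shows. A minimal standalone sketch of that composition (here `model` stands in for the TestExpectations model; only get_test_set_for_keyword() is assumed, exactly as used above):

    # Sketch of the include/exclude keyword composition in _filter_tests.
    # `model` is assumed to expose get_test_set_for_keyword(), per the patch.
    def filter_tests(model, tests, include_keywords, exclude_keywords):
        filtered = set()
        if include_keywords:
            for keyword in include_keywords:       # union of all included sets
                filtered.update(model.get_test_set_for_keyword(keyword))
        else:
            filtered = set(tests)                  # otherwise start from everything
        for keyword in exclude_keywords:           # then subtract each excluded set
            filtered.difference_update(model.get_test_set_for_keyword(keyword))
        return filtered

For example, --include-keyword=image --exclude-keyword=skip keeps the image-failure tests that are not also skipped.
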
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
index fe13a4c54..0fb59a853 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
@@ -28,11 +28,12 @@
import unittest
+from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.bugzilla import Bugzilla
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.queries import *
-from webkitpy.tool.mocktool import MockTool
+from webkitpy.tool.mocktool import MockTool, MockOptions
class MockTestPort1(object):
@@ -89,19 +91,6 @@ class QueryCommandsTest(CommandsTest):
expected_stdout = "ok : Builder1\nok : Builder2\n"
self.assert_execute_outputs(TreeStatus(), None, expected_stdout)
- def test_skipped_ports(self):
- tool = MockTool()
- tool.port_factory = MockPortFactory()
-
- expected_stdout = "Ports skipping test 'media/foo/bar.html': test_port1, test_port2\n"
- self.assert_execute_outputs(SkippedPorts(), ("media/foo/bar.html",), expected_stdout, tool=tool)
-
- expected_stdout = "Ports skipping test 'foo': test_port1\n"
- self.assert_execute_outputs(SkippedPorts(), ("foo",), expected_stdout, tool=tool)
-
- expected_stdout = "Test 'media' is not skipped by any port.\n"
- self.assert_execute_outputs(SkippedPorts(), ("media",), expected_stdout, tool=tool)
-
class FailureReasonTest(unittest.TestCase):
def test_blame_line_for_revision(self):
@@ -115,3 +104,94 @@ class FailureReasonTest(unittest.TestCase):
raise Exception("MESSAGE")
tool.checkout().commit_info_for_revision = raising_mock
self.assertEquals(command._blame_line_for_revision(None), "FAILED to fetch CommitInfo for rNone, exception: MESSAGE")
+
+
+class PrintExpectationsTest(unittest.TestCase):
+ def run_test(self, tests, expected_stdout, **args):
+ options = MockOptions(all=False, csv=False, full=False, platform='test-win-xp',
+ include_keyword=[], exclude_keyword=[]).update(**args)
+ tool = MockTool()
+ command = PrintExpectations()
+ command.bind_to_tool(tool)
+
+ oc = OutputCapture()
+ try:
+ oc.capture_output()
+ command.execute(options, tests, tool)
+ finally:
+ stdout, _, _ = oc.restore_output()
+ self.assertEquals(stdout, expected_stdout)
+
+ def test_basic(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'failures/expected/image.html = IMAGE\n'
+ 'failures/expected/text.html = TEXT\n'))
+
+ def test_full(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'WONTFIX : failures/expected/image.html = IMAGE\n'
+ 'WONTFIX : failures/expected/text.html = TEXT\n'),
+ full=True)
+
+ def test_exclude(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'failures/expected/text.html = TEXT\n'),
+ exclude_keyword=['image'])
+
+ def test_include(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('// For test-win-xp\n'
+ 'failures/expected/image.html\n'),
+ include_keyword=['image'])
+
+ def test_csv(self):
+ self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
+ ('test-win-xp,failures/expected/image.html,wontfix,image\n'
+ 'test-win-xp,failures/expected/text.html,wontfix,text\n'),
+ csv=True)
+
+
+class PrintBaselinesTest(unittest.TestCase):
+ def setUp(self):
+ self.oc = None
+ self.tool = MockTool()
+ self.test_port = self.tool.port_factory.get('test-win-xp')
+ self.tool.port_factory.get = lambda port_name=None: self.test_port
+ self.tool.port_factory.all_port_names = lambda: ['test-win-xp']
+
+ def tearDown(self):
+ if self.oc:
+ self.restore_output()
+
+ def capture_output(self):
+ self.oc = OutputCapture()
+ self.oc.capture_output()
+
+ def restore_output(self):
+ stdout, stderr, logs = self.oc.restore_output()
+ self.oc = None
+ return (stdout, stderr, logs)
+
+ def test_basic(self):
+ command = PrintBaselines()
+ command.bind_to_tool(self.tool)
+ self.capture_output()
+ command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform=None), ['passes/text.html'], self.tool)
+ stdout, _, _ = self.restore_output()
+ self.assertEquals(stdout,
+ ('// For test-win-xp\n'
+ 'passes/text-expected.png\n'
+ 'passes/text-expected.txt\n'))
+
+ def test_csv(self):
+ command = PrintBaselines()
+ command.bind_to_tool(self.tool)
+ self.capture_output()
+ command.execute(MockOptions(all=False, platform='*xp', csv=True, include_virtual_tests=False), ['passes/text.html'], self.tool)
+ stdout, _, _ = self.restore_output()
+ self.assertEquals(stdout,
+ ('test-win-xp,passes/text.html,None,png,passes/text-expected.png,None\n'
+ 'test-win-xp,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues.py b/Tools/Scripts/webkitpy/tool/commands/queues.py
index 18ee4cb16..2af08b718 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queues.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queues.py
@@ -317,10 +317,10 @@ class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskD
def expected_failures(self):
return self._expected_failures
- def layout_test_results(self):
+ def test_results(self):
return self._layout_test_results_reader.results()
- def archive_last_layout_test_results(self, patch):
+ def archive_last_test_results(self, patch):
return self._layout_test_results_reader.archive(patch)
def build_style(self):
@@ -333,6 +333,12 @@ class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskD
reporter = FlakyTestReporter(self._tool, self.name)
reporter.report_flaky_tests(patch, flaky_test_results, results_archive)
+ def did_pass_testing_ews(self, patch):
+        # Currently, chromium-ews is the only testing EWS. Once there are more, we
+        # should make sure they all pass.
+ status = self._tool.status_server.patch_status("chromium-ews", patch.id())
+ return status == self._pass_status
+
# StepSequenceErrorHandler methods
def handle_script_error(cls, tool, state, script_error):
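
did_pass_testing_ews consults the status server for the patch's chromium-ews status, and the comment above notes it should check every testing EWS once more exist. A minimal sketch of that generalization (the name list is hypothetical; patch_status() and _pass_status are used exactly as in the hunk above):

    # Hypothetical generalization of did_pass_testing_ews to several EWSes.
    TESTING_EWS_NAMES = ['chromium-ews']  # assumed registry; only one exists today

    def did_pass_all_testing_ews(self, patch):
        return all(self._tool.status_server.patch_status(name, patch.id()) == self._pass_status
                   for name in TESTING_EWS_NAMES)
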
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
index f2c60d9eb..1c2d57b1c 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
@@ -232,6 +232,7 @@ class CommitQueueTest(QueuesTest):
def test_commit_queue(self):
tool = MockTool()
tool.filesystem.write_text_file('/mock-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
+ tool.filesystem.write_text_file('/mock-results/webkit_unit_tests_output.xml', '')
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("commit-queue"),
"next_work_item": "",
@@ -310,6 +311,7 @@ MOCK: release_work_item: commit-queue 10000
def test_rollout(self):
tool = MockTool(log_executive=True)
tool.filesystem.write_text_file('/mock-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
+ tool.filesystem.write_text_file('/mock-results/webkit_unit_tests_output.xml', '')
tool.buildbot.light_tree_on_fire()
expected_stderr = {
"begin_work_queue": self._default_begin_work_queue_stderr("commit-queue"),
@@ -381,6 +383,7 @@ MOCK: release_work_item: commit-queue 10005
queue = SecondThoughtsCommitQueue(MockTool())
queue.begin_work_queue()
queue._tool.filesystem.write_text_file('/mock-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
+ queue._tool.filesystem.write_text_file('/mock-results/webkit_unit_tests_output.xml', '')
queue._options = Mock()
queue._options.port = None
expected_stderr = """MOCK: update_status: commit-queue Cleaned working directory
@@ -436,6 +439,12 @@ The commit-queue is continuing to process your patch.
OutputCapture().assert_outputs(self, queue.report_flaky_tests, [QueuesTest.mock_work_item, test_results, MockZipFile()], expected_stderr=expected_stderr)
+ def test_did_pass_testing_ews(self):
+ tool = MockTool()
+ patch = tool.bugs.fetch_attachment(10000)
+ queue = TestCommitQueue(tool)
+ self.assertFalse(queue.did_pass_testing_ews(patch))
+
class StyleQueueTest(QueuesTest):
def test_style_queue_with_style_exception(self):
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
index f47494acd..fc118949c 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
@@ -38,15 +38,17 @@ from webkitpy.common.net.buildbot import BuildBot
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.user import User
+from webkitpy.common.system.zipfileset import ZipFileSet
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.layout_tests.port import builders
from webkitpy.tool.grammar import pluralize
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
-_baseline_suffix_list = ['png', 'txt']
+_baseline_suffix_list = ['png', 'wav', 'txt']
# FIXME: Should TestResultWriter know how to compute this string?
@@ -57,28 +59,45 @@ def _baseline_name(fs, test_name, suffix):
class RebaselineTest(AbstractDeclarativeCommand):
name = "rebaseline-test"
help_text = "Rebaseline a single test from a buildbot. (Currently works only with build.chromium.org buildbots.)"
- argument_names = "BUILDER_NAME TEST_NAME [PLATFORM_TO_MOVE_EXISTING_BASELINES_TO]"
+ argument_names = "BUILDER_NAME TEST_NAME [PLATFORMS_TO_MOVE_EXISTING_BASELINES_TO]"
- def _results_url(self, builder_name):
+ def __init__(self, options=None, **kwargs):
+ super(RebaselineTest, self).__init__(options, **kwargs)
+ self._zip_file_sets = {}
+
+ def _results_zip_url(self, builder_name):
# FIXME: Generalize this command to work with non-build.chromium.org builders.
- builder = self._tool.chromium_buildbot().builder_with_name(builder_name)
- return builder.accumulated_results_url()
+ return self._tool.chromium_buildbot().builder_with_name(builder_name).accumulated_results_url().replace('results/layout-test-results', 'layout-test-results.zip')
def _baseline_directory(self, builder_name):
port = self._tool.port_factory.get_from_builder_name(builder_name)
return port.baseline_path()
- def _copy_existing_baseline(self, platform_to_move_existing_baselines_to, test_name, suffix):
- port = self._tool.port_factory.get(platform_to_move_existing_baselines_to)
- old_baseline = port.expected_filename(test_name, "." + suffix)
- if not self._tool.filesystem.exists(old_baseline):
- print("No existing baseline for %s." % test_name)
- return
+ def _copy_existing_baseline(self, platforms_to_move_existing_baselines_to, test_name, suffix):
+ old_baselines = []
+ new_baselines = []
+
+ # Need to gather all the baseline paths before modifying the filesystem since
+ # the modifications can affect the results of port.expected_filename.
+ for platform in platforms_to_move_existing_baselines_to:
+ port = self._tool.port_factory.get(platform)
+ old_baseline = port.expected_filename(test_name, "." + suffix)
+ if not self._tool.filesystem.exists(old_baseline):
+ print("No existing baseline for %s." % test_name)
+ continue
+
+ new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
+ if self._tool.filesystem.exists(new_baseline):
+ print("Existing baseline at %s, not copying over it." % new_baseline)
+ continue
+
+ old_baselines.append(old_baseline)
+ new_baselines.append(new_baseline)
+
+ for i in range(len(old_baselines)):
+ old_baseline = old_baselines[i]
+ new_baseline = new_baselines[i]
- new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
- if self._tool.filesystem.exists(new_baseline):
- print("Existing baseline at %s, not copying over it." % new_baseline)
- else:
print("Copying baseline from %s to %s." % (old_baseline, new_baseline))
self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
self._tool.filesystem.copyfile(old_baseline, new_baseline)
@@ -94,6 +113,17 @@ class RebaselineTest(AbstractDeclarativeCommand):
if not self._tool.scm().exists(target_baseline):
self._tool.scm().add(target_baseline)
+ def _update_expectations_file(self, builder_name, test_name):
+ port = self._tool.port_factory.get_from_builder_name(builder_name)
+ expectationsString = port.test_expectations()
+ expectations = TestExpectations(port, None, expectationsString, port.test_configuration())
+
+ for test_configuration in port.all_test_configurations():
+ if test_configuration.version == port.test_configuration().version:
+ expectationsString = expectations.remove_configuration_from_test(test_name, test_configuration)
+
+ self._tool.filesystem.write_text_file(port.path_to_test_expectations_file(), expectationsString)
+
def _test_root(self, test_name):
return os.path.splitext(test_name)[0]
@@ -103,27 +133,47 @@ class RebaselineTest(AbstractDeclarativeCommand):
def _file_name_for_expected_result(self, test_name, suffix):
return "%s-expected.%s" % (self._test_root(test_name), suffix)
- def _rebaseline_test(self, builder_name, test_name, platform_to_move_existing_baselines_to, suffix):
- results_url = self._results_url(builder_name)
+ def _zip_file_set(self, url):
+ return ZipFileSet(url)
+
+ def _fetch_baseline(self, builder_name, test_name, suffix):
+ # FIXME: See https://bugs.webkit.org/show_bug.cgi?id=84762 ... fetching the whole
+ # zip file and then extracting individual results is much slower than just fetching
+ # the result directly from the buildbot, but it guarantees that we are getting correct results.
+ member_name = self._file_name_for_actual_result(test_name, suffix)
+ zip_url = self._results_zip_url(builder_name)
+ if not builder_name in self._zip_file_sets:
+ print "Retrieving " + zip_url
+ self._zip_file_sets[builder_name] = self._zip_file_set(zip_url)
+
+ try:
+ data = self._zip_file_sets[builder_name].read('layout-test-results/' + member_name)
+ print " Found " + member_name
+ return data
+ except KeyError, e:
+ return None
+
+ def _rebaseline_test(self, builder_name, test_name, platforms_to_move_existing_baselines_to, suffix):
baseline_directory = self._baseline_directory(builder_name)
- source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))
- if platform_to_move_existing_baselines_to:
- self._copy_existing_baseline(platform_to_move_existing_baselines_to, test_name, suffix)
+ if platforms_to_move_existing_baselines_to:
+ self._copy_existing_baseline(platforms_to_move_existing_baselines_to, test_name, suffix)
- print "Retrieving %s." % source_baseline
- self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline)
+ self._save_baseline(self._fetch_baseline(builder_name, test_name, suffix), target_baseline)
- def execute(self, options, args, tool):
+ def _rebaseline_test_and_update_expectations(self, builder_name, test_name, platforms_to_move_existing_baselines_to):
for suffix in _baseline_suffix_list:
- if len(args) > 2:
- platform_to_move_existing_baselines_to = args[2]
- else:
- platform_to_move_existing_baselines_to = None
+ self._rebaseline_test(builder_name, test_name, platforms_to_move_existing_baselines_to, suffix)
+ self._update_expectations_file(builder_name, test_name)
- self._rebaseline_test(args[0], args[1], platform_to_move_existing_baselines_to, suffix)
+ def execute(self, options, args, tool):
+ if len(args) > 2:
+ platforms_to_move_existing_baselines_to = args[2:]
+ else:
+ platforms_to_move_existing_baselines_to = None
+ self._rebaseline_test_and_update_expectations(args[0], args[1], platforms_to_move_existing_baselines_to)
class OptimizeBaselines(AbstractDeclarativeCommand):
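
_fetch_baseline now downloads one results zip per builder and caches the ZipFileSet, so rebaselining several suffixes for one builder costs a single fetch. A minimal sketch of the caching pattern (ZipFileSet(url) and read(member) are used as in the hunk above; the free function and cache dict are illustrative):

    # Sketch: one zip fetch per builder, many member reads (mirrors
    # _zip_file_sets/_fetch_baseline above).
    from webkitpy.common.system.zipfileset import ZipFileSet  # as imported in the patch

    _zips = {}

    def read_result(builder_name, zip_url, member_name):
        if builder_name not in _zips:
            _zips[builder_name] = ZipFileSet(zip_url)  # fetched once, then reused
        try:
            return _zips[builder_name].read('layout-test-results/' + member_name)
        except KeyError:  # member missing from the zip -> no baseline to save
            return None
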
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
index 37e8a391b..f5f7db52a 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
@@ -36,58 +36,95 @@ from webkitpy.common.system.executive_mock import MockExecutive
class TestRebaseline(unittest.TestCase):
+ def stub_rebaseline_test_command_and_tool(self):
+
+ class FakeZipFileSet(object):
+ contents = {}
+
+ def read(self, member):
+ return self.contents[member]
+
+ command = RebaselineTest()
+ tool = MockTool()
+ command.bind_to_tool(tool)
+ command._zip_file_set = lambda url: FakeZipFileSet()
+ return (command, tool)
+
def test_tests_to_update(self):
command = Rebaseline()
command.bind_to_tool(MockTool())
build = Mock()
OutputCapture().assert_outputs(self, command._tests_to_update, [build])
+ def test_rebaseline_updates_expectations_file_noop(self):
+ command, tool = self.stub_rebaseline_test_command_and_tool()
+ lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
+ tool.filesystem.write_text_file(lion_port.path_to_test_expectations_file(), """BUGB MAC LINUX XP DEBUG : fast/dom/Window/window-postmessage-clone-really-deep-array.html = PASS
+BUGA DEBUG : fast/css/large-list-of-rules-crash.html = TEXT
+""")
+ tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "fast/dom/Window/window-postmessage-clone-really-deep-array.html"), "Dummy test contents")
+ tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "fast/css/large-list-of-rules-crash.html"), "Dummy test contents")
+ tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "userscripts/another-test.html"), "Dummy test contents")
+
+ expected_stdout = "Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip\n"
+ OutputCapture().assert_outputs(self, command._rebaseline_test_and_update_expectations, ["Webkit Mac10.7", "userscripts/another-test.html", None], expected_stdout=expected_stdout)
+
+ new_expectations = tool.filesystem.read_text_file(lion_port.path_to_test_expectations_file())
+ self.assertEqual(new_expectations, """BUGB MAC LINUX XP DEBUG : fast/dom/Window/window-postmessage-clone-really-deep-array.html = PASS
+BUGA DEBUG : fast/css/large-list-of-rules-crash.html = TEXT
+""")
+
+ def test_rebaseline_updates_expectations_file(self):
+ command, tool = self.stub_rebaseline_test_command_and_tool()
+
+ lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
+ tool.filesystem.write_text_file(lion_port.path_to_test_expectations_file(), "BUGX MAC : userscripts/another-test.html = IMAGE\nBUGZ LINUX : userscripts/another-test.html = IMAGE\n")
+ tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "userscripts/another-test.html"), "Dummy test contents")
+
+ expected_stdout = "Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip\n"
+ OutputCapture().assert_outputs(self, command._rebaseline_test_and_update_expectations, ["Webkit Mac10.7", "userscripts/another-test.html", None], expected_stdout=expected_stdout)
+
+ new_expectations = tool.filesystem.read_text_file(lion_port.path_to_test_expectations_file())
+ self.assertEqual(new_expectations, "BUGX LEOPARD SNOWLEOPARD : userscripts/another-test.html = IMAGE\nBUGZ LINUX : userscripts/another-test.html = IMAGE\n")
+
def test_rebaseline_test(self):
- command = RebaselineTest()
- command.bind_to_tool(MockTool())
- expected_stdout = "Retrieving http://example.com/f/builders/Webkit Linux/results/layout-test-results/userscripts/another-test-actual.txt.\n"
+ command, _ = self.stub_rebaseline_test_command_and_tool()
+ expected_stdout = "Retrieving http://example.com/f/builders/Webkit Linux/layout-test-results.zip\n"
OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Linux", "userscripts/another-test.html", None, "txt"], expected_stdout=expected_stdout)
def test_rebaseline_and_copy_test(self):
- command = RebaselineTest()
- tool = MockTool()
- command.bind_to_tool(tool)
+ command, tool = self.stub_rebaseline_test_command_and_tool()
lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
tool.filesystem.write_text_file(os.path.join(lion_port.layout_tests_dir(), "userscripts/another-test-expected.txt"), "Dummy expected result")
expected_stdout = """Copying baseline from /mock-checkout/LayoutTests/userscripts/another-test-expected.txt to /mock-checkout/LayoutTests/platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt.
-Retrieving http://example.com/f/builders/Webkit Mac10.7/results/layout-test-results/userscripts/another-test-actual.txt.
+Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip
"""
- OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", "chromium-mac-snowleopard", "txt"], expected_stdout=expected_stdout)
+ OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt"], expected_stdout=expected_stdout)
def test_rebaseline_and_copy_test_no_existing_result(self):
- command = RebaselineTest()
- tool = MockTool()
- command.bind_to_tool(tool)
+ command, _ = self.stub_rebaseline_test_command_and_tool()
expected_stdout = """No existing baseline for userscripts/another-test.html.
-Retrieving http://example.com/f/builders/Webkit Mac10.7/results/layout-test-results/userscripts/another-test-actual.txt.
+Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip
"""
- OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", "chromium-mac-snowleopard", "txt"], expected_stdout=expected_stdout)
+ OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt"], expected_stdout=expected_stdout)
def test_rebaseline_and_copy_test_with_lion_result(self):
- command = RebaselineTest()
- tool = MockTool()
- command.bind_to_tool(tool)
+ command, tool = self.stub_rebaseline_test_command_and_tool()
lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
tool.filesystem.write_text_file(os.path.join(lion_port.baseline_path(), "userscripts/another-test-expected.txt"), "Dummy expected result")
expected_stdout = """Copying baseline from /mock-checkout/LayoutTests/platform/chromium-mac/userscripts/another-test-expected.txt to /mock-checkout/LayoutTests/platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt.
-Retrieving http://example.com/f/builders/Webkit Mac10.7/results/layout-test-results/userscripts/another-test-actual.txt.
+Copying baseline from /mock-checkout/LayoutTests/platform/chromium-mac/userscripts/another-test-expected.txt to /mock-checkout/LayoutTests/platform/chromium-mac-leopard/userscripts/another-test-expected.txt.
+Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip
"""
- OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", "chromium-mac-snowleopard", "txt"], expected_stdout=expected_stdout)
+ OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard", "chromium-mac-leopard"], "txt"], expected_stdout=expected_stdout)
def test_rebaseline_and_copy_no_overwrite_test(self):
- command = RebaselineTest()
- tool = MockTool()
- command.bind_to_tool(tool)
+ command, tool = self.stub_rebaseline_test_command_and_tool()
lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
tool.filesystem.write_text_file(os.path.join(lion_port.baseline_path(), "userscripts/another-test-expected.txt"), "Dummy expected result")
@@ -96,9 +133,9 @@ Retrieving http://example.com/f/builders/Webkit Mac10.7/results/layout-test-resu
tool.filesystem.write_text_file(os.path.join(snowleopard_port.baseline_path(), "userscripts/another-test-expected.txt"), "Dummy expected result")
expected_stdout = """Existing baseline at /mock-checkout/LayoutTests/platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt, not copying over it.
-Retrieving http://example.com/f/builders/Webkit Mac10.7/results/layout-test-results/userscripts/another-test-actual.txt.
+Retrieving http://example.com/f/builders/Webkit Mac10.7/layout-test-results.zip
"""
- OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", "chromium-mac-snowleopard", "txt"], expected_stdout=expected_stdout)
+ OutputCapture().assert_outputs(self, command._rebaseline_test, ["Webkit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt"], expected_stdout=expected_stdout)
def test_rebaseline_expectations(self):
command = RebaselineExpectations()
@@ -166,5 +203,5 @@ MOCK run_command: ['echo', 'rebaseline-test', 'Webkit Win', 'userscripts/images.
"MOCK run_command: ['echo', 'optimize-baselines', 'userscripts/another-test.html'], cwd=/mock-checkout\n"
"MOCK run_command: ['echo', 'optimize-baselines', 'userscripts/images.svg'], cwd=/mock-checkout\n")
- command._tests_to_rebaseline = lambda port: [] if not port.name().find('-gpu-') == -1 else ['userscripts/another-test.html', 'userscripts/images.svg']
+ command._tests_to_rebaseline = lambda port: ['userscripts/another-test.html', 'userscripts/images.svg']
OutputCapture().assert_outputs(self, command.execute, [MockOptions(optimize=True), [], tool], expected_stdout=expected_stdout_with_optimize, expected_stderr=expected_stderr_with_optimize)
diff --git a/Tools/Scripts/webkitpy/tool/mocktool.py b/Tools/Scripts/webkitpy/tool/mocktool.py
index 25f82698a..55fde64ad 100644
--- a/Tools/Scripts/webkitpy/tool/mocktool.py
+++ b/Tools/Scripts/webkitpy/tool/mocktool.py
@@ -47,8 +47,11 @@ class MockOptions(object):
# object will be used. Generally speaking unit tests should
# subclass this or provider wrapper functions that set a common
# set of options.
- for key, value in kwargs.items():
- self.__dict__[key] = value
+ self.update(**kwargs)
+
+ def update(self, **kwargs):
+ self.__dict__.update(**kwargs)
+ return self
# FIXME: This should be renamed MockWebKitPatch.
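
update() returning self lets tests build an options object fluently, as PrintExpectationsTest.run_test does above. For example:

    # Fluent use of MockOptions.update(), mirroring run_test above.
    options = MockOptions(all=False, csv=False, full=False,
                          include_keyword=[], exclude_keyword=[]).update(full=True)
    assert options.full  # update() mutates in place and returns the same object
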
diff --git a/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
index 17e543780..3d09b5f76 100644
--- a/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
+++ b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
@@ -142,9 +142,7 @@ class GardeningHTTPRequestHandler(ReflectionHandler):
builder,
self.query['test'][0],
]
- fallback_port = builders.fallback_port_name_for_new_port(builder)
- if fallback_port:
- command.append(fallback_port)
+ command.extend(builders.fallback_port_names_for_new_port(builder))
self._run_webkit_patch(command)
self._serve_text('success')
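
Because move_overwritten_baselines_to is now a list, the handler extends the rebaseline command with every fallback port instead of at most one. A minimal sketch of the resulting command shape (argument values follow the mock expectations in the gardeningserver unit test below, where 'echo' stands in for webkit-patch):

    # Sketch of the command built above; fallback_ports may hold zero or more names.
    def build_rebaseline_command(builder, test, fallback_ports):
        command = ['echo', 'rebaseline-test', builder, test]
        command.extend(fallback_ports)
        return command
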
diff --git a/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py b/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py
index f41a38a78..13e9001b7 100644
--- a/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py
@@ -89,12 +89,10 @@ class BuildCoverageExtrapolatorTest(unittest.TestCase):
port = host.port_factory.get('chromium-win-win7', None)
converter = TestConfigurationConverter(port.all_test_configurations(), port.configuration_specifier_macros())
extrapolator = BuildCoverageExtrapolator(converter)
- self.assertEquals(extrapolator.extrapolate_test_configurations("Webkit Win"), set([TestConfiguration(version='xp', architecture='x86', build_type='release', graphics_type='cpu')]))
+ self.assertEquals(extrapolator.extrapolate_test_configurations("Webkit Win"), set([TestConfiguration(version='xp', architecture='x86', build_type='release')]))
self.assertEquals(extrapolator.extrapolate_test_configurations("Webkit Vista"), set([
- TestConfiguration(version='vista', architecture='x86', build_type='debug', graphics_type='cpu'),
- TestConfiguration(version='vista', architecture='x86', build_type='debug', graphics_type='gpu'),
- TestConfiguration(version='vista', architecture='x86', build_type='release', graphics_type='gpu'),
- TestConfiguration(version='vista', architecture='x86', build_type='release', graphics_type='cpu')]))
+ TestConfiguration(version='vista', architecture='x86', build_type='debug'),
+ TestConfiguration(version='vista', architecture='x86', build_type='release')]))
self.assertRaises(KeyError, extrapolator.extrapolate_test_configurations, "Potato")
@@ -136,37 +134,38 @@ class GardeningExpectationsUpdaterTest(unittest.TestCase):
def test_unknown_failure_type(self):
failure_info_list = [{"testName": "failures/expected/image.html", "builderName": "Webkit Win", "failureTypeList": ["IMAGE", "EXPLODE"]}]
expectations_before = ""
- expectations_after = "\nBUG_NEW XP RELEASE CPU : failures/expected/image.html = IMAGE"
+ expectations_after = "\nBUG_NEW XP RELEASE : failures/expected/image.html = IMAGE"
self.assert_update(failure_info_list, expectations_before=expectations_before, expectations_after=expectations_after)
def test_add_new_expectation(self):
failure_info_list = [{"testName": "failures/expected/image.html", "builderName": "Webkit Win", "failureTypeList": ["IMAGE"]}]
expectations_before = ""
- expectations_after = "\nBUG_NEW XP RELEASE CPU : failures/expected/image.html = IMAGE"
+ expectations_after = "\nBUG_NEW XP RELEASE : failures/expected/image.html = IMAGE"
self.assert_update(failure_info_list, expectations_before=expectations_before, expectations_after=expectations_after)
def test_replace_old_expectation(self):
failure_info_list = [{"testName": "failures/expected/image.html", "builderName": "Webkit Win", "failureTypeList": ["IMAGE"]}]
- expectations_before = "BUG_OLD XP RELEASE CPU : failures/expected/image.html = TEXT"
- expectations_after = "BUG_NEW XP RELEASE CPU : failures/expected/image.html = IMAGE"
+ expectations_before = "BUG_OLD XP RELEASE : failures/expected/image.html = TEXT"
+ expectations_after = "BUG_NEW XP RELEASE : failures/expected/image.html = IMAGE"
self.assert_update(failure_info_list, expectations_before=expectations_before, expectations_after=expectations_after)
def test_pass_expectation(self):
failure_info_list = [{"testName": "failures/expected/image.html", "builderName": "Webkit Win", "failureTypeList": ["PASS"]}]
- expectations_before = "BUG_OLD XP RELEASE CPU : failures/expected/image.html = TEXT"
+ expectations_before = "BUG_OLD XP RELEASE : failures/expected/image.html = TEXT"
expectations_after = ""
self.assert_update(failure_info_list, expectations_before=expectations_before, expectations_after=expectations_after)
def test_supplement_old_expectation(self):
failure_info_list = [{"testName": "failures/expected/image.html", "builderName": "Webkit Win", "failureTypeList": ["IMAGE"]}]
- expectations_before = "BUG_OLD XP RELEASE : failures/expected/image.html = TEXT"
- expectations_after = "BUG_OLD XP RELEASE GPU : failures/expected/image.html = TEXT\nBUG_NEW XP RELEASE CPU : failures/expected/image.html = IMAGE"
+ expectations_before = "BUG_OLD XP RELEASE : failures/expected/text.html = TEXT"
+ expectations_after = ("BUG_OLD XP RELEASE : failures/expected/text.html = TEXT\n"
+ "BUG_NEW XP RELEASE : failures/expected/image.html = IMAGE")
self.assert_update(failure_info_list, expectations_before=expectations_before, expectations_after=expectations_after)
def test_spurious_updates(self):
failure_info_list = [{"testName": "failures/expected/image.html", "builderName": "Webkit Win", "failureTypeList": ["IMAGE"]}]
- expectations_before = "BUG_OLDER MAC LINUX : failures/expected/image.html = IMAGE+TEXT\nBUG_OLD XP RELEASE CPU : failures/expected/image.html = TEXT"
- expectations_after = "BUG_OLDER MAC LINUX : failures/expected/image.html = IMAGE+TEXT\nBUG_NEW XP RELEASE CPU : failures/expected/image.html = IMAGE"
+ expectations_before = "BUG_OLDER MAC LINUX : failures/expected/image.html = IMAGE+TEXT\nBUG_OLD XP RELEASE : failures/expected/image.html = TEXT"
+ expectations_after = "BUG_OLDER MAC LINUX : failures/expected/image.html = IMAGE+TEXT\nBUG_NEW XP RELEASE : failures/expected/image.html = IMAGE"
self.assert_update(failure_info_list, expectations_before=expectations_before, expectations_after=expectations_after)
@@ -189,8 +188,8 @@ class GardeningServerTest(unittest.TestCase):
self._post_to_path("/rebaseline?builder=MOCK+builder&test=user-scripts/another-test.html", expected_stderr=expected_stderr, expected_stdout=expected_stdout)
def test_rebaseline_new_port(self):
- builders._exact_matches = {"MOCK builder": {"port_name": "mock-port-name", "specifiers": set(["mock-specifier"]), "move_overwritten_baselines_to": "mock-port-fallback"}}
- expected_stderr = "MOCK run_command: ['echo', 'rebaseline-test', 'MOCK builder', 'user-scripts/another-test.html', 'mock-port-fallback'], cwd=/mock-checkout\n"
+ builders._exact_matches = {"MOCK builder": {"port_name": "mock-port-name", "specifiers": set(["mock-specifier"]), "move_overwritten_baselines_to": ["mock-port-fallback", "mock-port-fallback2"]}}
+ expected_stderr = "MOCK run_command: ['echo', 'rebaseline-test', 'MOCK builder', 'user-scripts/another-test.html', 'mock-port-fallback', 'mock-port-fallback2'], cwd=/mock-checkout\n"
expected_stdout = "== Begin Response ==\nsuccess\n== End Response ==\n"
self._post_to_path("/rebaseline?builder=MOCK+builder&test=user-scripts/another-test.html", expected_stderr=expected_stderr, expected_stdout=expected_stdout)
diff --git a/Tools/Scripts/webkitpy/tool/steps/commit.py b/Tools/Scripts/webkitpy/tool/steps/commit.py
index 9e69e7980..2f245e024 100644
--- a/Tools/Scripts/webkitpy/tool/steps/commit.py
+++ b/Tools/Scripts/webkitpy/tool/steps/commit.py
@@ -44,6 +44,7 @@ class Commit(AbstractStep):
def options(cls):
return AbstractStep.options() + [
Options.check_builders,
+ Options.non_interactive,
]
def _commit_warning(self, error):
@@ -66,6 +67,8 @@ class Commit(AbstractStep):
try:
self._tool.executive.run_and_throw_if_fail(self._tool.port().check_webkit_style_command() + args, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
+ if self._options.non_interactive:
+ raise
if not self._tool.user.confirm("Are you sure you want to continue?", default="n"):
self._exit(1)
@@ -94,12 +97,14 @@ class Commit(AbstractStep):
self._state["commit_text"] = commit_text
break;
except AmbiguousCommitError, e:
- if self._tool.user.confirm(self._commit_warning(e)):
+ if self._options.non_interactive or self._tool.user.confirm(self._commit_warning(e)):
force_squash = True
else:
# This will correctly interrupt the rest of the commit process.
raise ScriptError(message="Did not commit")
except AuthenticationError, e:
+ if self._options.non_interactive:
+ raise ScriptError(message="Authentication required")
username = self._tool.user.prompt("%s login: " % e.server_host, repeat=5)
if not username:
raise ScriptError("You need to specify the username on %s to perform the commit as." % e.server_host)
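
With --non-interactive, every path that previously prompted now raises or proceeds instead: style-check failures re-raise, ambiguous commits are force-squashed without confirmation, and authentication errors become ScriptErrors. A minimal sketch of that dispatch (the exception classes are the ones handled above; the helper and the confirm text are illustrative):

    # Illustrative summary of the non-interactive branches added above.
    def resolve_commit_error(error, non_interactive, user):
        if isinstance(error, AmbiguousCommitError):
            if non_interactive or user.confirm('Retry, squashing local commits?'):
                return 'force_squash'
            raise ScriptError(message="Did not commit")
        if isinstance(error, AuthenticationError):
            if non_interactive:
                raise ScriptError(message="Authentication required")
            return 'prompt_for_login'
        raise error
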
diff --git a/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py b/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py
index 43f366492..50dfaea2b 100644
--- a/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py
@@ -29,6 +29,7 @@
import unittest
from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.commit import Commit
@@ -39,8 +40,10 @@ class CommitTest(unittest.TestCase):
capture = OutputCapture()
options = MockOptions()
options.git_commit = ""
+ options.non_interactive = True
tool = MockTool()
+ tool.user = None # Will cause any access of tool.user to raise an exception.
step = Commit(tool, options)
state = {
"changed_files": ["test_expectations.txtXXX"],
@@ -55,4 +58,4 @@ class CommitTest(unittest.TestCase):
capture.assert_outputs(self, step.run, [state], expected_stderr="MOCK run_and_throw_if_fail: ['mock-check-webkit-style', '--diff-files', 'platform/chromium/test_expectations.txt'], cwd=/mock-checkout\nCommitted r49824: <http://trac.webkit.org/changeset/49824>\n")
tool.executive = MockExecutive(should_log=True, should_throw_when_run=set(["platform/chromium/test_expectations.txt"]))
- self.assertRaises(SystemExit, capture.assert_outputs, self, step.run, [state])
+ self.assertRaises(ScriptError, capture.assert_outputs, self, step.run, [state])
diff --git a/Tools/Scripts/webkitpy/tool/steps/createbug.py b/Tools/Scripts/webkitpy/tool/steps/createbug.py
index 0ab6f68a2..2638d1973 100644
--- a/Tools/Scripts/webkitpy/tool/steps/createbug.py
+++ b/Tools/Scripts/webkitpy/tool/steps/createbug.py
@@ -50,3 +50,5 @@ class CreateBug(AbstractStep):
if not blocks:
blocks = state.get("bug_blocked")
state["bug_id"] = self._tool.bugs.create_bug(state["bug_title"], state["bug_description"], blocked=blocks, component=self._options.component, cc=cc)
+ if blocks:
+ self._tool.bugs.reopen_bug(blocks, "Re-opened since this is blocked by %s" % state["bug_id"])
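
Creating a bug that blocks another (e.g. a rollout bug) now reopens the blocked bug so it is not left resolved while its blocker is pending; the MOCK reopen_bug line in the rollout test output above exercises this. The call order, in sketch form (create_bug and reopen_bug as used in the patch; arguments abbreviated):

    # Sketch of the new ordering in CreateBug.run; arguments abbreviated.
    bug_id = tool.bugs.create_bug(title, description, blocked=blocks)
    if blocks:
        tool.bugs.reopen_bug(blocks, "Re-opened since this is blocked by %s" % bug_id)
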
diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests.py b/Tools/Scripts/webkitpy/tool/steps/runtests.py
index 758e36b04..ac5493b4e 100644
--- a/Tools/Scripts/webkitpy/tool/steps/runtests.py
+++ b/Tools/Scripts/webkitpy/tool/steps/runtests.py
@@ -29,6 +29,7 @@
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
from webkitpy.common.system.deprecated_logging import log
+from webkitpy.common.system.executive import ScriptError
class RunTests(AbstractStep):
# FIXME: This knowledge really belongs in the commit-queue.
@@ -64,10 +65,16 @@ class RunTests(AbstractStep):
log("Running JavaScriptCore tests")
self._tool.executive.run_and_throw_if_fail(javascriptcore_tests_command, quiet=True, cwd=self._tool.scm().checkout_root)
- webkit_unit_tests_command = self._tool.port().run_webkit_unit_tests_command()
- if webkit_unit_tests_command:
- log("Running WebKit unit tests")
- self._tool.executive.run_and_throw_if_fail(webkit_unit_tests_command, cwd=self._tool.scm().checkout_root)
+ webkit_unit_tests_command = self._tool.port().run_webkit_unit_tests_command()
+ if webkit_unit_tests_command:
+ log("Running WebKit unit tests")
+ args = webkit_unit_tests_command
+ if self._options.non_interactive:
+ args.append("--gtest_output=xml:%s/webkit_unit_tests_output.xml" % self._tool.port().results_directory)
+ try:
+ self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
+ except ScriptError, e:
+ log("Error running webkit_unit_tests: %s" % e.message_with_output())
log("Running run-webkit-tests")
args = self._tool.port().run_webkit_tests_command()
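
In non-interactive (queue) runs, the unit-test command gains a gtest XML flag so results land in the port's results directory, and a unit-test failure is now logged rather than fatal, so run-webkit-tests still runs afterwards. A minimal sketch of the invocation built above (port, executive, log, and ScriptError as used in the patch):

    # Sketch of the non-interactive unit-test invocation added above.
    # ScriptError and log come from webkitpy, as imported in runtests.py.
    args = port.run_webkit_unit_tests_command()
    if non_interactive:
        args.append('--gtest_output=xml:%s/webkit_unit_tests_output.xml' % port.results_directory)
    try:
        executive.run_and_throw_if_fail(args, cwd=checkout_root)
    except ScriptError, e:  # failures are reported but do not abort the step
        log('Error running webkit_unit_tests: %s' % e.message_with_output())
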
diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py b/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
index 972ffb977..fd89ca946 100644
--- a/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
@@ -33,12 +33,14 @@ from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.runtests import RunTests
class RunTestsTest(unittest.TestCase):
- def test_no_unit_tests(self):
+ def test_webkit_run_unit_tests(self):
tool = MockTool(log_executive=True)
tool._deprecated_port.run_python_unittests_command = lambda: None
tool._deprecated_port.run_perl_unittests_command = lambda: None
step = RunTests(tool, MockOptions(test=True, non_interactive=True, quiet=False))
- expected_stderr = """Running run-webkit-tests
+ expected_stderr = """Running WebKit unit tests
+MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests', '--gtest_output=xml:/mock-results/webkit_unit_tests_output.xml'], cwd=/mock-checkout
+Running run-webkit-tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--skip-failing-tests', '--exit-after-n-failures=30', '--results-directory=/mock-results', '--print=actual,config,expected,misc,slowest,unexpected,unexpected-results'], cwd=/mock-checkout
"""
OutputCapture().assert_outputs(self, step.run, [{}], expected_stderr=expected_stderr)