author    Allan Sandfeld Jensen <allan.jensen@digia.com>    2013-09-13 12:51:20 +0200
committer The Qt Project <gerrit-noreply@qt-project.org>    2013-09-19 20:50:05 +0200
commit    d441d6f39bb846989d95bcf5caf387b42414718d (patch)
tree      e367e64a75991c554930278175d403c072de6bb8 /Tools/Scripts
parent    0060b2994c07842f4c59de64b5e3e430525c4b90 (diff)
download  qtwebkit-d441d6f39bb846989d95bcf5caf387b42414718d.tar.gz
Import Qt5x2 branch of QtWebkit for Qt 5.2
Importing a new snapshot of webkit.

Change-Id: I2d01ad12cdc8af8cb015387641120a9d7ea5f10c
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@digia.com>
Diffstat (limited to 'Tools/Scripts')
-rw-r--r--Tools/Scripts/VCSUtils.pm109
-rwxr-xr-xTools/Scripts/bisect-builds10
-rwxr-xr-xTools/Scripts/build-api-tests6
-rwxr-xr-xTools/Scripts/build-dumprendertree9
-rwxr-xr-xTools/Scripts/build-jsc7
-rwxr-xr-xTools/Scripts/build-webkit110
-rwxr-xr-xTools/Scripts/check-for-inappropriate-macros-in-external-headers77
-rwxr-xr-xTools/Scripts/check-for-webkit-framework-include-consistency7
-rwxr-xr-xTools/Scripts/check-inspector-strings3
-rwxr-xr-xTools/Scripts/detect-mismatched-virtual-const167
-rwxr-xr-xTools/Scripts/display-profiler-output938
-rwxr-xr-xTools/Scripts/do-webcore-rename8
-rwxr-xr-xTools/Scripts/dump-webkit-tests-run52
-rwxr-xr-xTools/Scripts/export-w3c-performance-wg-tests4
-rwxr-xr-xTools/Scripts/extract-localizable-js-strings158
-rwxr-xr-xTools/Scripts/extract-localizable-strings1
-rwxr-xr-xTools/Scripts/filter-build-webkit61
-rwxr-xr-xTools/Scripts/generate-coverage-data163
-rwxr-xr-xTools/Scripts/import-w3c-performance-wg-tests2
-rwxr-xr-xTools/Scripts/import-w3c-tests35
-rwxr-xr-x[-rw-r--r--]Tools/Scripts/lint-test-expectations (renamed from Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectorywithlocalcommits.py)21
-rwxr-xr-xTools/Scripts/new-run-webkit-httpd2
-rwxr-xr-xTools/Scripts/new-run-webkit-websocketserver9
-rwxr-xr-xTools/Scripts/old-run-webkit-tests57
-rwxr-xr-xTools/Scripts/pdevenv45
-rwxr-xr-xTools/Scripts/prepare-ChangeLog24
-rwxr-xr-xTools/Scripts/print-msvc-project-dependencies143
-rwxr-xr-xTools/Scripts/print-vse-failure-logs113
-rwxr-xr-xTools/Scripts/run-api-tests24
-rwxr-xr-xTools/Scripts/run-bindings-tests1
-rwxr-xr-xTools/Scripts/run-chromium-webkit-unit-tests52
-rwxr-xr-xTools/Scripts/run-efl-tests3
-rwxr-xr-xTools/Scripts/run-gtk-tests38
-rwxr-xr-xTools/Scripts/run-javascriptcore-tests15
-rwxr-xr-xTools/Scripts/run-jsc1
-rwxr-xr-xTools/Scripts/run-launcher17
-rwxr-xr-xTools/Scripts/run-qtwebkit-tests34
-rwxr-xr-xTools/Scripts/run-sunspider12
-rwxr-xr-xTools/Scripts/run-webkit-httpd2
-rwxr-xr-xTools/Scripts/run-webkit-tests26
-rwxr-xr-xTools/Scripts/sampstat98
-rwxr-xr-xTools/Scripts/svn-create-patch12
-rwxr-xr-xTools/Scripts/test-webkit-scripts5
-rwxr-xr-xTools/Scripts/test-webkitruby34
-rwxr-xr-xTools/Scripts/update-sources-list.py93
-rwxr-xr-xTools/Scripts/update-webkit30
-rwxr-xr-xTools/Scripts/update-webkit-chromium96
-rwxr-xr-xTools/Scripts/update-webkit-dependency2
-rwxr-xr-xTools/Scripts/update-webkit-libs-jhbuild10
-rwxr-xr-xTools/Scripts/update-webkit-localizable-strings12
-rwxr-xr-xTools/Scripts/update-webkit-support-libs2
-rwxr-xr-xTools/Scripts/update-webkit-wincairo-libs2
-rwxr-xr-xTools/Scripts/validate-committer-lists28
-rwxr-xr-xTools/Scripts/webkit-build-directory2
-rwxr-xr-xTools/Scripts/webkit-tools-completion.sh2
-rwxr-xr-xTools/Scripts/webkitdirs.pm695
-rw-r--r--Tools/Scripts/webkitperl/FeatureList.pm189
-rw-r--r--Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl36
-rw-r--r--Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl181
-rw-r--r--Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl228
-rw-r--r--Tools/Scripts/webkitperl/filter-build-webkit_unittest/shouldIgnoreLine_unittests.pl120
-rw-r--r--Tools/Scripts/webkitperl/httpd.pm18
-rw-r--r--Tools/Scripts/webkitperl/prepare-ChangeLog_unittest/resources/cpp_unittests-expected.txt135
-rw-r--r--Tools/Scripts/webkitpy/bindings/main.py22
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py56
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/changelog.py238
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py160
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/checkout.py12
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/checkout_mock.py4
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py23
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/commitinfo.py2
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/deps.py61
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/diff_parser.py29
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py87
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py9
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/scm/git.py86
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/scm/scm.py64
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py24
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py472
-rw-r--r--Tools/Scripts/webkitpy/common/checkout/scm/svn.py49
-rw-r--r--Tools/Scripts/webkitpy/common/config/build.py135
-rw-r--r--Tools/Scripts/webkitpy/common/config/build_unittest.py70
-rw-r--r--Tools/Scripts/webkitpy/common/config/committers.py560
-rw-r--r--Tools/Scripts/webkitpy/common/config/committers_unittest.py35
-rw-r--r--Tools/Scripts/webkitpy/common/config/committervalidator.py10
-rw-r--r--Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/common/config/contributionareas.py2
-rw-r--r--Tools/Scripts/webkitpy/common/config/contributionareas_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/config/contributors.json4184
-rw-r--r--Tools/Scripts/webkitpy/common/config/ews.json56
-rw-r--r--Tools/Scripts/webkitpy/common/config/irc.py2
-rw-r--r--Tools/Scripts/webkitpy/common/config/ports.py117
-rw-r--r--Tools/Scripts/webkitpy/common/config/ports_mock.py11
-rw-r--r--Tools/Scripts/webkitpy/common/config/ports_unittest.py45
-rw-r--r--Tools/Scripts/webkitpy/common/config/urls.py7
-rw-r--r--Tools/Scripts/webkitpy/common/config/urls_unittest.py14
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/common/config/watchlist219
-rw-r--r--Tools/Scripts/webkitpy/common/editdistance_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/find_files.py1
-rw-r--r--Tools/Scripts/webkitpy/common/find_files_unittest.py6
-rw-r--r--Tools/Scripts/webkitpy/common/host.py70
-rw-r--r--Tools/Scripts/webkitpy/common/host_mock.py12
-rw-r--r--Tools/Scripts/webkitpy/common/lru_cache.py1
-rw-r--r--Tools/Scripts/webkitpy/common/lru_cache_unittest.py7
-rw-r--r--Tools/Scripts/webkitpy/common/memoized_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/message_pool.py2
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py4
-rw-r--r--Tools/Scripts/webkitpy/common/net/bugzilla/bug.py11
-rw-r--r--Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py38
-rw-r--r--Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py18
-rw-r--r--Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py7
-rw-r--r--Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py22
-rw-r--r--Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py6
-rw-r--r--Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py28
-rw-r--r--Tools/Scripts/webkitpy/common/net/buildbot/chromiumbuildbot.py47
-rw-r--r--Tools/Scripts/webkitpy/common/net/credentials_unittest.py10
-rw-r--r--Tools/Scripts/webkitpy/common/net/failuremap_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/net/file_uploader.py1
-rw-r--r--Tools/Scripts/webkitpy/common/net/htdigestparser.py54
-rw-r--r--Tools/Scripts/webkitpy/common/net/htdigestparser_unittest.py82
-rw-r--r--Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/net/layouttestresults.py91
-rw-r--r--Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py109
-rw-r--r--Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/net/omahaproxy.py81
-rw-r--r--Tools/Scripts/webkitpy/common/net/omahaproxy_unittest.py139
-rw-r--r--Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/net/statusserver.py2
-rw-r--r--Tools/Scripts/webkitpy/common/net/statusserver_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py10
-rw-r--r--Tools/Scripts/webkitpy/common/net/web_mock.py5
-rw-r--r--Tools/Scripts/webkitpy/common/newstringio.py1
-rw-r--r--Tools/Scripts/webkitpy/common/newstringio_unittest.py6
-rw-r--r--Tools/Scripts/webkitpy/common/prettypatch_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/common/read_checksum_from_png.py1
-rw-r--r--Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py9
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/common/system/autoinstall.py55
-rw-r--r--Tools/Scripts/webkitpy/common/system/crashlogs.py41
-rw-r--r--Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py222
-rw-r--r--Tools/Scripts/webkitpy/common/system/environment_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/system/executive.py109
-rw-r--r--Tools/Scripts/webkitpy/common/system/executive_mock.py11
-rw-r--r--Tools/Scripts/webkitpy/common/system/executive_unittest.py41
-rw-r--r--Tools/Scripts/webkitpy/common/system/file_lock.py1
-rw-r--r--Tools/Scripts/webkitpy/common/system/file_lock_integrationtest.py3
-rw-r--r--Tools/Scripts/webkitpy/common/system/file_lock_mock.py5
-rw-r--r--Tools/Scripts/webkitpy/common/system/filesystem_mock.py4
-rw-r--r--Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py6
-rw-r--r--Tools/Scripts/webkitpy/common/system/filesystem_unittest.py14
-rw-r--r--Tools/Scripts/webkitpy/common/system/logtesting.py2
-rw-r--r--Tools/Scripts/webkitpy/common/system/logutils_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/system/outputcapture.py15
-rw-r--r--Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/common/system/outputtee_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/system/path_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/system/platforminfo.py2
-rw-r--r--Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py16
-rw-r--r--Tools/Scripts/webkitpy/common/system/profiler.py143
-rw-r--r--Tools/Scripts/webkitpy/common/system/profiler_unittest.py28
-rw-r--r--Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py6
-rw-r--r--Tools/Scripts/webkitpy/common/system/user_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/system/workspace.py2
-rw-r--r--Tools/Scripts/webkitpy/common/system/workspace_mock.py2
-rw-r--r--Tools/Scripts/webkitpy/common/system/workspace_unittest.py6
-rw-r--r--Tools/Scripts/webkitpy/common/system/zipfileset_unittest.py6
-rw-r--r--Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/version_check.py1
-rw-r--r--Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/watchlist/watchlist_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/watchlist/watchlistloader.py43
-rw-r--r--Tools/Scripts/webkitpy/common/watchlist/watchlistloader_unittest.py45
-rw-r--r--Tools/Scripts/webkitpy/common/watchlist/watchlistparser.py2
-rw-r--r--Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py17
-rw-r--r--Tools/Scripts/webkitpy/common/watchlist/watchlistrule_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/common/webkit_finder.py2
-rw-r--r--Tools/Scripts/webkitpy/common/webkitunittest.py2
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py149
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py57
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/controllers/manager.py482
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py156
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py31
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py9
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py13
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py44
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py25
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py22
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py111
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py157
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/result_summary.py82
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py12
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py49
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py27
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_failures.py1
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_input.py1
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_results.py15
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py260
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py135
-rwxr-xr-xTools/Scripts/webkitpy/layout_tests/port/chromium.py455
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py721
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py296
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py171
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py118
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py129
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py107
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py223
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py68
-rwxr-xr-xTools/Scripts/webkitpy/layout_tests/port/chromium_win.py157
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py125
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/port/win.py99
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py15
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py317
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py711
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py1
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server_unittest.py10
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/layout_tests/servers/http_server.py12
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py65
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/layout_tests/servers/http_server_base_unittest.py (renamed from Tools/Scripts/make-gypi)62
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py2
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/servers/http_server_unittest.py41
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/servers/websocket_server.py1
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py167
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py99
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py1
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py7
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/views/printing.py237
-rw-r--r--Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py168
-rw-r--r--Tools/Scripts/webkitpy/performance_tests/perftest.py395
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py349
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py271
-rw-r--r--Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py562
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py621
-rw-r--r--Tools/Scripts/webkitpy/port/__init__.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/__init__.py)1
-rw-r--r--Tools/Scripts/webkitpy/port/apple.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/apple.py)28
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/port/base.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/base.py)161
-rw-r--r--Tools/Scripts/webkitpy/port/base_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py)70
-rw-r--r--Tools/Scripts/webkitpy/port/builders.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/builders.py)74
-rw-r--r--Tools/Scripts/webkitpy/port/builders_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py)5
-rw-r--r--Tools/Scripts/webkitpy/port/config.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/config.py)1
-rw-r--r--Tools/Scripts/webkitpy/port/config_standalone.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/config_standalone.py)1
-rw-r--r--Tools/Scripts/webkitpy/port/config_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/config_unittest.py)8
-rw-r--r--Tools/Scripts/webkitpy/port/driver.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/driver.py)72
-rw-r--r--Tools/Scripts/webkitpy/port/driver_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py)14
-rw-r--r--Tools/Scripts/webkitpy/port/efl.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/efl.py)33
-rw-r--r--Tools/Scripts/webkitpy/port/efl_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/efl_unittest.py)15
-rw-r--r--Tools/Scripts/webkitpy/port/factory.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/factory.py)21
-rw-r--r--Tools/Scripts/webkitpy/port/factory_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py)51
-rw-r--r--Tools/Scripts/webkitpy/port/gtk.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/gtk.py)53
-rw-r--r--Tools/Scripts/webkitpy/port/gtk_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py)50
-rw-r--r--Tools/Scripts/webkitpy/port/http_lock.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/http_lock.py)11
-rw-r--r--Tools/Scripts/webkitpy/port/http_lock_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py)3
-rw-r--r--Tools/Scripts/webkitpy/port/image_diff.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/image_diff.py)3
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/port/image_diff_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py)6
-rw-r--r--Tools/Scripts/webkitpy/port/leakdetector.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/leakdetector.py)0
-rw-r--r--Tools/Scripts/webkitpy/port/leakdetector_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/leakdetector_unittest.py)4
-rw-r--r--Tools/Scripts/webkitpy/port/mac.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/mac.py)91
-rw-r--r--Tools/Scripts/webkitpy/port/mac_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py)34
-rw-r--r--Tools/Scripts/webkitpy/port/mock_drt.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py)8
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/port/mock_drt_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py)11
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/port/port_testcase.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py)79
-rw-r--r--Tools/Scripts/webkitpy/port/pulseaudio_sanitizer.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py)4
-rw-r--r--Tools/Scripts/webkitpy/port/pulseaudio_sanitizer_mock.py (renamed from Tools/Scripts/webkitpy/common/checkout/deps_mock.py)22
-rw-r--r--Tools/Scripts/webkitpy/port/qt.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/qt.py)52
-rw-r--r--Tools/Scripts/webkitpy/port/qt_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py)40
-rw-r--r--Tools/Scripts/webkitpy/port/server_process.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/server_process.py)2
-rw-r--r--Tools/Scripts/webkitpy/port/server_process_mock.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py)0
-rw-r--r--Tools/Scripts/webkitpy/port/server_process_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py)12
-rw-r--r--Tools/Scripts/webkitpy/port/test.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/test.py)50
-rw-r--r--Tools/Scripts/webkitpy/port/win.py284
-rw-r--r--Tools/Scripts/webkitpy/port/win_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py)19
-rw-r--r--Tools/Scripts/webkitpy/port/xvfbdriver.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py)15
-rw-r--r--Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py (renamed from Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver_unittest.py)16
-rw-r--r--Tools/Scripts/webkitpy/style/checker.py35
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/style/checker_unittest.py11
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/changelog.py2
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py7
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/cmake.py150
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/cmake_unittest.py90
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/common_unittest.py5
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/cpp.py73
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py116
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py9
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/png_unittest.py5
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/python.py1
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/python_unittest.py19
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/python_unittest_falsepositives.py16
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/test_expectations.py1
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py18
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/text_unittest.py9
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/xcodeproj.py2
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py7
-rw-r--r--Tools/Scripts/webkitpy/style/checkers/xml_unittest.py7
-rw-r--r--Tools/Scripts/webkitpy/style/error_handlers_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/style/filereader_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/style/filter_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/style/main_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/style/optparser_unittest.py18
-rw-r--r--Tools/Scripts/webkitpy/style/patchreader_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/test/finder_unittest.py9
-rw-r--r--Tools/Scripts/webkitpy/test/main.py2
-rw-r--r--Tools/Scripts/webkitpy/test/main_unittest.py8
-rw-r--r--Tools/Scripts/webkitpy/test/printer.py27
-rw-r--r--Tools/Scripts/webkitpy/test/runner_unittest.py6
-rw-r--r--Tools/Scripts/webkitpy/test/skip_unittest.py5
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/__init__.py9
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/__init___unittest.py7
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py46
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py56
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py2
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py10
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py635
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py72
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py75
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py63
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py28
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py473
-rwxr-xr-xTools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py255
-rw-r--r--Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py168
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests.py6
-rw-r--r--Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests_unittest.py7
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/botinfo.py5
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py5
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py3
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py33
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py9
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/irc_command.py235
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py50
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py15
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py49
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py81
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/queueengine.py15
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py10
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/sheriff.py15
-rw-r--r--Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/__init__.py7
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py8
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py6
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/adduserstogroups.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/analyzechangelog.py8
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/analyzechangelog_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/bugfortest.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/bugsearch.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/chromechannels.py104
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/chromechannels_unittest.py99
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/commandtest.py1
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/download.py60
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/download_unittest.py17
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py111
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py47
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/findusers.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/gardenomatic.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/newcommitbot.py172
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/newcommitbot_unittest.py129
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/openbugs.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/queries.py94
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/queries_unittest.py23
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/queues.py95
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/queues_unittest.py111
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/queuestest.py6
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/rebaseline.py28
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py96
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/roll.py74
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/roll_unittest.py63
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/sheriffbot.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py18
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/suggestnominations.py272
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/suggestnominations_unittest.py42
-rw-r--r--Tools/Scripts/webkitpy/tool/commands/upload.py34
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/tool/comments.py0
-rwxr-xr-xTools/Scripts/webkitpy/tool/gcovr1029
-rw-r--r--Tools/Scripts/webkitpy/tool/grammar_unittest.py5
-rw-r--r--[-rwxr-xr-x]Tools/Scripts/webkitpy/tool/main.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/mocktool.py5
-rw-r--r--Tools/Scripts/webkitpy/tool/mocktool_unittest.py6
-rw-r--r--Tools/Scripts/webkitpy/tool/multicommandtool.py24
-rw-r--r--Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py23
-rw-r--r--Tools/Scripts/webkitpy/tool/servers/gardeningserver.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py4
-rw-r--r--Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py23
-rw-r--r--Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py8
-rw-r--r--Tools/Scripts/webkitpy/tool/servers/reflectionhandler_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/__init__.py5
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/applywatchlist_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/build.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/checkstyle.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py12
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py30
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/commit.py16
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/commit_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/discardlocalchanges.py (renamed from Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py)28
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/discardlocalchanges_unittest.py97
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/haslanded.py120
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/haslanded_unittest.py299
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/options.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/preparechangelog.py60
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py94
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert_unittest.py24
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/runtests.py48
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py23
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/steps_unittest.py9
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py7
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/update.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/update_unittest.py26
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py2
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/updatechromiumdeps.py77
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/validatechangelogs.py15
-rw-r--r--Tools/Scripts/webkitpy/tool/steps/validatechangelogs_unittest.py15
-rw-r--r--Tools/Scripts/webkitpy/w3c/__init__.py1
-rw-r--r--Tools/Scripts/webkitpy/w3c/test_converter.py193
-rw-r--r--Tools/Scripts/webkitpy/w3c/test_converter_unittest.py319
-rw-r--r--Tools/Scripts/webkitpy/w3c/test_importer.py450
-rw-r--r--Tools/Scripts/webkitpy/w3c/test_importer_unittest.py79
-rw-r--r--Tools/Scripts/webkitpy/w3c/test_parser.py162
-rw-r--r--Tools/Scripts/webkitpy/w3c/test_parser_unittest.py217
-rw-r--r--Tools/Scripts/webkitpy/webkitpy.pyproj538
-rw-r--r--Tools/Scripts/webkitpy/webkitpy.sln18
-rw-r--r--Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/fake-data-failing-expected.txt11
-rw-r--r--Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/pass-expected.txt0
-rw-r--r--Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Fail.h29
-rw-r--r--Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Pass.h6
-rwxr-xr-xTools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/run-test.rb74
436 files changed, 19284 insertions, 13160 deletions
diff --git a/Tools/Scripts/VCSUtils.pm b/Tools/Scripts/VCSUtils.pm
index b3b8ec290..305d65bc0 100644
--- a/Tools/Scripts/VCSUtils.pm
+++ b/Tools/Scripts/VCSUtils.pm
@@ -1,4 +1,4 @@
-# Copyright (C) 2007, 2008, 2009 Apple Inc. All rights reserved.
+# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
# Copyright (C) 2009, 2010 Chris Jerdonek (chris.jerdonek@gmail.com)
# Copyright (C) 2010, 2011 Research In Motion Limited. All rights reserved.
# Copyright (C) 2012 Daniel Bates (dbates@intudata.com)
@@ -107,8 +107,11 @@ my $svnVersion;
# Project time zone for Cupertino, CA, US
my $changeLogTimeZone = "PST8PDT";
-my $gitDiffStartRegEx = qr#^diff --git (\w/)?(.+) (\w/)?([^\r\n]+)#;
+my $gitDiffStartRegEx = qr#^diff --git [^\r\n]+#;
+my $gitDiffStartWithPrefixRegEx = qr#^diff --git \w/(.+) \w/([^\r\n]+)#; # We suppose that --src-prefix and --dst-prefix don't contain a non-word character (\W) and end with '/'.
+my $gitDiffStartWithoutPrefixNoSpaceRegEx = qr#^diff --git (\S+) (\S+)$#;
my $svnDiffStartRegEx = qr#^Index: ([^\r\n]+)#;
+my $gitDiffStartWithoutPrefixSourceDirectoryPrefixRegExp = qr#^diff --git ([^/]+/)#;
my $svnPropertiesStartRegEx = qr#^Property changes on: ([^\r\n]+)#; # $1 is normally the same as the index path.
my $svnPropertyStartRegEx = qr#^(Modified|Name|Added|Deleted): ([^\r\n]+)#; # $2 is the name of the property.
my $svnPropertyValueStartRegEx = qr#^\s*(\+|-|Merged|Reverse-merged)\s*([^\r\n]+)#; # $2 is the start of the property's value (which may span multiple lines).
@@ -233,11 +236,33 @@ sub isGitSVN()
return $isGitSVN;
}
+sub gitDirectory()
+{
+ chomp(my $result = `git rev-parse --git-dir`);
+ return $result;
+}
+
+sub gitBisectStartBranch()
+{
+ my $bisectStartFile = File::Spec->catfile(gitDirectory(), "BISECT_START");
+ if (!-f $bisectStartFile) {
+ return "";
+ }
+ open(BISECT_START, $bisectStartFile) or die "Failed to open $bisectStartFile: $!";
+ chomp(my $result = <BISECT_START>);
+ close(BISECT_START);
+ return $result;
+}
+
sub gitBranch()
{
unless (defined $gitBranch) {
chomp($gitBranch = `git symbolic-ref -q HEAD`);
- $gitBranch = "" if exitStatus($?);
+ my $hasDetachedHead = exitStatus($?);
+ if ($hasDetachedHead) {
+ # We may be in a git bisect session.
+ $gitBranch = gitBisectStartBranch();
+ }
$gitBranch =~ s#^refs/heads/##;
$gitBranch = "" if $gitBranch eq "master";
}
@@ -302,12 +327,6 @@ sub chdirReturningRelativePath($)
return File::Spec->abs2rel($previousDirectory, $newDirectory);
}
-sub determineGitRoot()
-{
- chomp(my $gitDir = `git rev-parse --git-dir`);
- return dirname($gitDir);
-}
-
sub determineSVNRoot()
{
my $last = '';
@@ -356,7 +375,7 @@ sub determineSVNRoot()
sub determineVCSRoot()
{
if (isGit()) {
- return determineGitRoot();
+ return dirname(gitDirectory());
}
if (!isSVN()) {
@@ -615,6 +634,32 @@ sub isExecutable($)
return $fileMode % 2;
}
+# Parse the Git diff header start line.
+#
+# Args:
+# $line: "diff --git" line.
+#
+# Returns the path of the target file.
+sub parseGitDiffStartLine($)
+{
+ my $line = shift;
+ $_ = $line;
+ if (/$gitDiffStartWithPrefixRegEx/ || /$gitDiffStartWithoutPrefixNoSpaceRegEx/) {
+ return $2;
+ }
+ # Assume the diff was generated with --no-prefix (e.g. git diff --no-prefix).
+ if (!/$gitDiffStartWithoutPrefixSourceDirectoryPrefixRegExp/) {
+ # FIXME: Moving top directory file is not supported (e.g diff --git A.txt B.txt).
+ die("Could not find '/' in \"diff --git\" line: \"$line\"; only non-prefixed git diffs (i.e. not generated with --no-prefix) that move a top-level directory file are supported.");
+ }
+ my $pathPrefix = $1;
+ if (!/^diff --git \Q$pathPrefix\E.+ (\Q$pathPrefix\E.+)$/) {
+ # FIXME: Moving a file through sub directories of top directory is not supported (e.g diff --git A/B.txt C/B.txt).
+ die("Could not find '/' in \"diff --git\" line: \"$line\"; only non-prefixed git diffs (i.e. not generated with --no-prefix) that move a file between top-level directories are supported.");
+ }
+ return $1;
+}
+
# Parse the next Git diff header from the given file handle, and advance
# the handle so the last line read is the first line after the header.
#
@@ -656,12 +701,15 @@ sub parseGitDiffHeader($$)
my $indexPath;
if (/$gitDiffStartRegEx/) {
+ # Use $POSTMATCH to preserve the end-of-line character.
+ my $eol = $POSTMATCH;
+
# The first and second paths can differ in the case of copies
# and renames. We use the second file path because it is the
# destination path.
- $indexPath = adjustPathForRecentRenamings($4);
- # Use $POSTMATCH to preserve the end-of-line character.
- $_ = "Index: $indexPath$POSTMATCH"; # Convert to SVN format.
+ $indexPath = adjustPathForRecentRenamings(parseGitDiffStartLine($_));
+
+ $_ = "Index: $indexPath$eol"; # Convert to SVN format.
} else {
die("Could not parse leading \"diff --git\" line: \"$line\".");
}
@@ -690,9 +738,9 @@ sub parseGitDiffHeader($$)
$isNew = 1 if $1;
} elsif (/^similarity index (\d+)%/) {
$similarityIndex = $1;
- } elsif (/^copy from (\S+)/) {
+ } elsif (/^copy from ([^\t\r\n]+)/) {
$copiedFromPath = $1;
- } elsif (/^rename from (\S+)/) {
+ } elsif (/^rename from ([^\t\r\n]+)/) {
# FIXME: Record this as a move rather than as a copy-and-delete.
# This will simplify adding rename support to svn-unapply.
# Otherwise, the hash for a deletion would have to know
@@ -702,9 +750,17 @@ sub parseGitDiffHeader($$)
$copiedFromPath = $1;
$shouldDeleteSource = 1;
} elsif (/^--- \S+/) {
- $_ = "--- $indexPath"; # Convert to SVN format.
+ # Convert to SVN format.
+ # We emit the suffix "\t(revision 0)" to handle $indexPath which contains a space character.
+ # The patch(1) command thinks a file path is characters before a tab.
+ # This suffix make our diff more closely match the SVN diff format.
+ $_ = "--- $indexPath\t(revision 0)";
} elsif (/^\+\+\+ \S+/) {
- $_ = "+++ $indexPath"; # Convert to SVN format.
+ # Convert to SVN format.
+ # We emit the suffix "\t(working copy)" to handle $indexPath which contains a space character.
+ # The patch(1) command thinks a file path is characters before a tab.
+ # This suffix make our diff more closely match the SVN diff format.
+ $_ = "+++ $indexPath\t(working copy)";
$foundHeaderEnding = 1;
} elsif (/^GIT binary patch$/ ) {
$isBinary = 1;
@@ -1005,13 +1061,23 @@ sub parseDiff($$;$)
# Then we are in the body of the diff.
my $isChunkRange = defined(parseChunkRange($line));
$numTextChunks += 1 if $isChunkRange;
+ my $nextLine = <$fileHandle>;
+ my $willAddNewLineAtEndOfFile = defined($nextLine) && $nextLine =~ /^\\ No newline at end of file$/;
+ if ($willAddNewLineAtEndOfFile) {
+ # Diff(1) always emits a LF character preceeding the line "\ No newline at end of file".
+ # We must preserve both the added LF character and the line ending of this sentinel line
+ # or patch(1) will complain.
+ $svnText .= $line . $nextLine;
+ $line = <$fileHandle>;
+ next;
+ }
if ($indexPathEOL && !$isChunkRange) {
# The chunk range is part of the body of the diff, but its line endings should't be
# modified or patch(1) will complain. So, we only modify non-chunk range lines.
$line =~ s/\r\n|\r|\n/$indexPathEOL/g;
}
$svnText .= $line;
- $line = <$fileHandle>;
+ $line = $nextLine;
next;
} # Otherwise, we found a diff header.
@@ -1023,6 +1089,10 @@ sub parseDiff($$;$)
($headerHashRef, $line) = parseDiffHeader($fileHandle, $line);
if (!$optionsHashRef || !$optionsHashRef->{shouldNotUseIndexPathEOL}) {
+ # FIXME: We shouldn't query the file system (via firstEOLInFile()) to determine the
+ # line endings of the file indexPath. Instead, either the caller to parseDiff()
+ # should provide this information or parseDiff() should take a delegate that it
+ # can use to query for this information.
$indexPathEOL = firstEOLInFile($headerHashRef->{indexPath}) if !$headerHashRef->{isNew} && !$headerHashRef->{isBinary};
}
@@ -1815,9 +1885,6 @@ sub gitConfig($)
my ($config) = @_;
my $result = `git config $config`;
- if (($? >> 8)) {
- $result = `git repo-config $config`;
- }
chomp $result;
return $result;
}
diff --git a/Tools/Scripts/bisect-builds b/Tools/Scripts/bisect-builds
index b970db399..301d9fd85 100755
--- a/Tools/Scripts/bisect-builds
+++ b/Tools/Scripts/bisect-builds
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
-# Copyright (C) 2007, 2008, 2011 Apple Inc. All rights reserved.
+# Copyright (C) 2007, 2008, 2009, 2011, 2012, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -187,7 +187,13 @@ while (abs($endIndex - $startIndex) > 1) {
$brokenRevisions{$nightlies[$index]->{rev}} = $nightlies[$index]->{file};
delete $nightlies[$index];
$endIndex--;
- $index = $startIndex + int(($endIndex - $startIndex) / 2);
+ if (scalar(keys %brokenRevisions) % 2 == 0) {
+ # Even tries to bisect to the left
+ $index = int(($startIndex + $index) / 2);
+ } else {
+ # Odd tries to bisect to the right
+ $index = int(($index + $endIndex) / 2);
+ }
}
} while ($didReproduceBug < 0);
diff --git a/Tools/Scripts/build-api-tests b/Tools/Scripts/build-api-tests
index 09d19bf23..d3ec0bbbf 100755
--- a/Tools/Scripts/build-api-tests
+++ b/Tools/Scripts/build-api-tests
@@ -65,10 +65,8 @@ if (isAppleMacWebKit()) {
$result = buildXCodeProject("TestWebKitAPI", $clean, XcodeOptions(), @ARGV);
} elsif (isAppleWinWebKit()) {
chdir "Tools/TestWebKitAPI" or die;
- $result = buildVisualStudioProject("win/TestWebKitAPI.sln", $clean);
-} elsif (isChromium()) {
- # Chromium build everything in one shot. No need to build anything here.
- $result = 0;
+ my $solutionPath = "TestWebKitAPI.vcxproj/TestWebKitAPI.sln";
+ $result = buildVisualStudioProject($solutionPath, $clean);
} else {
die "TestWebKitAPI is not supported on this platform.\n";
}
diff --git a/Tools/Scripts/build-dumprendertree b/Tools/Scripts/build-dumprendertree
index 4e206f4bc..1ff75f7d3 100755
--- a/Tools/Scripts/build-dumprendertree
+++ b/Tools/Scripts/build-dumprendertree
@@ -44,8 +44,6 @@ Usage: $programName [options] [options to pass to build system]
--clean Clean up the build directory
--gtk Build the GTK+ port
--qt Build the Qt port
- --wx Build the wxWindows port
- --chromium Build the Chromium port
--efl Build the EFL port
EOF
@@ -70,9 +68,10 @@ my $result;
if (isAppleMacWebKit()) {
$result = buildXCodeProject("DumpRenderTree", $clean, XcodeOptions(), @ARGV);
} elsif (isAppleWinWebKit()) {
- $result = buildVisualStudioProject("DumpRenderTree.sln", $clean);
-} elsif (isQt() || isGtk() || isWx() || isChromium() || isEfl()) {
- # Qt, Gtk wxWindows, Chromium and EFL build everything in one shot. No need to build anything here.
+ my $drtSolutionPath = "DumpRenderTree.vcxproj/DumpRenderTree.sln";
+ $result = buildVisualStudioProject($drtSolutionPath, $clean);
+} elsif (isQt() || isGtk() || isEfl()) {
+ # Qt, Gtk and EFL build everything in one shot. No need to build anything here.
$result = 0;
} else {
die "Building not defined for this platform!\n";
diff --git a/Tools/Scripts/build-jsc b/Tools/Scripts/build-jsc
index 3fbf43fe1..6b29b7f21 100755
--- a/Tools/Scripts/build-jsc
+++ b/Tools/Scripts/build-jsc
@@ -82,13 +82,10 @@ sub buildMyProject
if (isAppleMacWebKit()) {
$result = system "sh", "-c", ('xcodebuild -project ' . $projectName . '.xcodeproj "$@" | grep -v setenv && exit ${PIPESTATUS[0]}'), "xcodebuild", @options, @ARGV, @coverageSupportOptions;
} elsif (isAppleWinWebKit()) {
- $result = buildVisualStudioProject("$projectName.vcproj/$projectName.sln");
+ $result = buildVisualStudioProject("$projectName.vcxproj/$projectName.submit.sln");
} elsif (isGtk()) {
checkForArgumentAndRemoveFromARGV("--gtk");
- $result = buildGtkProject($projectName, 0, @ARGV);
- } elsif (isWx()) {
- # Builds everything in one-shot. No need to build anything here.
- $result = 0;
+ $result = buildGtkProject($projectName, 0);
} else {
die "Building not defined for this platform!\n";
}
diff --git a/Tools/Scripts/build-webkit b/Tools/Scripts/build-webkit
index 3e53fa26d..69f983326 100755
--- a/Tools/Scripts/build-webkit
+++ b/Tools/Scripts/build-webkit
@@ -51,14 +51,14 @@ chdirWebKit();
my $showHelp = 0;
my $clean = 0;
-my $useGYP = 0;
my $minimal = 0;
my $installHeaders;
my $installLibs;
my $prefixPath;
my $makeArgs = "";
-my $cmakeArgs;
+my $cmakeArgs = "";
my $onlyWebKitProject = 0;
+my $noWebKit1 = 0;
my $noWebKit2 = 0;
my $coverageSupport = 0;
my $startTime = time();
@@ -107,13 +107,14 @@ my $usage = <<EOF;
Usage: $programName [options] [options to pass to build system]
--help Show this help message
--clean Cleanup the build directory
- --debug Compile in debug mode
- --gyp Use GYP-generated project files
+ --debug Compile with Debug configuration
+ --release Compile with Release configuration
+ --sdk=<sdk> Use a specific Xcode SDK (iOS and Mac only)
+ --device Use the current iphoneos.internal SDK (iOS only)
+ --simulator Use the current iphonesimulator SDK (iOS only)
--coverage Enable Code Coverage support (Mac only)
--blackberry Build the BlackBerry port on Mac/Linux
- --chromium Build the Chromium port on Mac/Win/Linux
- --chromium-android Build the Chromium port on Android
--efl Build the EFL port
--gtk Build the GTK+ port
--qt Build the Qt port
@@ -128,11 +129,12 @@ Usage: $programName [options] [options to pass to build system]
--prefix=<path> Set installation prefix to the given path (Gtk/Efl/BlackBerry only)
--makeargs=<arguments> Optional Makefile flags
--qmakearg=<arguments> Optional qmake flags (Qt only, e.g. --qmakearg="CONFIG+=webkit2" to build WebKit2)
- --cmakearg=<arguments> Optional CMake flags (e.g. --cmakearg="-DFOO=bar -DCMAKE_PREFIX_PATH=/usr/local")
+ --cmakeargs=<arguments> Optional CMake flags (e.g. --cmakeargs="-DFOO=bar -DCMAKE_PREFIX_PATH=/usr/local")
--minimal No optional features, unless explicitly enabled
--only-webkit Build only the WebKit project
+ --no-webkit1 Omit WebKit1 code from the build (Qt/EFL/GTK only)
--no-webkit2 Omit WebKit2 code from the build
EOF
@@ -140,7 +142,6 @@ EOF
my %options = (
'help' => \$showHelp,
'clean' => \$clean,
- 'gyp' => \$useGYP,
'install-headers=s' => \$installHeaders,
'install-libs=s' => \$installLibs,
'prefix=s' => \$prefixPath,
@@ -148,6 +149,7 @@ my %options = (
'cmakeargs=s' => \$cmakeArgs,
'minimal' => \$minimal,
'only-webkit' => \$onlyWebKitProject,
+ 'no-webkit1' => \$noWebKit1,
'no-webkit2' => \$noWebKit2,
'coverage' => \$coverageSupport,
);
@@ -202,23 +204,9 @@ if (!isQt() && !-d "WebKitLibraries") {
die "Error: No WebKitLibraries directory found. Please do a fresh checkout.\n";
}
-# Generate the generate project files from .gyp files
-if ($useGYP) {
- system("perl", "Tools/Scripts/generate-project-files") == 0 or die "Failed to run generate-project-files";
-}
-
my @options = ();
-# enable autotool options accordingly
-if (isGtk()) {
- @options = @ARGV;
- foreach (@features) {
- push @options, autotoolsFlag(${$_->{value}}, $_->{option});
- }
-
- push @options, "--prefix=" . $prefixPath if defined($prefixPath);
- push @options, "--makeargs=" . $makeArgs if $makeArgs;
-} elsif (isAppleMacWebKit()) {
+if (isAppleMacWebKit()) {
push @options, XcodeOptions();
sub option($$$)
@@ -239,6 +227,9 @@ if (isGtk()) {
# WebKit2 is only supported in SnowLeopard and later at present.
push @projects, ("Source/WebKit2", "Tools/MiniBrowser") if osXVersion()->{"minor"} >= 6 and !$noWebKit2;
+ # WebInspectorUI must come before WebKit and WebKit2
+ unshift @projects, ("Source/WebInspectorUI");
+
# Build Tools needed for Apple ports
push @projects, ("Tools/DumpRenderTree", "Tools/WebKitTestRunner", "Source/ThirdParty/gtest", "Tools/TestWebKitAPI");
@@ -254,6 +245,7 @@ if (isGtk()) {
push @options, "--install-headers=" . $installHeaders if defined($installHeaders);
push @options, "--install-libs=" . $installLibs if defined($installLibs);
push @options, "--makeargs=" . $makeArgs if $makeArgs;
+ push @options, "WEBKIT_CONFIG-=build_webkit1" if $noWebKit1;
push @options, "WEBKIT_CONFIG-=build_webkit2" if $noWebKit2;
if (checkForArgumentAndRemoveFromARGV("-2")) {
@@ -282,45 +274,11 @@ if (isInspectorFrontend()) {
my $result = 0;
-if (isWx()) {
- $makeArgs .= " --port=wx";
-
- downloadWafIfNeeded();
- @options = split(/ /, $makeArgs);
- @projects = ();
- $result = buildWafProject('.', $clean, @options);
- exit exitStatus($result) if exitStatus($result);
-}
-
-if (isChromium()) {
- # Currently chromium does not honour the features passed to build-webkit.
- # Until this is solved, we issue a warning about that.
- foreach (@features) {
- if (${$_->{value}} ne $_->{default}) {
- print "\n";
- print "===========================================================\n";
- print " Chromium does not honor the features passed to build-webkit.\n";
- print " The preferred way is to set up your overrides in ~/.gyp/include.gypi.\n";
- print " See https://trac.webkit.org/wiki/Chromium#Buildingwithfeaturedefines\n";
- print " on how to do that.\n";
- print "===========================================================\n";
- last;
- }
- }
-
- @options = @ARGV;
- # Chromium doesn't build by project directories.
- @projects = ();
- push @options, "--makeargs=" . $makeArgs if $makeArgs;
- $result = buildChromium($clean, @options);
- exit exitStatus($result) if exitStatus($result);
-}
-
if (isEfl()) {
# By default we build using all of the available CPUs.
$makeArgs .= ($makeArgs ? " " : "") . "-j" . numberOfCPUs() if $makeArgs !~ /-j\s*\d+/;
- $cmakeArgs .= ($cmakeArgs ? " " : "") . "-DENABLE_WEBKIT=ON";
- $cmakeArgs .= " -DENABLE_WEBKIT2=ON" if !$noWebKit2;
+ $cmakeArgs = "-DENABLE_WEBKIT=OFF " . $cmakeArgs if $noWebKit1;
+ $cmakeArgs = "-DENABLE_WEBKIT2=OFF " . $cmakeArgs if $noWebKit2;
# We remove CMakeCache to avoid the bots to reuse cached flags when
# we enable new features. This forces a reconfiguration.
@@ -363,21 +321,25 @@ for my $dir (@projects) {
}
my $project = basename($dir);
+ my $baseProductDir = baseProductDir();
if (isGtk()) {
- if ($noWebKit2) {
- unshift(@options, "--disable-webkit2");
- }
- $result = buildGtkProject($project, $clean, @options);
+ $result = buildGtkProject($project, $clean, $prefixPath, $makeArgs, $noWebKit1, $noWebKit2, @features);
} elsif (isAppleMacWebKit()) {
my @local_options = @options;
- push @local_options, XcodeCoverageSupportOptions() if $coverageSupport && $project ne "ANGLE";
- my $useGYPProject = $useGYP && ($project =~ "WebCore|JavaScriptCore");
- my $projectPath = $useGYPProject ? "gyp/$project" : $project;
- $projectPath = $project =~ /gtest/ ? "xcode/gtest" : $project;
+ push @local_options, XcodeCoverageSupportOptions() if $coverageSupport;
+ my $projectPath = $project =~ /gtest/ ? "xcode/gtest" : $project;
$result = buildXCodeProject($projectPath, $clean, @local_options, @ARGV);
} elsif (isAppleWinWebKit()) {
if ($project eq "WebKit") {
- $result = buildVisualStudioProject("win/WebKit.vcproj/WebKit.sln", $clean);
+ my $webkitSolutionPath = "WebKit.vcxproj/WebKit.sln";
+ $result = buildVisualStudioProject($webkitSolutionPath, $clean);
+ my $vsConfiguration = configurationForVisualStudio();
+ if (usingVisualStudioExpress()) {
+ # Visual Studio Express is so lame it can't stdout build failures.
+ # So we find its logs and dump them to the console ourselves.
+ open(my $OUTPUT_HANDLE, '<', "$baseProductDir/$vsConfiguration/BuildOutput.htm") or die "Could not open build log file at $baseProductDir/$vsConfiguration/BuildOutput.htm";
+ print while (<$OUTPUT_HANDLE>);
+ }
}
}
# Various build* calls above may change the CWD.
@@ -385,15 +347,9 @@ for my $dir (@projects) {
if (exitStatus($result)) {
my $scriptDir = relativeScriptsDir();
- if (usingVisualStudioExpress()) {
- # Visual Studio Express is so lame it can't stdout build failures.
- # So we find its logs and dump them to the console ourselves.
- system(File::Spec->catfile($scriptDir, "print-vse-failure-logs"));
- }
if (isAppleWinWebKit()) {
print "\n\n===== BUILD FAILED ======\n\n";
print "Please ensure you have run $scriptDir/update-webkit to install dependencies.\n\n";
- my $baseProductDir = baseProductDir();
print "You can view build errors by checking the BuildLog.htm files located at:\n$baseProductDir/obj/<project>/<config>.\n";
}
exit exitStatus($result);
@@ -452,9 +408,7 @@ sub writeCongrats()
print "\n";
print "====================================================================\n";
print " WebKit is now built ($buildTime). \n";
- if (!isChromium()) {
- print " To run $launcherName with this newly-built code, use the\n";
- print " \"$launcherPath\" script.\n";
- }
+ print " To run $launcherName with this newly-built code, use the\n";
+ print " \"$launcherPath\" script.\n";
print "====================================================================\n";
}
diff --git a/Tools/Scripts/check-for-inappropriate-macros-in-external-headers b/Tools/Scripts/check-for-inappropriate-macros-in-external-headers
new file mode 100755
index 000000000..7e6c31f0e
--- /dev/null
+++ b/Tools/Scripts/check-for-inappropriate-macros-in-external-headers
@@ -0,0 +1,77 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script checks that the given headers in the framework build product do
+# not contain Platform.h and Compiler.h macros such as PLATFORM, COMPILER, etc.
+# This is meant to limit the exposure of the WTF headers, ensuring that if
+# clients include these headers they would not also need WTF's Platform.h.
+
+base_directory = ENV['TARGET_BUILD_DIR'] or throw "Unable to find TARGET_BUILD_DIR in the environment!"
+project_name = ENV['PROJECT_NAME'] or throw "Unable to find PROJECT_NAME in the environment!"
+$is_shallow_bundle = (ENV['SHALLOW_BUNDLE'] || "NO").upcase == "YES"
+
+$error_printed = false
+
+def print_error(msg)
+ $error_printed = true
+ STDERR.puts "ERROR: #{msg}"
+end
+
+def framework_headers_for_path(framework, path)
+ full_path = File.join Dir.pwd, framework, $is_shallow_bundle ? "" : "Versions/A/", path
+ if File.directory? full_path
+ Dir.glob "#{full_path}/**/*.h"
+ elsif File.exists? full_path
+ [full_path]
+ else
+ print_error "path '#{full_path}' for argument '#{path}' does not exist."
+ [] # Return an empty list so we can continue to check the other paths.
+ end
+end
+
+def verify_macros_in_header(header)
+ File.open(header) do |file|
+ file.each_line.with_index do |line, index|
+ # Check for the common macros from Platform.h and Compiler.h.
+ # NOTE: Negative lookahead (?!error) prevents matching "#error WebKit was not available prior to Mac OS X 10.2".
+ # NOTE: Negative lookahead (?!:2) prevents matching OS2 in macros like "defined(__OS2__)".
+ if match = /^\s*#(?!error).*?\b(PLATFORM|CPU|HAVE|OS(?!2)|USE|ENABLE|COMPILER)/.match(line)
+ print_error "'#{header}:#{index+1}' included forbidden macro '#{match[1]}' => '#{line.chomp}'"
+ end
+ end
+ end
+end
+
+
+Dir.chdir base_directory
+
+framework = "#{project_name}.framework"
+ARGV.each do |path|
+ framework_headers_for_path(framework, path).each do |header|
+ verify_macros_in_header(header)
+ end
+end
+
+exit 1 if $error_printed
diff --git a/Tools/Scripts/check-for-webkit-framework-include-consistency b/Tools/Scripts/check-for-webkit-framework-include-consistency
index 339fa7e18..fabc6922c 100755
--- a/Tools/Scripts/check-for-webkit-framework-include-consistency
+++ b/Tools/Scripts/check-for-webkit-framework-include-consistency
@@ -27,10 +27,6 @@
base_directory = ENV['TARGET_BUILD_DIR'] or throw "Unable to find TARGET_BUILD_DIR in the environment!"
is_shallow_bundle = (ENV['SHALLOW_BUNDLE'] || "NO").upcase == "YES"
-unless base_directory
- throw "Unable to find TARGET_BUILD_DIR in the environment!"
-end
-
Dir.chdir base_directory
$PERMITTED_INCLUDE_TYPES = { :public => [ :public ], :private => [ :public, :private ] }
@@ -73,9 +69,6 @@ def resolve_include(header, included_header, permitted_types)
# A header of any type including a WebCore header is a recipe for disaster.
if framework == "WebCore"
- # <rdar://problem/7718826> WebKeyGenerator.h should not include a WebCore header
- return if header =~ /\/WebKeyGenerator.h$/ and included_header_name == "WebCoreKeyGenerator.h"
-
print_error "#{header} included #{included_header}!"
return
end
diff --git a/Tools/Scripts/check-inspector-strings b/Tools/Scripts/check-inspector-strings
index 267c03a68..2408031df 100755
--- a/Tools/Scripts/check-inspector-strings
+++ b/Tools/Scripts/check-inspector-strings
@@ -44,7 +44,6 @@ from webkitpy.style.filereader import TextFileReader
from webkitpy.style.main import change_directory
_inspector_directory = "Source/WebCore/inspector/front-end"
-_devtools_directory = "Source/WebKit/chromium/src/js"
_localized_strings = "Source/WebCore/English.lproj/localizedStrings.js"
_log = logging.getLogger("check-inspector-strings")
@@ -142,7 +141,7 @@ if __name__ == "__main__":
strings_extractor = StringsExtractor([r"(WebInspector\.UIString\(.*)", r"\"((?:[^\"\\]|\\.)*?)\""])
file_reader = TextFileReader(filesystem, strings_extractor)
- file_reader.process_paths([_inspector_directory, _devtools_directory])
+ file_reader.process_paths([_inspector_directory])
localized_strings_extractor = LocalizedStringsExtractor()
localized_strings_extractor.process_file(_localized_strings)
raw_ui_strings = frozenset(strings_extractor.strings[0])
diff --git a/Tools/Scripts/detect-mismatched-virtual-const b/Tools/Scripts/detect-mismatched-virtual-const
deleted file mode 100755
index b345cb24a..000000000
--- a/Tools/Scripts/detect-mismatched-virtual-const
+++ /dev/null
@@ -1,167 +0,0 @@
-#!/usr/bin/perl -w
-
-# Copyright (C) 2008 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
-# its contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#
-# This script attempts to find instances of a problem where the signatures
-# of virtual methods fail to match because one is defined 'const', and another
-# is not. For example:
-# virtual void Base::doStuff() const;
-# virtual void Derived::doStuff();
-#
-# The lack of 'const' on the derived class gives it a different signature, and
-# it will therefore not be called when doStuff() is called on a derived object
-# via a base class pointer.
-#
-# Limitations of this script:
-# * It only works on things in the WebCore namespace
-# * Not all templatized methods may be found correctly
-# * It doesn't know anything about inheritance, or if methods are actually virtual
-# * It has lots of false positives (should add a whitelist for known-good signatures,
-# and specific methods)
-# * It's rather slow
-#
-# Added by Simon Fraser <simon.fraser@apple.com>
-#
-# Run the script like this:
-# WebKitTools/Scripts/detect-mismatched-virtual-const WebKitBuild/Debug/WebCore.framework/WebCore
-#
-# Output consists of a series of warnings like this:
-#
-# Both const and non-const versions of bgColor():
-# HTMLDocument::bgColor()
-# HTMLBodyElement::bgColor() const
-# HTMLTableElement::bgColor() const
-# HTMLTableRowElement::bgColor() const
-# HTMLTableCellElement::bgColor() const
-#
-
-use strict;
-no warnings qw /syntax/;
-
-
-my $file = $ARGV[0];
-
-print "Looking for unmatched const methods in $file\n";
-
-if (!open NM, "(nm '$file' | c++filt | sed 's/^/STDOUT:/') 2>&1 |") {
- die "Could not open $file\n";
-}
-
-my $nestedParens;
- $nestedParens = qr /
- [(]
- [^()]*
- (?:
- (??{ $nestedParens })
- [^()]*
- )*
- [)]/x;
-
-my $nestedAngleBrackets;
- $nestedAngleBrackets = qr /
- [<]
- [^<>]*
- (?:
- (??{ $nestedAngleBrackets })
- [^<>]*
- )*
- [>]/x;
-
-my $bal;
- $bal = qr /([^:]+
- (??{ $nestedAngleBrackets })?
- (??{ $nestedParens }))
- ([^()]*)$/x;
-
-my %signature_map = ();
-
-while (<NM>) {
- my $line = $_;
- chomp($line);
- if ($line =~ m/ [tT] WebCore::(.+)$/) {
- my $method = $1;
-
- if ($method =~ /$bal/) {
- my $signature = $1;
- my $const = $2 eq " const";
-
- my $class = substr($method, 0, length($method) - length($signature) - ($const ? 6 : 0));
-
-# print "line: $line\nclass: $class\nmethod: $method\nsignature: $signature\nconst: $const\n\n";
-
- my %method_info = (
- 'class' => $class,
- 'const' => $const,
- 'method' => $method,
- );
-
- push @{$signature_map{$signature}}, \%method_info;
- } else {
- print "unmatched line $method\n\n"
- }
- }
-}
-close NM;
-
-my $sig;
-for $sig (keys %signature_map) {
- #print "\n$sig\n";
-
- my @entries = @{$signature_map{$sig}};
-# print "$#entries\n";
-
- my $num_const = 0;
- my $num_not_const = 0;
- my $i;
- for $i (0 .. $#entries) {
- my $entry = @entries[$i];
-
- my $class = $entry->{'class'};
- my $const = $entry->{'const'};
-
- if ($const) {
- $num_const++;
- } else {
- $num_not_const++;
- }
- }
-
- if ($#entries > 1 && $num_const > 0 && $num_not_const > 0) {
- print "Both const and non-const versions of $sig:\n";
-
- for $i (0 .. $#entries) {
- my $entry = @entries[$i];
- my $method = $entry->{'method'};
- print "\t$method\n";
- }
-
- }
-}
-
-
-
diff --git a/Tools/Scripts/display-profiler-output b/Tools/Scripts/display-profiler-output
new file mode 100755
index 000000000..1a1deeedc
--- /dev/null
+++ b/Tools/Scripts/display-profiler-output
@@ -0,0 +1,938 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require 'rubygems'
+
+require 'readline'
+
+begin
+ require 'json'
+ require 'highline'
+rescue LoadError
+ $stderr.puts "Error: some required gems are not installed!"
+ $stderr.puts
+ $stderr.puts "Try running:"
+ $stderr.puts
+ $stderr.puts "sudo gem install json"
+ $stderr.puts "sudo gem install highline"
+ exit 1
+end
+
+class Bytecode
+ attr_accessor :bytecodes, :bytecodeIndex, :opcode, :description, :topCounts, :bottomCounts, :machineInlinees, :osrExits
+
+ def initialize(bytecodes, bytecodeIndex, opcode, description)
+ @bytecodes = bytecodes
+ @bytecodeIndex = bytecodeIndex
+ @opcode = opcode
+ @description = description
+ @topCounts = [] # "source" counts
+ @bottomCounts = {} # "machine" counts, maps compilations to counts
+ @machineInlinees = {} # maps my compilation to a set of inlinees
+ @osrExits = []
+ end
+
+ def shouldHaveCounts?
+ @opcode != "op_call_put_result"
+ end
+
+ def addTopCount(count)
+ @topCounts << count
+ end
+
+ def addBottomCountForCompilation(count, compilation)
+ @bottomCounts[compilation] = [] unless @bottomCounts[compilation]
+ @bottomCounts[compilation] << count
+ end
+
+ def addMachineInlinee(compilation, inlinee)
+ @machineInlinees[compilation] = {} unless @machineInlinees[compilation]
+ @machineInlinees[compilation][inlinee] = true
+ end
+
+ def totalTopExecutionCount
+ sum = 0
+ @topCounts.each {
+ | value |
+ sum += value.count
+ }
+ sum
+ end
+
+ def topExecutionCount(engine)
+ sum = 0
+ @topCounts.each {
+ | value |
+ if value.engine == engine
+ sum += value.count
+ end
+ }
+ sum
+ end
+
+ def totalBottomExecutionCount
+ sum = 0
+ @bottomCounts.each_value {
+ | counts |
+ max = 0
+ counts.each {
+ | value |
+ max = [max, value.count].max
+ }
+ sum += max
+ }
+ sum
+ end
+
+ def bottomExecutionCount(engine)
+ sum = 0
+ @bottomCounts.each_pair {
+ | compilation, counts |
+ if compilation.engine == engine
+ max = 0
+ counts.each {
+ | value |
+ max = [max, value.count].max
+ }
+ sum += max
+ end
+ }
+ sum
+ end
+
+ def totalExitCount
+ sum = 0
+ @osrExits.each {
+ | exit |
+ sum += exit.count
+ }
+ sum
+ end
+end
+
+class Bytecodes
+ attr_accessor :codeHash, :inferredName, :source, :instructionCount, :machineInlineSites, :compilations
+
+ def initialize(json)
+ @codeHash = json["hash"].to_s
+ @inferredName = json["inferredName"].to_s
+ @source = json["sourceCode"].to_s
+ @instructionCount = json["instructionCount"].to_i
+ @bytecode = {}
+ json["bytecode"].each {
+ | subJson |
+ index = subJson["bytecodeIndex"].to_i
+ @bytecode[index] = Bytecode.new(self, index, subJson["opcode"].to_s, subJson["description"].to_s)
+ }
+ @machineInlineSites = {} # maps compilation to a set of origins
+ @compilations = []
+ end
+
+ def name(limit)
+ if to_s.size > limit
+ "\##{@codeHash}"
+ else
+ to_s
+ end
+ end
+
+ def to_s
+ "#{@inferredName}\##{@codeHash}"
+ end
+
+ def matches(pattern)
+ if pattern =~ /^#/
+ $~.post_match == @codeHash
+ elsif pattern =~ /#/
+ pattern == to_s
+ else
+ pattern == @inferredName or pattern == @codeHash
+ end
+ end
+
+ def each
+ @bytecode.values.sort{|a, b| a.bytecodeIndex <=> b.bytecodeIndex}.each {
+ | value |
+ yield value
+ }
+ end
+
+ def bytecode(bytecodeIndex)
+ @bytecode[bytecodeIndex]
+ end
+
+ def addMachineInlineSite(compilation, origin)
+ @machineInlineSites[compilation] = {} unless @machineInlineSites[compilation]
+ @machineInlineSites[compilation][origin] = true
+ end
+
+ def totalMachineInlineSites
+ sum = 0
+ @machineInlineSites.each_value {
+ | set |
+ sum += set.size
+ }
+ sum
+ end
+
+ def sourceMachineInlineSites
+ set = {}
+ @machineInlineSites.each_value {
+ | mySet |
+ set.merge!(mySet)
+ }
+ set.size
+ end
+
+ def totalMaxTopExecutionCount
+ max = 0
+ @bytecode.each_value {
+ | bytecode |
+ max = [max, bytecode.totalTopExecutionCount].max
+ }
+ max
+ end
+
+ def maxTopExecutionCount(engine)
+ max = 0
+ @bytecode.each_value {
+ | bytecode |
+ max = [max, bytecode.topExecutionCount(engine)].max
+ }
+ max
+ end
+
+ def totalMaxBottomExecutionCount
+ max = 0
+ @bytecode.each_value {
+ | bytecode |
+ max = [max, bytecode.totalBottomExecutionCount].max
+ }
+ max
+ end
+
+ def maxBottomExecutionCount(engine)
+ max = 0
+ @bytecode.each_value {
+ | bytecode |
+ max = [max, bytecode.bottomExecutionCount(engine)].max
+ }
+ max
+ end
+
+ def totalExitCount
+ sum = 0
+ each {
+ | bytecode |
+ sum += bytecode.totalExitCount
+ }
+ sum
+ end
+end
+
+class ProfiledBytecode
+ attr_reader :bytecodeIndex, :description
+
+ def initialize(json)
+ @bytecodeIndex = json["bytecodeIndex"].to_i
+ @description = json["description"].to_s
+ end
+end
+
+class ProfiledBytecodes
+ attr_reader :header, :bytecodes
+
+ def initialize(json)
+ @header = json["header"]
+ @bytecodes = $bytecodes[json["bytecodesID"].to_i]
+ @sequence = json["bytecode"].map {
+ | subJson |
+ ProfiledBytecode.new(subJson)
+ }
+ end
+
+ def each
+ @sequence.each {
+ | description |
+ yield description
+ }
+ end
+end
+
+def originStackFromJSON(json)
+ json.map {
+ | subJson |
+ $bytecodes[subJson["bytecodesID"].to_i].bytecode(subJson["bytecodeIndex"].to_i)
+ }
+end
+
+class CompiledBytecode
+ attr_accessor :origin, :description
+
+ def initialize(json)
+ @origin = originStackFromJSON(json["origin"])
+ @description = json["description"].to_s
+ end
+end
+
+class ExecutionCounter
+ attr_accessor :origin, :engine, :count
+
+ def initialize(origin, engine, count)
+ @origin = origin
+ @engine = engine
+ @count = count
+ end
+end
+
+class OSRExit
+ attr_reader :compilation, :origin, :codeAddresses, :exitKind, :isWatchpoint, :count
+
+ def initialize(compilation, origin, codeAddresses, exitKind, isWatchpoint, count)
+ @compilation = compilation
+ @origin = origin
+ @codeAddresses = codeAddresses
+ @exitKind = exitKind
+ @isWatchpoint = isWatchpoint
+ @count = count
+ end
+
+ def dumpForDisplay(prefix)
+ puts(prefix + "EXIT: due to #{@exitKind}, #{@count} times")
+ end
+end
+
+class Compilation
+ attr_accessor :bytecode, :engine, :descriptions, :counters, :compilationIndex
+ attr_accessor :osrExits, :profiledBytecodes, :numInlinedGetByIds, :numInlinedPutByIds
+ attr_accessor :numInlinedCalls
+
+ def initialize(json)
+ @bytecode = $bytecodes[json["bytecodesID"].to_i]
+ @bytecode.compilations << self
+ @compilationIndex = @bytecode.compilations.size
+ @engine = json["compilationKind"]
+ @descriptions = json["descriptions"].map {
+ | subJson |
+ CompiledBytecode.new(subJson)
+ }
+ @descriptions.each {
+ | description |
+ next if description.origin.empty?
+ description.origin[1..-1].each_with_index {
+ | inlinee, index |
+ description.origin[0].addMachineInlinee(self, inlinee.bytecodes)
+ inlinee.bytecodes.addMachineInlineSite(self, description.origin[0...index])
+ }
+ }
+ @counters = {}
+ json["counters"].each {
+ | subJson |
+ origin = originStackFromJSON(subJson["origin"])
+ counter = ExecutionCounter.new(origin, @engine, subJson["executionCount"].to_i)
+ @counters[origin] = counter
+ origin[-1].addTopCount(counter)
+ origin[0].addBottomCountForCompilation(counter, self)
+ }
+ @osrExits = {}
+ json["osrExits"].each {
+ | subJson |
+ osrExit = OSRExit.new(self, originStackFromJSON(subJson["origin"]),
+ json["osrExitSites"][subJson["id"]].map {
+ | value |
+ value.hex
+ }, subJson["exitKind"], subJson["isWatchpoint"],
+ subJson["count"])
+ osrExit.codeAddresses.each {
+ | codeAddress |
+ osrExits[codeAddress] = [] unless osrExits[codeAddress]
+ osrExits[codeAddress] << osrExit
+ }
+ osrExit.origin[-1].osrExits << osrExit
+ }
+ @profiledBytecodes = []
+ json["profiledBytecodes"].each {
+ | subJson |
+ @profiledBytecodes << ProfiledBytecodes.new(subJson)
+ }
+ @numInlinedGetByIds = json["numInlinedGetByIds"]
+ @numInlinedPutByIds = json["numInlinedPutByIds"]
+ @numInlinedCalls = json["numInlinedCalls"]
+ end
+
+ def counter(origin)
+ @counters[origin]
+ end
+
+ def to_s
+ "#{bytecode}-#{compilationIndex}-#{engine}"
+ end
+end
+
+class DescriptionLine
+ attr_reader :actualCountsString, :sourceCountsString, :disassembly, :shouldShow
+
+ def initialize(actualCountsString, sourceCountsString, disassembly, shouldShow)
+ @actualCountsString = actualCountsString
+ @sourceCountsString = sourceCountsString
+ @disassembly = disassembly
+ @shouldShow = shouldShow
+ end
+
+ def codeAddress
+ if @disassembly =~ /^\s*(0x[0-9a-fA-F]+):/
+ $1.hex
+ else
+ nil
+ end
+ end
+end
+
+if ARGV.length != 1
+ $stderr.puts "Usage: display-profiler-output <path to profiler output file>"
+ $stderr.puts
+ $stderr.puts "The typical usage pattern for the profiler currently looks something like:"
+ $stderr.puts
+ $stderr.puts "Path/To/jsc -p profile.json myprogram.js"
+ $stderr.puts "display-profiler-output profile.json"
+ exit 1
+end
+
+$json = JSON::parse(IO::read(ARGV[0]))
+$bytecodes = $json["bytecodes"].map {
+ | subJson |
+ Bytecodes.new(subJson)
+}
+$compilations = $json["compilations"].map {
+ | subJson |
+ Compilation.new(subJson)
+}
+$engines = ["Baseline", "DFG"]
+
+def lpad(str,chars)
+ if str.length>chars
+ str
+ else
+ "%#{chars}s"%(str)
+ end
+end
+
+def rpad(str, chars)
+ while str.length < chars
+ str += " "
+ end
+ str
+end
+
+def center(str, chars)
+ while str.length < chars
+ str += " "
+ if str.length < chars
+ str = " " + str
+ end
+ end
+ str
+end
+
+def mayBeHash(hash)
+ hash =~ /#/ or hash.size == 6
+end
+
+def sourceOnOneLine(source, limit)
+ source.gsub(/\s+/, ' ')[0...limit]
+end
+
+def screenWidth
+ if $stdin.tty?
+ HighLine::SystemExtensions.terminal_size[0]
+ else
+ 200
+ end
+end
+
+def summary(mode)
+ remaining = screenWidth
+
+ # Figure out how many columns we need for the code block names, and for counts
+ maxCount = 0
+ maxName = 0
+ $bytecodes.each {
+ | bytecodes |
+ maxCount = ([maxCount] + $engines.map {
+ | engine |
+ bytecodes.maxTopExecutionCount(engine)
+ } + $engines.map {
+ | engine |
+ bytecodes.maxBottomExecutionCount(engine)
+ }).max
+ maxName = [bytecodes.to_s.size, maxName].max
+ }
+ maxCountDigits = maxCount.to_s.size
+
+ hashCols = [[maxName, 30].min, "CodeBlock".size].max
+ remaining -= hashCols + 1
+
+ countCols = [maxCountDigits * $engines.size, "Source Counts".size].max
+ remaining -= countCols + 1
+
+ if mode == :full
+ instructionCountCols = 6
+ remaining -= instructionCountCols + 1
+
+ machineCountCols = [maxCountDigits * $engines.size, "Machine Counts".size].max
+ remaining -= machineCountCols + 1
+
+ compilationsCols = 7
+ remaining -= compilationsCols + 1
+
+ inlinesCols = 9
+ remaining -= inlinesCols + 1
+
+ exitCountCols = 7
+ remaining -= exitCountCols + 1
+
+ recentOptsCols = 12
+ remaining -= recentOptsCols + 1
+ end
+
+ if remaining > 0
+ sourceCols = remaining
+ else
+ sourceCols = nil
+ end
+
+ print(center("CodeBlock", hashCols))
+ if mode == :full
+ print(" " + center("#Instr", instructionCountCols))
+ end
+ print(" " + center("Source Counts", countCols))
+ if mode == :full
+ print(" " + center("Machine Counts", machineCountCols))
+ print(" " + center("#Compil", compilationsCols))
+ print(" " + center("Inlines", inlinesCols))
+ print(" " + center("#Exits", exitCountCols))
+ print(" " + center("Last Opts", recentOptsCols))
+ end
+ if sourceCols
+ print(" " + center("Source", sourceCols))
+ end
+ puts
+
+ print(center("", hashCols))
+ if mode == :full
+ print(" " + (" " * instructionCountCols))
+ end
+ print(" " + center("Base/DFG", countCols))
+ if mode == :full
+ print(" " + center("Base/DFG", machineCountCols))
+ print(" " + (" " * compilationsCols))
+ print(" " + center("Src/Total", inlinesCols))
+ print(" " + (" " * exitCountCols))
+ print(" " + center("Get/Put/Call", recentOptsCols))
+ end
+ puts
+ $bytecodes.sort {
+ | a, b |
+ b.totalMaxTopExecutionCount <=> a.totalMaxTopExecutionCount
+ }.each {
+ | bytecode |
+ print(center(bytecode.name(hashCols), hashCols))
+ if mode == :full
+ print(" " + center(bytecode.instructionCount.to_s, instructionCountCols))
+ end
+ print(" " +
+ center($engines.map {
+ | engine |
+ bytecode.maxTopExecutionCount(engine).to_s
+ }.join("/"), countCols))
+ if mode == :full
+ print(" " + center($engines.map {
+ | engine |
+ bytecode.maxBottomExecutionCount(engine).to_s
+ }.join("/"), machineCountCols))
+ print(" " + center(bytecode.compilations.size.to_s, compilationsCols))
+ print(" " + center(bytecode.sourceMachineInlineSites.to_s + "/" + bytecode.totalMachineInlineSites.to_s, inlinesCols))
+ print(" " + center(bytecode.totalExitCount.to_s, exitCountCols))
+ lastCompilation = bytecode.compilations[-1]
+ if lastCompilation
+ optData = [lastCompilation.numInlinedGetByIds,
+ lastCompilation.numInlinedPutByIds,
+ lastCompilation.numInlinedCalls]
+ else
+ optData = ["N/A"]
+ end
+ print(" " + center(optData.join('/'), recentOptsCols))
+ end
+ if sourceCols
+ print(" " + sourceOnOneLine(bytecode.source, sourceCols))
+ end
+ puts
+ }
+end
+
+def executeCommand(*commandArray)
+ command = commandArray[0]
+ args = commandArray[1..-1]
+ case command
+ when "help", "h", "?"
+ puts "summary (s) Print a summary of code block execution rates."
+ puts "full (f) Same as summary, but prints more information."
+ puts "source Show the source for a code block."
+ puts "bytecode (b) Show the bytecode for a code block, with counts."
+ puts "profiling (p) Show the (internal) profiling data for a code block."
+ puts "display (d) Display details for a code block."
+ puts "inlines Show all inlining stacks that the code block was on."
+ puts "help (h) Print this message."
+ puts "quit (q) Quit."
+ when "quit", "q", "exit"
+ exit 0
+ when "summary", "s"
+ summary(:summary)
+ when "full", "f"
+ summary(:full)
+ when "source"
+ if args.length != 1
+ puts "Usage: source <code block hash>"
+ return
+ end
+ $bytecodes.each {
+ | bytecode |
+ if bytecode.matches(args[0])
+ puts bytecode.source
+ end
+ }
+ when "bytecode", "b"
+ if args.length != 1
+            puts "Usage: bytecode <code block hash>"
+ return
+ end
+
+ hash = args[0]
+
+ countCols = 10 * $engines.size
+ machineCols = 10 * $engines.size
+ pad = 1
+ while (countCols + 1 + machineCols + pad) % 8 != 0
+ pad += 1
+ end
+
+ $bytecodes.each {
+ | bytecodes |
+ next unless bytecodes.matches(hash)
+ puts(center("Source Counts", countCols) + " " + center("Machine Counts", machineCols) +
+ (" " * pad) + center("Bytecode for #{bytecodes}", screenWidth - pad - countCols - 1 - machineCols))
+ puts(center("Base/DFG", countCols) + " " + center("Base/DFG", countCols))
+ bytecodes.each {
+ | bytecode |
+ if bytecode.shouldHaveCounts?
+ countsString = $engines.map {
+ | myEngine |
+ bytecode.topExecutionCount(myEngine)
+ }.join("/")
+ machineString = $engines.map {
+ | myEngine |
+ bytecode.bottomExecutionCount(myEngine)
+ }.join("/")
+ else
+ countsString = ""
+ machineString = ""
+ end
+ puts(center(countsString, countCols) + " " + center(machineString, machineCols) + (" " * pad) + bytecode.description.chomp)
+ bytecode.osrExits.each {
+ | exit |
+ puts(center("!!!!!", countCols) + " " + center("!!!!!", machineCols) + (" " * (pad + 10)) +
+ "EXIT: in #{exit.compilation} due to #{exit.exitKind}, #{exit.count} times")
+ }
+ }
+ }
+ when "profiling", "p"
+ if args.length != 1
+ puts "Usage: profiling <code block hash>"
+ return
+ end
+
+ hash = args[0]
+
+ first = true
+ $compilations.each {
+ | compilation |
+
+ compilation.profiledBytecodes.each {
+ | profiledBytecodes |
+ if profiledBytecodes.bytecodes.matches(hash)
+ if first
+ first = false
+ else
+ puts
+ end
+
+ puts "Compilation #{compilation}:"
+ profiledBytecodes.header.each {
+ | header |
+ puts(" " * 6 + header)
+ }
+ profiledBytecodes.each {
+ | bytecode |
+ puts(" " * 8 + bytecode.description)
+ profiledBytecodes.bytecodes.bytecode(bytecode.bytecodeIndex).osrExits.each {
+ | exit |
+ if exit.compilation == compilation
+ puts(" !!!!! EXIT: due to #{exit.exitKind}, #{exit.count} times")
+ end
+ }
+ }
+ end
+ }
+ }
+ when "inlines"
+ if args.length != 1
+ puts "Usage: inlines <code block hash>"
+ return
+ end
+
+ hash = args[0]
+
+ $bytecodes.each {
+ | bytecodes |
+ next unless bytecodes.matches(hash)
+
+ # FIXME: print something useful to say more about which code block this is.
+
+ $compilations.each {
+ | compilation |
+ myOrigins = []
+ compilation.descriptions.each {
+ | description |
+ if description.origin.index {
+ | myBytecode |
+ bytecodes == myBytecode.bytecodes
+ }
+ myOrigins << description.origin
+ end
+ }
+ myOrigins.uniq!
+ myOrigins.sort! {
+ | a, b |
+ result = 0
+ [a.size, b.size].min.times {
+ | index |
+ result = a[index].bytecodeIndex <=> b[index].bytecodeIndex
+ break if result != 0
+ }
+ result
+ }
+
+ next if myOrigins.empty?
+
+ printArray = []
+ lastPrintStack = []
+
+ def originToPrintStack(origin)
+ (0...(origin.size - 1)).map {
+ | index |
+ "bc\##{origin[index].bytecodeIndex} --> #{origin[index + 1].bytecodes}"
+ }
+ end
+
+ def printStack(printArray, stack, lastStack)
+ stillCommon = true
+ stack.each_with_index {
+ | entry, index |
+ next if stillCommon and entry == lastStack[index]
+ printArray << (" " * (index + 1) + entry)
+ stillCommon = false
+ }
+ end
+
+ myOrigins.each {
+ | origin |
+ currentPrintStack = originToPrintStack(origin)
+ printStack(printArray, currentPrintStack, lastPrintStack)
+ lastPrintStack = currentPrintStack
+ }
+
+ next if printArray.empty?
+
+ puts "Compilation #{compilation}:"
+ printArray.each {
+ | entry |
+ puts entry
+ }
+ }
+ }
+ when "display", "d"
+ compilationIndex = nil
+
+ case args.length
+ when 1
+ if args[0] == "*"
+ hash = nil
+ else
+ hash = args[0]
+ end
+ engine = nil
+ when 2
+ if mayBeHash(args[0])
+ hash = args[0]
+ engine = args[1]
+ else
+ engine = args[0]
+ hash = args[1]
+ end
+ else
+        puts "Usage: display <code block hash> <engine>"
+ return
+ end
+
+ if hash and hash =~ /-([0-9]+)-/
+ hash = $~.pre_match
+ engine = $~.post_match
+ compilationIndex = $1.to_i
+ end
+
+ if engine and not $engines.index(engine)
+ pattern = Regexp.new(Regexp.escape(engine), "i")
+ trueEngine = nil
+ $engines.each {
+ | myEngine |
+ if myEngine =~ pattern
+ trueEngine = myEngine
+ break
+ end
+ }
+ unless trueEngine
+ puts "#{engine} is not a valid engine, try #{$engines.join(' or ')}."
+ return
+ end
+ engine = trueEngine
+ end
+
+ actualCountCols = 13
+ sourceCountCols = 10 * $engines.size
+
+ first = true
+ $compilations.each {
+ | compilation |
+ next if hash and not compilation.bytecode.matches(hash)
+ next if engine and compilation.engine != engine
+ next if compilationIndex and compilation.compilationIndex != compilationIndex
+
+ if first
+ first = false
+ else
+ puts
+ end
+
+ puts("Compilation #{compilation}:")
+ puts(" Num inlined: GetByIds: #{compilation.numInlinedGetByIds} PutByIds: #{compilation.numInlinedPutByIds} Calls: #{compilation.numInlinedCalls}")
+ puts(center("Actual Counts", actualCountCols) + " " + center("Source Counts", sourceCountCols) + " " + center("Disassembly in #{compilation.engine}", screenWidth - 1 - sourceCountCols - 1 - actualCountCols))
+ puts((" " * actualCountCols) + " " + center("Base/DFG", sourceCountCols))
+
+ lines = []
+
+ compilation.descriptions.each {
+ | description |
+ # FIXME: We should have a better way of detecting things like CountExecution nodes
+ # and slow path entries in the baseline JIT.
+ if description.description =~ /CountExecution\(/ and compilation.engine == "DFG"
+ shouldShow = false
+ else
+ shouldShow = true
+ end
+ if description.origin.empty? or not description.origin[-1].shouldHaveCounts? or (compilation.engine == "Baseline" and description.description =~ /^\s*\(S\)/)
+ actualCountsString = ""
+ sourceCountsString = ""
+ else
+ actualCountsString = compilation.counter(description.origin).count.to_s
+ sourceCountsString = $engines.map {
+ | myEngine |
+ description.origin[-1].topExecutionCount(myEngine)
+ }.join("/")
+ end
+ description.description.split("\n").each {
+ | line |
+ lines << DescriptionLine.new(actualCountsString, sourceCountsString, line.chomp, shouldShow)
+ }
+ }
+
+ exitPrefix = center("!!!!!", actualCountCols) + " " + center("!!!!!", sourceCountCols) + (" " * 25)
+
+ lines.each_with_index {
+ | line, index |
+ codeAddress = line.codeAddress
+ if codeAddress
+ list = compilation.osrExits[codeAddress]
+ if list
+ list.each {
+ | exit |
+ if exit.isWatchpoint
+ exit.dumpForDisplay(exitPrefix)
+ end
+ }
+ end
+ end
+ if line.shouldShow
+ puts(center(line.actualCountsString, actualCountCols) + " " + center(line.sourceCountsString, sourceCountCols) + " " + line.disassembly)
+ end
+ if codeAddress
+ # Find the next disassembly address.
+ endIndex = index + 1
+ endAddress = nil
+ while endIndex < lines.size
+ myAddress = lines[endIndex].codeAddress
+ if myAddress
+ endAddress = myAddress
+ break
+ end
+ endIndex += 1
+ end
+
+ if endAddress
+ list = compilation.osrExits[endAddress]
+ if list
+ list.each {
+ | exit |
+ unless exit.isWatchpoint
+ exit.dumpForDisplay(exitPrefix)
+ end
+ }
+ end
+ end
+ end
+ }
+ }
+ else
+ puts "Invalid command: #{command}"
+ end
+end
+
+if $stdin.tty?
+ executeCommand("full")
+end
+
+while commandLine = Readline.readline("> ", true)
+ executeCommand(*commandLine.split)
+end
+
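A sketch of how the interactive commands above resolve their <code block hash> argument; it mirrors Bytecodes#matches from the script, with illustrative names:

    # pattern can be a bare inferred name, a bare hash, "name#hash", or "#hash".
    def matches?(pattern, inferred_name, code_hash)
      if pattern =~ /^#/
        $~.post_match == code_hash                        # "#2f6a91" selects by hash alone
      elsif pattern =~ /#/
        pattern == "#{inferred_name}\##{code_hash}"       # "foo#2f6a91" must match exactly
      else
        pattern == inferred_name || pattern == code_hash  # bare name or bare hash
      end
    end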
diff --git a/Tools/Scripts/do-webcore-rename b/Tools/Scripts/do-webcore-rename
index cd3cdee7a..281f7baad 100755
--- a/Tools/Scripts/do-webcore-rename
+++ b/Tools/Scripts/do-webcore-rename
@@ -96,13 +96,7 @@ sub wanted
my $isDOMTypeRename = 1;
my %renames = (
# Renames go here in the form of:
- "JavaScriptAudioNode" => "ScriptProcessorNode",
- "RealtimeAnalyserNode" => "AnalyserNode",
- "AudioGainNode" => "GainNode",
- "AudioPannerNode" => "PannerNode",
- "AudioChannelSplitter" => "ChannelSplitterNode",
- "AudioChannelMerger" => "ChannelMergerNode",
- "Oscillator" => "OscillatorNode",
+ "WaveTable" => "PeriodicWave",
);
my %renamesContemplatedForTheFuture = (
diff --git a/Tools/Scripts/dump-webkit-tests-run b/Tools/Scripts/dump-webkit-tests-run
new file mode 100755
index 000000000..d2339dcca
--- /dev/null
+++ b/Tools/Scripts/dump-webkit-tests-run
@@ -0,0 +1,52 @@
+#!/usr/bin/python
+import json
+import optparse
+import os
+import sys
+
+
+def main(argv):
+ parser = optparse.OptionParser(usage='%prog worker_number [path-to-stats.json]')
+ _, args = parser.parse_args(argv)
+
+ worker_number = int(args.pop(0))
+ if args:
+ if os.path.exists(args[0]):
+ with open(args[0], 'r') as fp:
+ trie = json.load(fp)
+ else:
+ print >> sys.stderr, "file not found: %s" % args[0]
+ sys.exit(1)
+ else:
+ trie = json.load(sys.stdin)
+
+ results = convert_trie_to_flat_paths(trie)
+ tests_run = []
+ for (test, result) in results.iteritems():
+ # Each result is a dict containing
+ # { 'results': [worker #, test # in worker, driver pid,
+ # test time in msecs, test + compare time in msecs]}
+ if result['results'][0] == worker_number:
+ tests_run.append((test, result['results'][1]))
+
+ print "\n".join(t[0] for t in sorted(tests_run, key=lambda t: t[1]))
+
+
+def convert_trie_to_flat_paths(trie, prefix=None):
+ # Cloned from webkitpy.layout_tests.layout_package.json_results_generator
+ # so that this code can stand alone.
+ result = {}
+ for name, data in trie.iteritems():
+ if prefix:
+ name = prefix + "/" + name
+
+ if len(data) and not "results" in data:
+ result.update(convert_trie_to_flat_paths(data, name))
+ else:
+ result[name] = data
+
+ return result
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
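The stats.json consumed above is a trie keyed by path components; each leaf holds a "results" array whose first two entries are the worker number and the test's position within that worker (per the comment in main). A hypothetical example of the shape, sketched in Ruby:

    trie = { "fast" => { "css" => { "example.html" => { "results" => [3, 17, 1234, 5, 6] } } } }
    # Flattening joins the path components with "/", yielding:
    #   { "fast/css/example.html" => { "results" => [3, 17, 1234, 5, 6] } }
    # so asking for worker 3 would list example.html, ordered by its per-worker index (17).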
diff --git a/Tools/Scripts/export-w3c-performance-wg-tests b/Tools/Scripts/export-w3c-performance-wg-tests
index aaa865915..1a41d447c 100755
--- a/Tools/Scripts/export-w3c-performance-wg-tests
+++ b/Tools/Scripts/export-w3c-performance-wg-tests
@@ -43,7 +43,7 @@ import shutil
import sys
if len(sys.argv) != 3:
- print 'USAGE: %s path_to_webkit_checkout_root path_to_webperf_checkout_root'
+ print 'USAGE: %s path_to_webkit_checkout_root path_to_webperf_checkout_root' % sys.argv[0]
sys.exit(1)
source_directory = os.path.join(sys.argv[1], 'LayoutTests', 'http', 'tests', 'w3c', 'webperf')
@@ -63,7 +63,6 @@ for directory_to_copy in directories_to_copy:
if not os.path.exists(destination_subdirectory):
os.makedirs(destination_subdirectory)
for root, dirs, files in os.walk(os.path.join(source_directory, directory_to_copy)):
- print root, dirs, files
root = os.path.relpath(root, source_directory)
for dirname in dirs:
destination_subdirectory = os.path.join(destination_directory, root, dirname)
@@ -78,3 +77,4 @@ for directory_to_copy in directories_to_copy:
for to_find, replace_with in replacements:
line = line.replace(to_find, replace_with)
out_file.write(line)
+ print 'Exported %s' % os.path.join(root, filename)
diff --git a/Tools/Scripts/extract-localizable-js-strings b/Tools/Scripts/extract-localizable-js-strings
new file mode 100755
index 000000000..b785ed3f6
--- /dev/null
+++ b/Tools/Scripts/extract-localizable-js-strings
@@ -0,0 +1,158 @@
+#!/usr/bin/perl -w
+
+# Copyright (C) 2013 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+use strict;
+
+@ARGV >= 1 or die "Usage: extract-localizable-js-strings <file to update> [ directory... ]\nDid you mean to run update-webkit-localizable-strings instead?\n";
+
+my $fileToUpdate = shift @ARGV;
+-f $fileToUpdate or die "Couldn't find file to update $fileToUpdate\n";
+
+my @directories = ();
+my @directoriesToSkip = ();
+if (@ARGV < 1) {
+ push(@directories, ".");
+} else {
+ for my $dir (@ARGV) {
+ if ($dir =~ /^-(.*)$/) {
+ push @directoriesToSkip, $1;
+ } else {
+ push @directories, $dir;
+ }
+ }
+}
+
+my $sawError = 0;
+
+my $keyCollisionCount = 0;
+
+my $quotedDirectoriesString = '"' . join('" "', @directories) . '"';
+for my $dir (@directoriesToSkip) {
+ $quotedDirectoriesString .= ' -path "' . $dir . '" -prune -o';
+}
+
+my @files = ( split "\n", `find $quotedDirectoriesString \\( -name "*.html" -o -name "*.js" \\)` );
+
+for my $file (sort @files) {
+ $file =~ s-^./--;
+
+ open SOURCE, $file or die "can't open $file\n";
+
+ while (<SOURCE>) {
+ chomp;
+
+ # Handle WebInspector strings. Prints a warning if a non-string literal is passed to WebInspector.UIString().
+ HandleUIString($1, $1, "", $file, $.) while s/WebInspector\.UIString\("([^"]+)"\)//;
+ print "$file:$.:WARNING: $&\n" while s/WebInspector\.UIString\(.*?\)//;
+
+ # Handle strings for other projects that also use this script.
+ HandleUIString($2, $2, "", $file, $.) while s/(\bclass="[^"]*l12n-tooltip[^"]*"[^>]*)title="([^"]+)"/$1/;
+ HandleUIString($1, $1, "", $file, $.) while s/\btitle="([^"]+)"([^>]*class="[^"]*l12n-tooltip[^"]*")/$2/;
+ HandleUIString($2, $2, "", $file, $.) while s/<(\w+)[^>]*\bclass="[^"]*l12n[^"]*"[^>]*>([^>]+)<\/\1>//;
+ HandleUIString($1, $1, "", $file, $.) while s/HTMLViewController\.UIString\("([^"]+)"\)//;
+ HandleUIString($1, $1, "", $file, $.) while s/\bgetLocalizedString\("([^"]+)"\)//;
+ HandleUIString($1, $1, "", $file, $.) while s/\blocalizedStrings\["([^"]+)"\]//;
+ }
+
+ close SOURCE;
+}
+
+my %stringByKey;
+my %commentByKey;
+my %fileByKey;
+my %lineByKey;
+
+sub HandleUIString
+{
+ my ($string, $key, $comment, $file, $line) = @_;
+ my $bad = 0;
+
+ if (grep { $_ == 0xFFFD } unpack "U*", $string) {
+ print "$file:$line:ERROR:string for translation has illegal UTF-8 -- most likely a problem with the Text Encoding of the source file\n";
+ $bad = 1;
+ }
+
+ if ($string ne $key && grep { $_ == 0xFFFD } unpack "U*", $key) {
+ print "$file:$line:ERROR:key has illegal UTF-8 -- most likely a problem with the Text Encoding of the source file\n";
+ $bad = 1;
+ }
+
+ if (grep { $_ == 0xFFFD } unpack "U*", $comment) {
+ print "$file:$line:ERROR:comment for translation has illegal UTF-8 -- most likely a problem with the Text Encoding of the source file\n";
+ $bad = 1;
+ }
+
+ if ($bad) {
+ $sawError = 1;
+ return;
+ }
+
+ if ($stringByKey{$key} && $stringByKey{$key} ne $string) {
+ print "$file:$line:encountered the same key, \"$key\", twice, with different strings\n";
+ print "$fileByKey{$key}:$lineByKey{$key}:previous occurrence\n";
+ $keyCollisionCount++;
+ return;
+ }
+
+ if ($commentByKey{$key} && $commentByKey{$key} ne $comment) {
+ print "$file:$line:encountered the same key, \"$key\", twice, with different comments\n";
+ print "$fileByKey{$key}:$lineByKey{$key}:previous occurrence\n";
+ $keyCollisionCount++;
+ return;
+ }
+
+ $fileByKey{$key} = $file;
+ $lineByKey{$key} = $line;
+ $stringByKey{$key} = $string;
+ $commentByKey{$key} = $comment;
+}
+
+print "\n" if $sawError;
+
+print "$keyCollisionCount key collisions\n" if $keyCollisionCount;
+
+if ($sawError) {
+ print "\nErrors encountered. Exiting without writing to $fileToUpdate.\n";
+ exit 1;
+}
+
+my $localizedStrings = "var localizedStrings = new Object;\n\n";
+
+for my $key (sort keys %commentByKey) {
+ $localizedStrings .= "localizedStrings[\"$key\"] = \"$stringByKey{$key}\";\n";
+}
+
+# Write out the strings file in UTF-16 with a BOM.
+utf8::decode($localizedStrings) if $^V ge v5.8;
+my $output = pack "n*", (0xFEFF, unpack "U*", $localizedStrings);
+
+if (-e "$fileToUpdate") {
+ open STRINGS, ">", "$fileToUpdate" or die;
+ print STRINGS $output;
+ close STRINGS;
+} else {
+ print "$fileToUpdate does not exist\n";
+ exit 1;
+}
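The pack/unpack at the end of the script writes the strings file as big-endian UTF-16 with a byte-order mark. The same idea in Ruby, for illustration only, using a hypothetical string:

    localized = "localizedStrings[\"Add\"] = \"Add\";\n"
    utf16 = ([0xFEFF] + localized.unpack("U*")).pack("n*")  # BOM, then one 16-bit unit per code point
    # Like the Perl original, this only handles characters in the Basic Multilingual Plane.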
diff --git a/Tools/Scripts/extract-localizable-strings b/Tools/Scripts/extract-localizable-strings
index 88f368478..946eeec50 100755
--- a/Tools/Scripts/extract-localizable-strings
+++ b/Tools/Scripts/extract-localizable-strings
@@ -43,6 +43,7 @@
# The exceptions file has a list of strings in quotes, filenames, and filename/string pairs separated by :.
use strict;
+no warnings 'deprecated';
sub UnescapeHexSequence($);
diff --git a/Tools/Scripts/filter-build-webkit b/Tools/Scripts/filter-build-webkit
index 97a732708..3a7f95ff0 100755
--- a/Tools/Scripts/filter-build-webkit
+++ b/Tools/Scripts/filter-build-webkit
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
-# Copyright (C) 2011 Apple Inc. All rights reserved.
+# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -66,17 +66,21 @@ HTMLFOOTER
sub printLine($$);
sub setLogfileOption($$);
sub setOutputFormatOption($$);
+sub shouldIgnoreLine($$);
sub usageAndExit();
# Defined in VCSUtils.
sub possiblyColored($$);
-my $showHelp;
+# Global variables used only in global scope.
my $outputPath = "&STDOUT";
-my $outputFormat = "text";
-my $useColor = -t STDOUT;
-my $unfilteredOutputPath = "build.log";
-my $logUnfilteredOutput;
+my $showHelp;
+
+# Global variables used in global and subroutine scope.
+our $logUnfilteredOutput;
+our $outputFormat = "text";
+our $unfilteredOutputPath = "build.log";
+our $useColor = -t STDOUT;
sub usageAndExit()
{
@@ -119,28 +123,12 @@ print OUTPUT_HANDLE HTML_HEADER if ($outputFormat eq "html");
my $buildFinished;
my $buildFailed = 0;
-while (my $line = <>) {
+for (my $previousLine = "", my $line = <>; $line; $previousLine = $line, $line = <>) {
print UNFILTERED_OUTPUT_HANDLE $line if $logUnfilteredOutput;
chomp($line);
- next if $line =~ /^\s*$/;
- next if $line =~ /^Build settings from command line:/;
- next if $line =~ /make: Nothing to be done for `all'\./;
- next if $line =~ /^JavaScriptCore\/create_hash_table/;
- next if $line =~ /JavaScriptCore.framework\/PrivateHeaders\/create_hash_table/;
- next if $line =~ /^JavaScriptCore\/pcre\/dftables/;
- next if $line =~ /^Creating hashtable for /;
- next if $line =~ /^Wrote output to /;
- next if $line =~ /^(touch|perl|cat|rm -f|bison|flex|python|\/usr\/bin\/g\+\+|gperf|echo|sed|if \[ \-f|WebCore\/generate-export-file) /;
- next if $line =~ /^UNDOCUMENTED: /;
- next if $line =~ /libtool.*has no symbols/;
- next if $line =~ /^# Lower case all the values, as CSS values are case-insensitive$/;
- next if $line =~ /^if sort /;
- next if $line =~ /^ /;
- next if $line =~ /^printf /;
- next if $line =~ /^offlineasm: Nothing changed/;
- next if $line =~ /^Showing first/;
+ next if shouldIgnoreLine($previousLine, $line);
if ($line =~ /^={10}/) {
printLine($line, STYLE_SUCCESS);
@@ -222,3 +210,28 @@ sub setOutputFormatOption($$)
}
$outputFormat = $value;
}
+
+sub shouldIgnoreLine($$)
+{
+ my ($previousLine, $line) = @_;
+
+ return 1 if $line =~ /^\s*$/;
+ return 1 if $line =~ /^Build settings from command line:/;
+ return 1 if $line =~ /make: Nothing to be done for `all'\./;
+ return 1 if $line =~ /^JavaScriptCore\/create_hash_table/;
+ return 1 if $line =~ /JavaScriptCore.framework\/PrivateHeaders\/create_hash_table/;
+ return 1 if $line =~ /^JavaScriptCore\/pcre\/dftables/;
+ return 1 if $line =~ /^Creating hashtable for /;
+ return 1 if $line =~ /^Wrote output to /;
+ return 1 if $line =~ /^(touch|perl|cat|rm -f|bison|flex|python|\/usr\/bin\/g\+\+|gperf|echo|sed|if \[ \-f|WebCore\/generate-export-file) /;
+ return 1 if $line =~ /^UNDOCUMENTED: /;
+ return 1 if $line =~ /libtool.*has no symbols/;
+ return 1 if $line =~ /^# Lower case all the values, as CSS values are case-insensitive$/;
+ return 1 if $line =~ /^if sort /;
+ return 1 if $line =~ /^ / && $previousLine !~ /referenced from:$/;
+ return 1 if $line =~ /^printf /;
+ return 1 if $line =~ /^offlineasm: Nothing changed/;
+ return 1 if $line =~ /^Showing first/;
+
+ return 0;
+}
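A behavioural change in the refactoring above is the new previous-line check: indented lines are still dropped, unless the preceding line ends in "referenced from:" (a linker error), in which case they are kept. A rough Ruby sketch of that rule (the script itself is Perl, and its leading-space pattern is simplified here):

    def ignore_indented?(previous_line, line)
      line =~ /^ / && previous_line !~ /referenced from:$/
    end
    ignore_indented?("CompileC WebCore/Foo.o ...", "    ProcessPCH ...")               # true, filtered
    ignore_indented?("\"Foo::bar()\", referenced from:", "      Baz::quux() in Baz.o")  # false, kept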
diff --git a/Tools/Scripts/generate-coverage-data b/Tools/Scripts/generate-coverage-data
index d06ee99dd..2532e4f9c 100755
--- a/Tools/Scripts/generate-coverage-data
+++ b/Tools/Scripts/generate-coverage-data
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
-# Copyright (C) 2005, 2006 Apple Computer, Inc. All rights reserved.
+# Copyright (C) 2005, 2006, 2013 Apple Computer, Inc. All rights reserved.
# Copyright (C) 2007 Holger Hans Peter Freyther. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -27,45 +27,156 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-# Simple script to build, run and visualize coverage information
+# Script to build, run and visualize coverage information
use strict;
use File::Basename;
use File::Spec;
use FindBin;
use Getopt::Long qw(:config pass_through);
+use JSON;
use lib $FindBin::Bin;
-use webkitdirs;
+use List::Util qw(sum);
+use List::Util qw(max);
use POSIX;
+use webkitdirs;
+use XML::Simple;
-# Generate a name for our results
-my $svnVersion = determineCurrentSVNRevision();
-my @timeData = localtime(time);
-my $resultName = $svnVersion . "-" . join('_', @timeData);
-my @otherOptions = ();
+sub parseGcovrOutput($);
+sub getFileHitsAndBranches($);
+sub addLineCounts($$$$$$);
+sub createResultName();
+sub generateReport($);
-# Move to the source directory
-# Delete old gcov files
-# Compile WebKit and run the tests
-# Generate the coverage graph...
-# Upload
-
-$ENV{'WEBKIT_COVERAGE_BUILD'} = 1;
chdirWebKit();
+system("mkdir WebKitBuild/Coverage") if ! -d "WebKitBuild/Coverage";
-# Clean-up old files
+# Delete old gcov files
print "Cleaning up\n";
-system("if [ -d WebKitBuild ]; then find WebKitBuild -name '*.gcda' -delete; fi;") == 0 or die;
-
+system("if [ -d WebKitBuild ]; then find WebKitBuild -name '*.gcda' -delete; fi;") == 0 or die "Cannot delete old gcda files (code coverage";
print "Building and testing\n";
-system("Tools/Scripts/build-webkit", "--coverage", @ARGV) == 0 or die;
-system "Tools/Scripts/new-run-webkit-tests", "--no-launch-safari";
-system "Tools/Scripts/run-javascriptcore-tests", "--coverage", @ARGV;
+system("Tools/Scripts/build-webkit", "--clean", @ARGV) == 0 or die "Cannot clean WebKit build";
+system("Tools/Scripts/build-webkit", "--coverage", "--release", @ARGV) == 0 or die "Cannot compile WebKit with code coverage";
+system("Tools/Scripts/run-javascriptcore-tests --no-build");
+system("Tools/Scripts/run-api-tests");
+system("Tools/Scripts/run-webkit-tests");
+system("Tools/Scripts/run-webkit-tests -2");
+generateReport(createResultName());
+print "Done\n";
-# Collect the data and generate a report
-print "Collecting coverage data\n";
-system("Tools/CodeCoverage/run-generate-coverage-data", $resultName, "WebKitBuild/Coverage") == 0 or die;
-system("Tools/CodeCoverage/regenerate-coverage-display", "WebKitBuild/Coverage", "WebKitBuild/Coverage/html") == 0 or die;
+sub generateReport($)
+{
+ my ($reportName) = @_;
-print "Done\n";
+ # Generate the coverage data and report
+ print "Collecting coverage data\n";
+ system("python Tools/Scripts/webkitpy/tool/gcovr --xml --output=WebKitBuild/Coverage/" . $reportName . ".xml") == 0 or die "Cannot run gcovr";
+
+ # Collect useful data from xml to json format
+ my $jsonData = encode_json(parseGcovrOutput("WebKitBuild/Coverage/$reportName.xml"));
+ open my $templateFile, "<", "Tools/CodeCoverage/results-template.html" or die "Cannot open Tools/CodeCoverage/results-template.html";
+ my $templateHtml = join("", <$templateFile>);
+ close $templateFile;
+ $templateHtml =~ s/%CoverageDataJSON%/$jsonData/;
+
+ my $reportFilename = "WebKitBuild/Coverage/$reportName.html";
+ open my $reportFile, ">", $reportFilename or die "Cannot open $reportFilename";
+ print $reportFile $templateHtml;
+ close $reportFile;
+
+ # Open the report
+ my $url = "file://" . sourceDir() . "/WebKitBuild/Coverage/$reportName.html";
+ system "open \"$url\"";
+}
+
+sub parseGcovrOutput($)
+{
+ my ($xmlData) = @_;
+ my $sourceDir = sourceDir();
+
+ my @files;
+
+    # The xml output of gcovr uses Java-like package/class names for directories and files
+ my $packages = new XML::Simple->XMLin($xmlData)->{"packages"}->{"package"};
+
+ foreach my $packageName (keys %{$packages}) {
+ my $classes = $packages->{$packageName}->{"classes"}->{"class"};
+
+        # Perl's XML::Simple hoists a file up to this level of the parsed xml data structure
+        # when it is the only child, even though it sits a layer deeper in the xml tree
+ if ($classes->{"filename"} && $classes->{"lines"}) {
+ if ($classes->{"filename"} =~ /$sourceDir/) {
+ push(@files, getFileHitsAndBranches($classes));
+ }
+ }
+ else {
+ foreach my $key (keys %{$classes}) {
+ my $class = $classes->{$key};
+ if ($class->{"filename"} =~ /$sourceDir/) {
+ push(@files,getFileHitsAndBranches($class));
+ }
+ }
+ }
+ }
+ return \@files;
+}
+
+sub getFileHitsAndBranches($)
+{
+ my ($class) = @_;
+
+ my @hits;
+ my @hitLines;
+ my @branchesPossible;
+ my @branchesTaken;
+ my @branchLines;
+
+ my $lines = $class->{"lines"}->{"line"};
+ if (ref($lines) eq "ARRAY") {
+ foreach my $line (@$lines) {
+ addLineCounts($line, \@hits, \@hitLines, \@branchesPossible, \@branchesTaken, \@branchLines);
+ }
+ } else {
+ addLineCounts($lines, \@hits, \@hitLines, \@branchesPossible, \@branchesTaken, \@branchLines);
+ }
+
+ my $file = {};
+ $file->{"hits"} = \@hits;
+ $file->{"hitLines"} = \@hitLines;
+ $file->{"branchesPossible"} = \@branchesPossible;
+ $file->{"branchesTaken"} = \@branchesTaken;
+ $file->{"branchLines"} = \@branchLines;
+ $file->{"filename"} = substr($class->{"filename"}, length(sourceDir()));
+ $file->{"coverage"} = abs($class->{"line-rate"});
+ if (@branchLines) {
+ $file->{"branchCoverage"} = abs($class->{"branch-rate"});
+ } else {
+ $file->{"branchCoverage"} = 1;
+ }
+ $file->{"totalHeat"} = sum(@hits);
+ $file->{"maxHeat"} = max(@hits);
+ return $file;
+}
+
+sub addLineCounts($$$$$$)
+{
+ my ($line, $hits, $hitLines, $branchesPossible, $branchesTaken, $branchLines) = @_;
+ push(@$hits, int($line->{"hits"}));
+ push(@$hitLines, int($line->{"number"}));
+ if($line->{"branch"} eq "true") {
+
+ # Extract the numerator and denominator of the condition-coverage attribute, which looks like "75% (3/4)"
+ $line->{"condition-coverage"} =~ /\((.*)\/(.*)\)/;
+ push(@$branchesTaken, int($1));
+ push(@$branchesPossible, int($2));
+ push(@$branchLines, int($line->{"number"}));
+ }
+}
+
+sub createResultName()
+{
+ my $svnVersion = determineCurrentSVNRevision();
+ my @timeData = localtime(time);
+ return $svnVersion . "-" . join('_', @timeData);
+}
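The condition-coverage attribute parsed above comes from gcovr as text like "75% (3/4)", and the two capture groups pull out branches taken and branches possible. The same regexp exercised in Ruby:

    "75% (3/4)" =~ /\((.*)\/(.*)\)/
    taken, possible = $1.to_i, $2.to_i   # 3 branches taken out of 4 possible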
diff --git a/Tools/Scripts/import-w3c-performance-wg-tests b/Tools/Scripts/import-w3c-performance-wg-tests
index e48e26188..caddde7f7 100755
--- a/Tools/Scripts/import-w3c-performance-wg-tests
+++ b/Tools/Scripts/import-w3c-performance-wg-tests
@@ -42,7 +42,7 @@ import os
import sys
if len(sys.argv) != 3:
- print 'USAGE: %s path_to_webperf_checkout_root path_to_webkit_checkout_root'
+ print 'USAGE: %s path_to_webperf_checkout_root path_to_webkit_checkout_root' % sys.argv[0]
sys.exit(1)
source_directory = os.path.join(sys.argv[1], 'tests')
diff --git a/Tools/Scripts/import-w3c-tests b/Tools/Scripts/import-w3c-tests
new file mode 100755
index 000000000..bb72096c0
--- /dev/null
+++ b/Tools/Scripts/import-w3c-tests
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import sys
+
+from webkitpy.w3c import test_importer
+
+
+sys.exit(test_importer.main(sys.argv[1:], sys.stdout, sys.stderr))
diff --git a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectorywithlocalcommits.py b/Tools/Scripts/lint-test-expectations
index f06f94ef4..c56eb77db 100644..100755
--- a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectorywithlocalcommits.py
+++ b/Tools/Scripts/lint-test-expectations
@@ -1,9 +1,10 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
-#
+#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
@@ -13,7 +14,7 @@
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -26,9 +27,11 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory
+import sys
+
+from webkitpy.common import version_check
+from webkitpy.layout_tests import lint_test_expectations
+
+
+sys.exit(lint_test_expectations.main(sys.argv[1:], sys.stdout, sys.stderr))
-class CleanWorkingDirectoryWithLocalCommits(CleanWorkingDirectory):
- def __init__(self, tool, options):
- # FIXME: This a bit of a hack. Consider doing this more cleanly.
- CleanWorkingDirectory.__init__(self, tool, options, allow_local_commits=True)
diff --git a/Tools/Scripts/new-run-webkit-httpd b/Tools/Scripts/new-run-webkit-httpd
index 9ace3e1b8..921e77971 100755
--- a/Tools/Scripts/new-run-webkit-httpd
+++ b/Tools/Scripts/new-run-webkit-httpd
@@ -61,7 +61,7 @@ def run(options):
raise 'Specifying port requires also a root.'
host = Host()
# FIXME: Make this work with other ports as well.
- port_obj = host.port_factory.get(port_name='chromium', options=options)
+ port_obj = host.port_factory.get(port_name='win', options=options)
httpd = http_server.Lighttpd(port_obj,
tempfile.gettempdir(),
port=options.port,
diff --git a/Tools/Scripts/new-run-webkit-websocketserver b/Tools/Scripts/new-run-webkit-websocketserver
index 15ed1f99e..25c9537f2 100755
--- a/Tools/Scripts/new-run-webkit-websocketserver
+++ b/Tools/Scripts/new-run-webkit-websocketserver
@@ -36,6 +36,7 @@ import tempfile
from webkitpy.common.host import Host
from webkitpy.layout_tests.servers import websocket_server
+from webkitpy.port import platform_options
def main():
@@ -71,6 +72,11 @@ def main():
option_parser.add_option('-v', '--verbose', action='store_true',
default=False,
help='Include debug-level logging.')
+
+ option_group = optparse.OptionGroup(option_parser, "Platform options")
+ option_group.add_options(platform_options())
+ option_parser.add_option_group(option_group)
+
options, args = option_parser.parse_args()
if not options.port:
@@ -97,8 +103,7 @@ def main():
kwds['pidfile'] = options.pidfile
host = Host()
- # FIXME: Make this work with other ports as well.
- port_obj = host.port_factory.get(port_name='chromium', options=options)
+ port_obj = host.port_factory.get(options.platform, options=options)
pywebsocket = websocket_server.PyWebSocket(port_obj, options.output_dir, **kwds)
log_level = logging.WARN
diff --git a/Tools/Scripts/old-run-webkit-tests b/Tools/Scripts/old-run-webkit-tests
index e4803c9c6..ed201acce 100755
--- a/Tools/Scripts/old-run-webkit-tests
+++ b/Tools/Scripts/old-run-webkit-tests
@@ -141,6 +141,7 @@ my $guardMalloc = '';
# an IPv6 environment. See https://bugs.webkit.org/show_bug.cgi?id=37104.
my $httpdPort = 8000;
my $httpdSSLPort = 8443;
+my $httpdAuxiliaryPort = 8080; # Port used by various tests in http/tests/security.
my $ignoreMetrics = 0;
my $webSocketPort = 8880;
# wss is disabled until all platforms support pyOpenSSL.
@@ -194,13 +195,12 @@ if (isWindows()) {
exit 1;
}
-# Default to --no-http for wx for now.
-$testHTTP = 0 if (isWx());
-
my $perlInterpreter = "perl";
my $expectedTag = "expected";
my $mismatchTag = "mismatch";
+my $refTag = "ref";
+my $notrefTag = "notref";
my $actualTag = "actual";
my $prettyDiffTag = "pretty-diff";
my $diffsTag = "diffs";
@@ -234,8 +234,6 @@ if (isAppleMacWebKit()) {
$platform = "qt";
} elsif (isGtk()) {
$platform = "gtk";
-} elsif (isWx()) {
- $platform = "wx";
} elsif (isWinCairo()) {
$platform = "wincairo";
} elsif (isCygwin() || isWindows()) {
@@ -293,6 +291,7 @@ Usage: $programName [options] [testdir|testpath ...]
-i|--ignore-tests Comma-separated list of directories or tests to ignore
--iterations n Number of times to run the set of tests (e.g. ABCABCABC)
--[no-]launch-safari Launch (or do not launch) Safari to display test results (default: $launchSafariDefault)
+ --[no-]show-results Same as --[no-]launch-safari
-l|--leaks Enable leaks checking
--[no-]new-test-results Generate results for new tests
--nthly n Restart DumpRenderTree every n tests (default: $testsPerDumpTool)
@@ -358,6 +357,7 @@ my $getOptionsResult = GetOptions(
'reverse' => \$reverseTests,
'root=s' => \$root,
'sample-on-timeout!' => \$runSample,
+ 'show-results!' => \$launchSafari,
'singly|1' => sub { $testsPerDumpTool = 1; },
'skipped=s' => \&validateSkippedArg,
'slowest' => \$report10Slowest,
@@ -1581,12 +1581,17 @@ sub configureAndOpenHTTPDIfNeeded()
{
return if $isHttpdOpen;
my $absTestResultsDirectory = resolveAndMakeTestResultsDirectory();
- my $listen = "127.0.0.1:$httpdPort";
my @args = (
"-c", "CustomLog \"$absTestResultsDirectory/access_log.txt\" common",
"-c", "ErrorLog \"$absTestResultsDirectory/error_log.txt\"",
- "-C", "Listen $listen"
);
+ foreach ($httpdPort, $httpdAuxiliaryPort, $httpdSSLPort) {
+ # We listen to both IPv4 and IPv6 loop-back addresses, but
+        # ignore requests to port 8000 from random users on the network.
+ # See <https://bugs.webkit.org/show_bug.cgi?id=37104>.
+ push @args, ("-C", "Listen 127.0.0.1:$_");
+ push @args, ("-C", "Listen [::1]:$_");
+ }
my @defaultArgs = getDefaultConfigForTestDirectory($testDirectory);
@args = (@defaultArgs, @args);
@@ -2630,15 +2635,18 @@ sub isUsedInReftest($)
my $filename = $_[0];
my @extensions = ('html','shtml','xml','xhtml','htm','php','svg','mht','pl');
my $extensionsJoined = join("|", @extensions);
- my $extensionExpression = "-$expectedTag(-$mismatchTag)?\\.(".$extensionsJoined.")\$";
-
- if ($filename =~ /$extensionExpression/) {
+ my $suffixExtensionExpression = "-($expectedTag|$refTag|$notrefTag)(-$mismatchTag)?\\.(".$extensionsJoined.")\$";
+ my $prefixExtensionExpression = "^($refTag|$notrefTag)-";
+ if ($filename =~ /$suffixExtensionExpression/ || $filename =~ /$prefixExtensionExpression/) {
return 1;
}
my $base = stripExtension($filename);
-
+
foreach my $extension (@extensions) {
- if (-f "$base-$expectedTag.$extension" || -f "$base-$expectedTag-$mismatchTag.$extension") {
+ if (-f "$base-$expectedTag.$extension" ||
+ -f "$base-$refTag.$extension" || -f "$base-$notrefTag.$extension" ||
+ -f "$base-$expectedTag-$mismatchTag.$extension" ||
+ -f "$refTag-$base.$extension" || -f "$notrefTag-$base.$extension") {
return 1;
}
}
@@ -2825,16 +2833,23 @@ sub setUpWindowsCrashLogSaving()
return;
}
- my $ntsdPath = File::Spec->catfile(toCygwinPath($ENV{PROGRAMFILES}), "Debugging Tools for Windows (x86)", "ntsd.exe");
- unless (-f $ntsdPath) {
- $ntsdPath = File::Spec->catfile(toCygwinPath($ENV{ProgramW6432}), "Debugging Tools for Windows (x64)", "ntsd.exe");
- unless (-f $ntsdPath) {
- $ntsdPath = File::Spec->catfile(toCygwinPath($ENV{SYSTEMROOT}), "system32", "ntsd.exe");
- unless (-f $ntsdPath) {
- print STDERR "Can't find ntsd.exe. Crash logs will not be saved.\nSee <http://trac.webkit.org/wiki/BuildingOnWindows#GettingCrashLogs>.\n";
- return;
- }
+ my @possiblePaths = (
+ File::Spec->catfile(toCygwinPath($ENV{PROGRAMFILES}), "Windows Kits", "8.0", "Debuggers", "x64", "ntsd.exe"),
+ File::Spec->catfile(toCygwinPath($ENV{PROGRAMFILES}), "Windows Kits", "8.0", "Debuggers", "x86", "ntsd.exe"),
+ File::Spec->catfile(toCygwinPath($ENV{PROGRAMFILES}), "Debugging Tools for Windows (x86)", "ntsd.exe"),
+ File::Spec->catfile(toCygwinPath($ENV{ProgramW6432}), "Debugging Tools for Windows (x64)", "ntsd.exe"),
+ File::Spec->catfile(toCygwinPath($ENV{SYSTEMROOT}), "system32", "ntsd.exe"),
+ );
+
+ my $ntsdPath = shift @possiblePaths;
+
+ while (not -f $ntsdPath) {
+ if (!@possiblePaths) {
+ print STDERR "Can't find ntsd.exe. Crash logs will not be saved.\nSee <http://trac.webkit.org/wiki/BuildingOnWindows#GettingCrashLogs>.\n";
+ return;
}
+
+ $ntsdPath = shift @possiblePaths;
}
# If we used -c (instead of -cf) we could pass the commands directly on the command line. But
diff --git a/Tools/Scripts/pdevenv b/Tools/Scripts/pdevenv
deleted file mode 100755
index 464372850..000000000
--- a/Tools/Scripts/pdevenv
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/perl -w
-
-use strict;
-use warnings;
-
-use File::Temp qw/tempfile/;
-use FindBin;
-
-use lib $FindBin::Bin;
-use webkitdirs;
-
-my ($fh, $path) = tempfile(UNLINK => 0, SUFFIX => '.cmd') or die;
-
-chomp(my $vcBin = `cygpath -w "$FindBin::Bin/../vcbin"`);
-chomp(my $scriptsPath = `cygpath -w "$FindBin::Bin"`);
-
-my $vsToolsVar;
-if ($ENV{'VS80COMNTOOLS'}) {
- $vsToolsVar = "VS80COMNTOOLS";
-} elsif ($ENV{'VS90COMNTOOLS'}) {
- $vsToolsVar = "VS90COMNTOOLS";
-} else {
- print "*************************************************************\n";
- print "Cannot find Visual Studio tools dir.\n";
- print "Please ensure that \$VS80COMNTOOLS or \$VS90COMNTOOLS\n";
- print "is set to a valid location.\n";
- print "*************************************************************\n";
- die;
-}
-
-print $fh "\@echo off\n\n";
-print $fh "call \"\%" . $vsToolsVar . "\%\\vsvars32.bat\"\n\n";
-print $fh "set PATH=$vcBin;$scriptsPath;\%PATH\%\n\n";
-
-print $fh "IF EXIST \"\%VSINSTALLDIR\%\\Common7\\IDE\\devenv.com\" (devenv.com /useenv " . join(" ", @ARGV) . ") ELSE ";
-print $fh "VCExpress.exe /useenv " . join(" ", @ARGV) . "\n";
-
-
-close $fh;
-
-chmod 0755, $path;
-
-chomp($path = `cygpath -w -s '$path'`);
-
-exec("cmd /c \"call $path\"");
diff --git a/Tools/Scripts/prepare-ChangeLog b/Tools/Scripts/prepare-ChangeLog
index a3816e2dc..e6e7897b0 100755
--- a/Tools/Scripts/prepare-ChangeLog
+++ b/Tools/Scripts/prepare-ChangeLog
@@ -83,6 +83,8 @@ sub generateNewChangeLogs($$$$$$$$$$$);
sub getLatestChangeLogs($);
sub get_function_line_ranges($$);
sub get_function_line_ranges_for_cpp($$);
+sub delete_namespaces_from_ranges_for_cpp(\@\@);
+sub is_function_in_namespace($$);
sub get_function_line_ranges_for_java($$);
sub get_function_line_ranges_for_javascript($$);
sub get_function_line_ranges_for_perl($$);
@@ -499,7 +501,6 @@ sub generateNewChangeLogs($$$$$$$$$$$)
print CHANGE_LOG normalizeLineEndings("\n", $endl);
print CHANGE_LOG normalizeLineEndings(" Reviewed by $reviewer.\n\n", $endl);
- print CHANGE_LOG normalizeLineEndings(" Additional information of the change such as approach, rationale. Please add per-function descriptions below (OOPS!).\n\n", $endl);
if ($prefix =~ m/WebCore/ || `pwd` =~ m/WebCore/) {
if (@$addedRegressionTests) {
@@ -649,6 +650,7 @@ sub get_function_line_ranges_for_cpp($$)
my $next_word_could_be_namespace = 0;
my $potential_namespace = "";
my @namespaces;
+ my @all_namespaces;
while (<$file_handle>) {
# Handle continued multi-line comment.
@@ -812,6 +814,7 @@ sub get_function_line_ranges_for_cpp($$)
if ($potential_namespace) {
push @namespaces, $potential_namespace;
+ push @all_namespaces, $potential_namespace;
$potential_namespace = "";
$name = $namespaces[-1];
$namespace_start = $. + 1;
@@ -927,10 +930,27 @@ sub get_function_line_ranges_for_cpp($$)
warn "mismatched parentheses in $file_name\n" if $in_parentheses;
- return @ranges;
+ return delete_namespaces_from_ranges_for_cpp(@ranges, @all_namespaces);
}
+# Take in references to an array of line ranges for C functions in a given file
+# and an array of namespaces declared in that file and return an updated
+# list of line ranges with the namespaces removed.
+
+sub delete_namespaces_from_ranges_for_cpp(\@\@)
+{
+ my ($ranges, $namespaces) = @_;
+ return grep {!is_function_in_namespace($namespaces, $$_[2])} @$ranges;
+}
+
+
+sub is_function_in_namespace($$)
+{
+ my ($namespaces, $function_name) = @_;
+ return grep {$_ eq $function_name} @$namespaces;
+}
+
# Read a file and get all the line ranges of the things that look like Java
# classes, interfaces and methods.
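
The two helpers added above drop namespace blocks from the per-function line ranges that prepare-ChangeLog collects: a range is discarded when its name (the third element of each range, $$_[2] in the Perl code) matches one of the namespaces seen while scanning the file. A minimal Python sketch of the same filtering step, with made-up range data for illustration:

    # Sketch of the namespace-filtering step, not part of prepare-ChangeLog itself.
    def delete_namespaces_from_ranges(ranges, namespaces):
        # Each range is (start_line, end_line, name); keep only ranges whose
        # name is not one of the namespaces declared in the file.
        return [r for r in ranges if r[2] not in namespaces]

    ranges = [(10, 42, "WebCore"), (12, 20, "Document::title")]
    print(delete_namespaces_from_ranges(ranges, ["WebCore"]))  # [(12, 20, 'Document::title')]
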
diff --git a/Tools/Scripts/print-msvc-project-dependencies b/Tools/Scripts/print-msvc-project-dependencies
deleted file mode 100755
index dbc8402b3..000000000
--- a/Tools/Scripts/print-msvc-project-dependencies
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/usr/bin/perl -w
-
-# Copyright (C) 2008 Apple Inc. All Rights Reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-use strict;
-use File::Basename;
-
-sub printDependencyTree($);
-
-my $basename = basename($0);
-@ARGV or die "Usage: $basename sln1 [sln2 sln3...]";
-
-foreach my $sln (@ARGV) {
- printDependencyTree($sln);
-}
-
-exit;
-
-sub printDependencyTree($)
-{
- my ($sln) = @_;
-
- unless (-f $sln) {
- warn "Warning: Can't find $sln; skipping\n";
- return;
- }
-
- unless (open SLN, "<", $sln) {
- warn "Warning: Can't open $sln; skipping\n";
- return;
- }
-
- my %projectsByUUID = ();
- my $currentProject;
-
- my $state = "initial";
- foreach my $line (<SLN>) {
- if ($state eq "initial") {
- if ($line =~ /^Project\([^\)]+\) = "([^"]+)", "[^"]+", "([^"]+)"\r?$/) {
- my $name = $1;
- my $uuid = $2;
- if (exists $projectsByUUID{$uuid}) {
- warn "Warning: Project $name appears more than once in $sln; using first definition\n";
- next;
- }
- $currentProject = {
- name => $name,
- uuid => $uuid,
- dependencies => {},
- };
- $projectsByUUID{$uuid} = $currentProject;
-
- $state = "inProject";
- }
-
- next;
- }
-
- if ($state eq "inProject") {
- defined($currentProject) or die;
-
- if ($line =~ /^\s*ProjectSection\(ProjectDependencies\) = postProject\r?$/) {
- $state = "inDependencies";
- } elsif ($line =~ /^EndProject\r?$/) {
- $currentProject = undef;
- $state = "initial";
- }
-
- next;
- }
-
- if ($state eq "inDependencies") {
- defined($currentProject) or die;
-
- if ($line =~ /^\s*({[^}]+}) = ({[^}]+})\r?$/) {
- my $uuid1 = $1;
- my $uuid2 = $2;
- if (exists $currentProject->{dependencies}->{$uuid1}) {
- warn "Warning: UUID $uuid1 listed more than once as dependency of project ", $currentProject->{name}, "\n";
- next;
- }
-
- $uuid1 eq $uuid2 or warn "Warning: UUIDs in depedency section of project ", $currentProject->{name}, " don't match: $uuid1 $uuid2; using first UUID\n";
-
- $currentProject->{dependencies}->{$uuid1} = 1;
- } elsif ($line =~ /^\s*EndProjectSection\r?$/) {
- $state = "inProject";
- }
-
- next;
- }
- }
-
- close SLN or warn "Warning: Can't close $sln\n";
-
- my %projectsNotDependedUpon = %projectsByUUID;
- CANDIDATE: foreach my $candidateUUID (keys %projectsByUUID) {
- foreach my $projectUUID (keys %projectsByUUID) {
- next if $candidateUUID eq $projectUUID;
- foreach my $dependencyUUID (keys %{$projectsByUUID{$projectUUID}->{dependencies}}) {
- if ($candidateUUID eq $dependencyUUID) {
- delete $projectsNotDependedUpon{$candidateUUID};
- next CANDIDATE;
- }
- }
- }
- }
-
- foreach my $project (values %projectsNotDependedUpon) {
- printProjectAndDependencies($project, 0, \%projectsByUUID);
- }
-}
-
-sub printProjectAndDependencies
-{
- my ($project, $indentLevel, $projectsByUUID) = @_;
-
- print " " x $indentLevel, $project->{name}, "\n";
- foreach my $dependencyUUID (keys %{$project->{dependencies}}) {
- printProjectAndDependencies($projectsByUUID->{$dependencyUUID}, $indentLevel + 1, $projectsByUUID);
- }
-}
diff --git a/Tools/Scripts/print-vse-failure-logs b/Tools/Scripts/print-vse-failure-logs
deleted file mode 100755
index 7580465dd..000000000
--- a/Tools/Scripts/print-vse-failure-logs
+++ /dev/null
@@ -1,113 +0,0 @@
-#!/usr/bin/env python
-# Copyright (c) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# This is a very simple script designed to crawl the build directory
-# for visual studio express build logs and print them to stdout.
-
-from __future__ import with_statement
-
-import codecs
-import os
-import re
-
-from webkitpy.common.checkout import scm
-from webkitpy.common.system.executive import Executive
-from webkitpy.thirdparty import BeautifulSoup
-
-
-class PrintVisualStudioExpressLogs(object):
- def __init__(self):
- self._executive = Executive()
-
- def _find_buildlogs(self, build_directory):
- build_log_paths = []
- for dirpath, dirnames, filenames in os.walk(build_directory):
- for file_name in filenames:
- if file_name == "BuildLog.htm":
- file_path = os.path.join(dirpath, file_name)
- build_log_paths.append(file_path)
- return build_log_paths
-
- def _build_order(self):
- """Returns a list of project names in the order in which they are built."""
- script_path = os.path.join(self._scripts_directory(), "print-msvc-project-dependencies")
- sln_path = os.path.join(scm.find_checkout_root(), "WebKit", "win", "WebKit.vcproj", "WebKit.sln")
- lines = self._executive.run_command([script_path, sln_path]).splitlines()
- order = [line.strip() for line in lines if line.find("Folder") == -1]
- order.reverse()
- return order
-
- def _sort_buildlogs(self, log_paths):
- build_order = self._build_order()
- def sort_key(log_path):
- project_name = os.path.basename(os.path.dirname(os.path.dirname(log_path)))
- try:
- index = build_order.index(project_name)
- except ValueError:
- # If the project isn't in the list, sort it after all items that
- # are in the list.
- index = len(build_order)
- # Sort first by build order, then by project name
- return (index, project_name)
- return sorted(log_paths, key=sort_key)
-
- def _obj_directory(self):
- build_directory_script_path = os.path.join(self._scripts_directory(), "webkit-build-directory")
- # FIXME: ports/webkit.py should provide the build directory in a nice API.
- # NOTE: The windows VSE build does not seem to use different directories
- # for Debug and Release.
- build_directory = self._executive.run_command([build_directory_script_path, "--top-level"]).rstrip()
- return os.path.join(build_directory, "obj")
-
- def _scripts_directory(self):
- return os.path.dirname(__file__)
-
- def _relevant_text(self, log):
- soup = BeautifulSoup.BeautifulSoup(log)
- # The Output Window table is where the useful output starts in the build log.
- output_window_table = soup.find(text=re.compile("Output Window")).findParent("table")
- result = []
- for table in [output_window_table] + output_window_table.findNextSiblings("table"):
- result.extend([text.replace("&nbsp;", "") for text in table.findAll(text=True)])
- result.append("\n")
- return "".join(result)
-
- def main(self):
- build_log_paths = self._sort_buildlogs(self._find_buildlogs(self._obj_directory()))
-
- print "Found %s Visual Studio Express Build Logs:\n%s" % (len(build_log_paths), "\n".join(build_log_paths))
-
- for build_log_path in build_log_paths:
- print "%s:\n" % build_log_path
- with codecs.open(build_log_path, "r", "utf-16") as build_log:
- print self._relevant_text(build_log)
-
-
-if __name__ == '__main__':
- PrintVisualStudioExpressLogs().main()
diff --git a/Tools/Scripts/run-api-tests b/Tools/Scripts/run-api-tests
index ca71ac541..ca548fb0a 100755
--- a/Tools/Scripts/run-api-tests
+++ b/Tools/Scripts/run-api-tests
@@ -52,6 +52,7 @@ my $showHelp = 0;
my $verbose = 0;
my $dumpTests = 0;
my $build = 1;
+my $root;
my $buildDefault = $build ? "build" : "do not build";
my @testsFailed;
my @testsTimedOut;
@@ -63,14 +64,15 @@ Usage: $programName [options] [suite or test prefixes]
-v|--verbose Verbose output
-d|--dump-tests Dump the names of testcases without running them
--[no-]build Build (or do not build) unit tests prior to running (default: $buildDefault)
- --chromium Run the Chromium port on Mac/Win/Linux
+ --root= Path to the pre-built root containing TestWebKitAPI
EOF
GetOptions(
'help' => \$showHelp,
'verbose|v' => \$verbose,
'dump|d' => \$dumpTests,
- 'build!' => \$build
+ 'build!' => \$build,
+ 'root=s' => \$root
);
if ($showHelp) {
@@ -80,19 +82,9 @@ if ($showHelp) {
setConfiguration();
-# When running TestWebKitAPI for Chromium on Android, defer to the test runner in
-# Chromium as we can't execute the binary locally.
-if (isChromiumAndroid()) {
- my @runTestCommand = ('build/android/run_tests.py', '--verbose', '--webkit', '--exit_code', '--out-directory', sourceDir() . '/out', '-s', 'TestWebKitAPI');
- if (configuration() eq "Release") {
- push @runTestCommand, '--release';
- }
-
- chdir(sourceDir() . '/Source/WebKit/chromium') or die $!;
- exit exitStatus(system(@runTestCommand));
-}
+setConfigurationProductDir(Cwd::abs_path($root)) if (defined($root));
-buildTestTool() if $build;
+buildTestTool() if $build && !defined($root);
setPathForRunningWebKitApp(\%ENV);
my @testsToRun = listAllTests();
@@ -107,7 +99,7 @@ exit runTestsBySuite(@testsToRun, $verbose);
sub isSupportedPlatform()
{
- return isAppleMacWebKit() || isAppleWinWebKit() || isChromium();
+ return isAppleMacWebKit() || isAppleWinWebKit();
}
sub dumpTestsBySuite(\@)
@@ -273,6 +265,8 @@ sub listAllTests()
if ($line =~ m/\.$/) {
$suite = $line; # "SuiteName."
} else {
+ # Disabling WebKit2 API test on Windows since we will be disabling WebKit2 on Windows.
+ next if (isAppleWinWebKit() && $suite =~ m/WebKit2*/);
$line =~ s/^\s*//; # "TestName"
push @tests, $suite . $line; # "SuiteName.TestName"
}
diff --git a/Tools/Scripts/run-bindings-tests b/Tools/Scripts/run-bindings-tests
index a0785e405..a516d11ac 100755
--- a/Tools/Scripts/run-bindings-tests
+++ b/Tools/Scripts/run-bindings-tests
@@ -43,7 +43,6 @@ def main(argv):
generators = [
'JS',
- 'V8',
'ObjC',
'GObject',
'CPP'
diff --git a/Tools/Scripts/run-chromium-webkit-unit-tests b/Tools/Scripts/run-chromium-webkit-unit-tests
deleted file mode 100755
index 114f77af7..000000000
--- a/Tools/Scripts/run-chromium-webkit-unit-tests
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/perl -w
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-use strict;
-use File::Spec;
-use FindBin;
-use lib $FindBin::Bin;
-use webkitdirs;
-use VCSUtils;
-
-setConfiguration();
-
-# Defer to Android's test runners for running webkit_unit_tests on a device.
-if (checkForArgumentAndRemoveFromARGV('--platform=chromium-android')) {
- my @runTestCommand = ('build/android/run_tests.py', '--verbose', '--webkit', '--exit_code', '--out-directory', sourceDir() . '/out', '-s', 'webkit_unit_tests');
- if (configuration() eq "Release") {
- push @runTestCommand, '--release';
- }
-
- chdir(sourceDir() . '/Source/WebKit/chromium') or die $!;
- exit exitStatus(system(@runTestCommand));
-}
-
-push(@ARGV, "--chromium");
-my $pathToBinary = productDir() . "/webkit_unit_tests";
-exit exitStatus(system ($pathToBinary, @ARGV));
diff --git a/Tools/Scripts/run-efl-tests b/Tools/Scripts/run-efl-tests
index 52d119a12..cb6df5b7b 100755
--- a/Tools/Scripts/run-efl-tests
+++ b/Tools/Scripts/run-efl-tests
@@ -48,6 +48,9 @@ if ($xvfb_pid == 0) {
} else {
setConfiguration();
+ # Manually add this for jhbuildWrapperPrefixIfNeeded().
+ push(@ARGV, "--efl");
+
my $returnCode = exitStatus(generateBuildSystemFromCMakeProject("Efl", undef, cmakeBasedPortArguments()));
exit($returnCode) if $returnCode;
diff --git a/Tools/Scripts/run-gtk-tests b/Tools/Scripts/run-gtk-tests
index b24cb24e7..883ed56e0 100755
--- a/Tools/Scripts/run-gtk-tests
+++ b/Tools/Scripts/run-gtk-tests
@@ -62,26 +62,32 @@ class TestRunner:
SKIPPED = [
SkippedTest("unittests/testdownload", "/webkit/download/not-found", "Test fails in GTK Linux 64-bit Release bot", 82329),
- SkippedTest("unittests/testwebview", "/webkit/webview/icon-uri", "Test times out in GTK Linux 64-bit Release bot", 82328),
- SkippedTest("unittests/testwebresource", "/webkit/webresource/sub_resource_loading", "Test fails in GTK Linux 64-bit Release bot", 82330),
SkippedTest("unittests/testwebinspector", "/webkit/webinspector/close-and-inspect", "Test is flaky in GTK Linux 32-bit Release bot", 82869),
- SkippedTest("WebKit2APITests/TestWebKitWebView", "/webkit2/WebKitWebView/mouse-target", "Test is flaky in GTK Linux 32-bit Release bot", 82866),
+ SkippedTest("unittests/testwebresource", "/webkit/webresource/loading", "Test fails", 104689),
+ SkippedTest("unittests/testwebresource", "/webkit/webresource/sub_resource_loading", "Test fails in GTK Linux 64-bit Release bot", 82330),
+ SkippedTest("unittests/testwebview", "/webkit/webview/icon-uri", "Test times out in GTK Linux 64-bit Release bot", 82328),
+ SkippedTest("unittests/testatk", "/webkit/atk/getTextInParagraphAndBodyModerate", "Test fails", 105538),
SkippedTest("WebKit2APITests/TestResources", "/webkit2/WebKitWebView/resources", "Test is flaky in GTK Linux 32-bit Release bot", 82868),
- SkippedTest("WebKit2APITests/TestWebKitFindController", "/webkit2/WebKitFindController/hide", "Test always fails in Xvfb", 89810),
SkippedTest("WebKit2APITests/TestWebKitAccessibility", "/webkit2/WebKitAccessibility/atspi-basic-hierarchy", "Test fails", 100408),
- SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.WKConnection", "Tests fail and time out out", 84959),
- SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.RestoreSessionStateContainingFormData", "Session State is not implemented in GTK+ port", 84960),
- SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.SpacebarScrolling", "Test fails", 84961),
+ SkippedTest("WebKit2APITests/TestWebKitWebView", SkippedTest.ENTIRE_SUITE, "Test times out after r150890", 117689),
+ SkippedTest("WebKit2APITests/TestContextMenu", SkippedTest.ENTIRE_SUITE, "Test times out after r150890", 117689),
+ SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.CanHandleRequest", "Test fails", 88453),
+ SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.MouseMoveAfterCrash", "Test is flaky", 85066),
SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.NewFirstVisuallyNonEmptyLayoutForImages", "Test is flaky", 85066),
SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.NewFirstVisuallyNonEmptyLayoutFrames", "Test fails", 85037),
- SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.MouseMoveAfterCrash", "Test is flaky", 85066),
- SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.CanHandleRequest", "Test fails", 88453),
+ SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.RestoreSessionStateContainingFormData", "Session State is not implemented in GTK+ port", 84960),
+ SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.SpacebarScrolling", "Test fails", 84961),
+ SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.WKConnection", "Tests fail and time out out", 84959),
SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.WKPageGetScaleFactorNotZero", "Test fails and times out", 88455),
+ SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.ForceRepaint", "Test times out", 105532),
+ SkippedTest("TestWebKitAPI/TestWebKit2", "WebKit2.ReloadPageAfterCrash", "Test flakily times out", 110129),
]
def __init__(self, options, tests=[]):
self._options = options
- self._programs_path = common.build_path("Programs")
+ self._build_type = "Debug" if self._options.debug else "Release"
+
+ self._programs_path = common.build_path_for_build_types((self._build_type,), "Programs")
self._tests = self._get_tests(tests)
self._skipped_tests = TestRunner.SKIPPED
if not sys.stdout.isatty():
@@ -154,7 +160,7 @@ class TestRunner:
self._test_env["WEBKIT_INSPECTOR_PATH"] = os.path.abspath(os.path.join(self._programs_path, 'resources', 'inspector'))
self._test_env['GSETTINGS_BACKEND'] = 'memory'
self._test_env["TEST_WEBKIT_API_WEBKIT2_RESOURCES_PATH"] = common.top_level_path("Tools", "TestWebKitAPI", "Tests", "WebKit2")
- self._test_env["TEST_WEBKIT_API_WEBKIT2_INJECTED_BUNDLE_PATH"] = common.build_path("Libraries")
+ self._test_env["TEST_WEBKIT_API_WEBKIT2_INJECTED_BUNDLE_PATH"] = common.build_path_for_build_types((self._build_type,), "Libraries")
self._test_env["WEBKIT_EXEC_PATH"] = self._programs_path
try:
@@ -333,19 +339,21 @@ class TestRunner:
if failed_tests:
names = [test.replace(self._programs_path, '', 1) for test in failed_tests]
- sys.stdout.write("Tests failed: %s\n" % ", ".join(names))
+ sys.stdout.write("Tests failed (%d): %s\n" % (len(names), ", ".join(names)))
sys.stdout.flush()
if timed_out_tests:
names = [test.replace(self._programs_path, '', 1) for test in timed_out_tests]
- sys.stdout.write("Tests that timed out: %s\n" % ", ".join(names))
+ sys.stdout.write("Tests that timed out (%d): %s\n" % (len(names), ", ".join(names)))
sys.stdout.flush()
if self._skipped_tests and self._options.skipped_action == 'skip':
- sys.stdout.write("Tests skipped:\n%s\n" % "\n".join([str(skipped) for skipped in self._skipped_tests]))
+ sys.stdout.write("Tests skipped (%d):\n%s\n" %
+ (len(self._skipped_tests),
+ "\n".join([str(skipped) for skipped in self._skipped_tests])))
sys.stdout.flush()
- return len(failed_tests)
+ return len(failed_tests) + len(timed_out_tests)
if __name__ == "__main__":
if not jhbuildutils.enter_jhbuild_environment_if_available("gtk"):
diff --git a/Tools/Scripts/run-javascriptcore-tests b/Tools/Scripts/run-javascriptcore-tests
index 9cb4c483b..8b79f2514 100755
--- a/Tools/Scripts/run-javascriptcore-tests
+++ b/Tools/Scripts/run-javascriptcore-tests
@@ -140,11 +140,16 @@ chdir "tests/mozilla" or die "Failed to switch directory to 'tests/mozilla'\n";
printf "Running: jsDriver.pl -e squirrelfish -s %s -f actual.html %s\n", jscPath($productDir), join(" ", @jsArgs);
my @jsDriverCmd = ("perl", "jsDriver.pl", "-e", "squirrelfish", "-s", jscPath($productDir), "-f", "actual.html", @jsArgs);
if (isGtk() || isEfl()) {
- my $jhbuildPrefix = sourceDir() . "/Tools/";
- $jhbuildPrefix .= isEfl() ? "efl" : "";
- $jhbuildPrefix .= isGtk() ? "gtk" : "";
- $jhbuildPrefix .= "/run-with-jhbuild";
- unshift(@jsDriverCmd, $jhbuildPrefix);
+ my @jhbuildPrefix = sourceDir() . "/Tools/jhbuild/jhbuild-wrapper";
+
+ if (isEfl()) {
+ push(@jhbuildPrefix, '--efl');
+ } elsif (isGtk()) {
+ push(@jhbuildPrefix, '--gtk');
+ }
+ push(@jhbuildPrefix, 'run');
+
+ unshift(@jsDriverCmd, @jhbuildPrefix);
}
my $result = system(@jsDriverCmd);
exit exitStatus($result) if $result;
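
With this hunk the Mozilla JS test driver is launched through the shared jhbuild-wrapper instead of the per-port run-with-jhbuild scripts, so on GTK the resulting command is roughly the following (paths abbreviated; a sketch, not verbatim output):

    Tools/jhbuild/jhbuild-wrapper --gtk run perl jsDriver.pl -e squirrelfish -s <path-to-jsc> -f actual.html <jsArgs>
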
diff --git a/Tools/Scripts/run-jsc b/Tools/Scripts/run-jsc
index e5341c1d3..82943c0a1 100755
--- a/Tools/Scripts/run-jsc
+++ b/Tools/Scripts/run-jsc
@@ -42,7 +42,6 @@ my $count = 1;
my $verbose = 0;
GetOptions("count|c=i" => \$count,
"verbose|v" => \$verbose);
-die "$usage\n" if (@ARGV < 1);
my $jsc = jscProductDir() . "/jsc @ARGV";
$jsc .= " 2> " . File::Spec->devnull() unless $verbose;
diff --git a/Tools/Scripts/run-launcher b/Tools/Scripts/run-launcher
index eca07f6f3..66886fe0b 100755
--- a/Tools/Scripts/run-launcher
+++ b/Tools/Scripts/run-launcher
@@ -69,25 +69,18 @@ if (isQt()) {
} else {
unshift(@ARGV, catdir($launcherPath, "Programs", "GtkLauncher"));
}
- $launcherPath = catdir(sourceDir(), "Tools", "gtk", "run-with-jhbuild");
+ $launcherPath = catdir(sourceDir(), "Tools", "jhbuild", "jhbuild-wrapper");
+ unshift(@ARGV, ("--gtk", "run"));
}
-
+
if (isEfl()) {
if (isWK2()) {
unshift(@ARGV, catdir($launcherPath, "bin", "MiniBrowser"));
} else {
unshift(@ARGV, catdir($launcherPath, "bin", "EWebLauncher"));
}
- $launcherPath = catdir(sourceDir(), "Tools", "efl", "run-with-jhbuild");
- }
-
- if (isWx()) {
- if (isDarwin()) {
- $launcherPath = catdir($launcherPath, 'wxBrowser.app', 'Contents', 'MacOS', 'wxBrowser');
- } else {
- $ENV{LD_LIBRARY_PATH} = $ENV{LD_LIBRARY_PATH} ? "$productDir:$ENV{LD_LIBRARY_PATH}" : $productDir;
- $launcherPath = catdir($launcherPath, 'wxBrowser');
- }
+ $launcherPath = catdir(sourceDir(), "Tools", "jhbuild", "jhbuild-wrapper");
+ unshift(@ARGV, ("--efl", "run"));
}
print "Starting webkit launcher.\n";
diff --git a/Tools/Scripts/run-qtwebkit-tests b/Tools/Scripts/run-qtwebkit-tests
index ded87c5fa..ea8d4093c 100755
--- a/Tools/Scripts/run-qtwebkit-tests
+++ b/Tools/Scripts/run-qtwebkit-tests
@@ -44,7 +44,7 @@ class Options(Log):
def __init__(self, args):
Log.__init__(self, "Options")
log = self._log
- opt = OptionParser("%prog [options] PathToSearch.\nTry -h or --help.")
+ opt = OptionParser("%prog [options] [PathToSearch].\nTry -h or --help.")
opt.add_option("-j", "--parallel-level", action="store", type="int",
dest="parallel_level", default=None,
help="Number of parallel processes executing the Qt's tests. Default: cpu count.")
@@ -69,6 +69,12 @@ class Options(Log):
opt.add_option("-t", "--timeout", action="store", type="int",
dest="timeout", default=0,
help="Timeout in seconds for each testsuite. Zero value means that there is not timeout. Default: %default.")
+ opt.add_option("--release", action="store_true", dest="release", default=True,
+ help="Run API tests in WebKitBuild/Release/... directory. It is ignored if PathToSearch is passed.")
+ opt.add_option("--debug", action="store_false", dest="release",
+ help="Run API tests in WebKitBuild/Debug/... directory. It is ignored if PathToSearch is passed.")
+ opt.add_option("-2", "--webkit2", action="store_true", dest="webkit2", default=False,
+ help="Run WebKit2 API tests. Default: Run WebKit1 API tests. It is ignored if PathToSearch is passed.")
self._o, self._a = opt.parse_args(args)
verbose = self._o.verbose
@@ -83,18 +89,34 @@ class Options(Log):
else:
logging.basicConfig(level=logging.INFO,)
log.warn("Bad verbose level, switching to default.")
+
+ if self._o.release:
+ configuration = "Release"
+ else:
+ configuration = "Debug"
+
+ if self._o.webkit2:
+ test_directory = "WebKit2/UIProcess/API/qt/tests/"
+ else:
+ test_directory = "WebKit/qt/tests/"
+
try:
- if not os.path.exists(self._a[0]):
+ if len(self._a) == 0:
+ self._o.path = "WebKitBuild/%s/Source/%s" % (configuration, test_directory)
+ else:
+ if len(self._a) > 1:
+ raise IndexError("Only one directory should be provided.")
+ self._o.path = self._a[0]
+
+ if not os.path.exists(self._o.path):
raise Exception("Given path doesn't exist.")
- if len(self._a) > 1:
- raise IndexError("Only one directory could be provided.")
- self._o.path = self._a[0]
except IndexError:
log.error("Bad usage. Please try -h or --help.")
sys.exit(1)
except Exception:
- log.error("Path '%s' doesn't exist", self._a[0])
+ log.error("Path '%s' doesn't exist", self._o.path)
sys.exit(2)
+
if self._o.developer:
if not self._o.parallel_level is None:
log.warn("Developer mode sets parallel-level option to one.")
diff --git a/Tools/Scripts/run-sunspider b/Tools/Scripts/run-sunspider
index 1b60a7532..4aec3010d 100755
--- a/Tools/Scripts/run-sunspider
+++ b/Tools/Scripts/run-sunspider
@@ -38,9 +38,6 @@ my $configuration = configuration();
my $root;
my $testRuns = 10; # This number may be different from what sunspider defaults to (that's OK)
-my $runShark = 0;
-my $runShark20 = 0;
-my $runSharkCache = 0;
my $runInstruments = 0;
my $suite = "";
my $ubench = 0;
@@ -59,9 +56,6 @@ Usage: $programName [options] [options to pass to build system]
--root Path to root tools build
--runs Number of times to run tests (default: $testRuns)
--tests Only run tests matching provided pattern
- --shark Sample with the Mac OS X "Shark" performance testing tool (implies --runs=1)
- --shark20 Like --shark, but with a 20 microsecond sampling interval
- --shark-cache Like --shark, but performs a L2 cache-miss sample instead of time sample
--instruments Sample with the Mac OS X "Instruments" tool (Time Profile) (implies --runs=1)
--suite Select a specific benchmark suite. The default is sunspider-0.9.1
--ubench Use microbenchmark suite instead of regular tests. Same as --suite=ubench
@@ -73,9 +67,6 @@ EOF
GetOptions('root=s' => sub { my ($x, $value) = @_; $root = $value; setConfigurationProductDir(Cwd::abs_path($root)); },
'runs=i' => \$testRuns,
'set-baseline' => \$setBaseline,
- 'shark' => \$runShark,
- 'shark20' => \$runShark20,
- 'shark-cache' => \$runSharkCache,
'instruments' => \$runInstruments,
'suite=s' => \$suite,
'ubench' => \$ubench,
@@ -125,9 +116,6 @@ setupEnvironmentForExecution($productDir);
my @args = ("--shell", jscPath($productDir), "--runs", $testRuns);
# This code could be removed if we chose to pass extra args to sunspider instead of Xcode
push @args, "--set-baseline" if $setBaseline;
-push @args, "--shark" if $runShark;
-push @args, "--shark20" if $runShark20;
-push @args, "--shark-cache" if $runSharkCache;
push @args, "--instruments" if $runInstruments;
push @args, "--suite=${suite}" if $suite;
push @args, "--ubench" if $ubench;
diff --git a/Tools/Scripts/run-webkit-httpd b/Tools/Scripts/run-webkit-httpd
index 8fb1887d4..af4d4f375 100755
--- a/Tools/Scripts/run-webkit-httpd
+++ b/Tools/Scripts/run-webkit-httpd
@@ -89,7 +89,7 @@ my @args = (
"-X",
# Disable Keep-Alive support. Makes testing in multiple browsers easier (no need to wait
# for another browser's connection to expire).
- "-c", "KeepAlive 0"
+ "-c", "KeepAlive off"
);
my @defaultArgs = getDefaultConfigForTestDirectory($testDirectory);
diff --git a/Tools/Scripts/run-webkit-tests b/Tools/Scripts/run-webkit-tests
index 4bb8f3944..fd507e49a 100755
--- a/Tools/Scripts/run-webkit-tests
+++ b/Tools/Scripts/run-webkit-tests
@@ -57,28 +57,12 @@ sub runningOnBuildBot()
sub useNewRunWebKitTests()
{
- # NRWT Windows support still needs work: https://bugs.webkit.org/show_bug.cgi?id=38756
- return 0 if (isWindows() or isCygwin()) and !isChromium();
# NRWT does not support qt-arm: https://bugs.webkit.org/show_bug.cgi?id=64086
return 0 if isQt() and isARM();
# All other platforms should use NRWT by default.
return 1;
}
-sub platformIsReadyForParallelTesting()
-{
- # NRWT is able to run the tests in parallel, ORWT was not.
- # When we run the tests in parallel, tests which (incorrectly)
- # interact with each other can start failing.
- # To reduce the failure burden during the transition individual
- # platforms can opt-in to parallel test execution by default.
-
- # We believe all platforms are ready for default parallel testing except
- # Qt, as Qt runs more than one build-slave per-server.
- # Ossy has asked me to blacklist Qt for now.
- return !isQt();
-}
-
my $script = "perl";
my $harnessName = "old-run-webkit-tests";
@@ -86,12 +70,6 @@ if (useNewRunWebKitTests()) {
$script = "python";
$harnessName = "new-run-webkit-tests";
- if (!grep(/--child-processes/, @ARGV) and !platformIsReadyForParallelTesting()) {
- push(@ARGV, "--child-processes=1");
- print "Running new-run-webkit-tests with one child process.\n";
- print "For more parallelism, run new-run-webkit-tests directly.\n";
- }
-
if (runningOnBuildBot()) {
push(@ARGV, "--debug-rwt-logging");
}
@@ -112,10 +90,6 @@ if (isQt()) {
push(@ARGV, "--gtk");
} elsif (isEfl()) {
push(@ARGV, "--efl");
-} elsif (isChromiumAndroid()) {
- push(@ARGV, "--chromium-android");
-} elsif (isChromium()) {
- push(@ARGV, "--chromium");
} elsif (isWinCairo()) {
push(@ARGV, "--wincairo");
}
diff --git a/Tools/Scripts/sampstat b/Tools/Scripts/sampstat
new file mode 100755
index 000000000..72ba2b70d
--- /dev/null
+++ b/Tools/Scripts/sampstat
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2007, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import math
+import sys
+import re
+import fileinput
+from optparse import OptionParser
+
+usage = "usage: %prog [options] [FILES]\n Compute the mean and 95% confidence interval of a sample set.\n Standard input or files must contain two or more decimal numbers, one per line."
+parser = OptionParser(usage=usage)
+parser.add_option("-u", "--unit", dest="unit", default="",
+ help="assume values are in units of UNIT", metavar="UNIT")
+parser.add_option("-v", "--verbose",
+ action="store_true", dest="verbose", default=False,
+ help="print all values (with units)")
+(options, files) = parser.parse_args()
+
+def sum(items):
+ return reduce(lambda x,y: x+y, items)
+
+def arithmeticMean(items):
+ return sum(items) / len(items)
+
+def standardDeviation(mean, items):
+ deltaSquares = [(item - mean) ** 2 for item in items]
+ return math.sqrt(sum(deltaSquares) / (len(items) - 1))
+
+def standardError(stdDev, items):
+ return stdDev / math.sqrt(len(items))
+
+# t-distribution for 2-sided 95% confidence intervals
+tDistribution = [float('NaN'), float('NaN'), 12.71, 4.30, 3.18, 2.78, 2.57, 2.45, 2.36, 2.31, 2.26, 2.23, 2.20, 2.18, 2.16, 2.14, 2.13, 2.12, 2.11, 2.10, 2.09, 2.09, 2.08, 2.07, 2.07, 2.06, 2.06, 2.06, 2.05, 2.05, 2.05, 2.04, 2.04, 2.04, 2.03, 2.03, 2.03, 2.03, 2.03, 2.02, 2.02, 2.02, 2.02, 2.02, 2.02, 2.02, 2.01, 2.01, 2.01, 2.01, 2.01, 2.01, 2.01, 2.01, 2.01, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 2.00, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.99, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.98, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.97, 1.96]
+tMax = len(tDistribution)
+tLimit = 1.96
+
+def tDist(n):
+    if n >= tMax:
+ return tLimit
+ return tDistribution[n]
+
+def twoSidedConfidenceInterval(items):
+ mean = arithmeticMean(items)
+ stdDev = standardDeviation(mean, items)
+ stdErr = standardError(stdDev, items)
+ return tDist(len(items)) * stdErr
+
+results = []
+
+decimalNumberPattern = re.compile(r"\d+\.?\d*")
+for line in fileinput.input(files):
+ match = re.search(decimalNumberPattern, line)
+ if match:
+ results.append(float(match.group(0)))
+
+if len(results) == 0:
+ parser.print_help()
+ quit()
+
+
+mean = arithmeticMean(results)
+confidenceInterval = twoSidedConfidenceInterval(results)
+confidencePercent = 100 * confidenceInterval / mean
+
+if options.verbose:
+ length = 7
+ for item in results:
+ line = " %.2f %s" % (item, options.unit)
+ print line
+ length = len(line) if len(line) > length else length
+
+ print "-" * length
+
+prefix = "Mean: " if options.verbose else ""
+print "%s%.2f %s +/- %.2f %s (%.1f%%)" % (prefix, mean, options.unit, confidenceInterval, options.unit, confidencePercent)
+
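
The script above reports the sample mean together with a two-sided 95% confidence interval: the sample standard deviation is divided by sqrt(n) to give the standard error, which is then scaled by the tabulated t value for the sample size. A self-contained sketch of the same calculation on made-up numbers, mirroring the functions above:

    # Sketch of the statistic sampstat prints, on a hypothetical sample.
    import math

    samples = [102.0, 98.5, 101.2, 99.8, 100.4]  # made-up measurements
    n = len(samples)
    mean = sum(samples) / n
    std_dev = math.sqrt(sum((x - mean) ** 2 for x in samples) / (n - 1))
    std_err = std_dev / math.sqrt(n)
    ci = 2.78 * std_err  # t value for n = 5 from the table above
    print("%.2f +/- %.2f (%.1f%%)" % (mean, ci, 100 * ci / mean))
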
diff --git a/Tools/Scripts/svn-create-patch b/Tools/Scripts/svn-create-patch
index b830f9bd3..06e016893 100755
--- a/Tools/Scripts/svn-create-patch
+++ b/Tools/Scripts/svn-create-patch
@@ -101,16 +101,9 @@ for my $path (keys %paths) {
my $svnRoot = determineSVNRoot();
my $prefix = chdirReturningRelativePath($svnRoot);
-my $patchSize = 0;
-
-# Generate the diffs, in a order chosen for easy reviewing.
+# Generate the diffs, in an order chosen for ease of reviewing.
for my $path (sort patchpathcmp values %diffFiles) {
- $patchSize += generateDiff($path, $prefix);
-}
-
-if ($patchSize > 20480) {
- print STDERR "WARNING: Patch's size is " . int($patchSize/1024) . " kbytes.\n";
- print STDERR "Patches 20k or smaller are more likely to be reviewed. Larger patches may sit unreviewed for a long time.\n";
+ generateDiff($path, $prefix);
}
exit 0;
@@ -245,7 +238,6 @@ sub generateDiff($$)
print "\n" if ($patch && $patch =~ m/\n\S+$/m);
outputBinaryContent($file);
}
- return length($patch);
}
sub generateFileList($\%)
diff --git a/Tools/Scripts/test-webkit-scripts b/Tools/Scripts/test-webkit-scripts
index 781e8ce0f..baba0598f 100755
--- a/Tools/Scripts/test-webkit-scripts
+++ b/Tools/Scripts/test-webkit-scripts
@@ -28,7 +28,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-"""Run unit tests of WebKit's Perl and Python scripts."""
+"""Run unit tests of WebKit's Perl, Python, and Ruby scripts."""
# The docstring above is passed as the "description" to the OptionParser
# used in this script's __main__ block.
@@ -72,11 +72,12 @@ class ScriptsTester(object):
self.run_test_script('Perl scripts', self.script_path('test-webkitperl'))
self.run_test_script('Python scripts', self.script_path('test-webkitpy'),
['--all'] if options.all else None)
+ self.run_test_script('Ruby scripts', self.script_path('test-webkitruby'))
# FIXME: Display a cumulative indication of success or failure.
# In addition, call sys.exit() with 0 or 1 depending on that
# cumulative success or failure.
- print('Note: Perl and Python results appear separately above.')
+ print('Note: Perl, Python, and Ruby results appear separately above.')
if __name__ == '__main__':
diff --git a/Tools/Scripts/test-webkitruby b/Tools/Scripts/test-webkitruby
new file mode 100755
index 000000000..cd04a0ab5
--- /dev/null
+++ b/Tools/Scripts/test-webkitruby
@@ -0,0 +1,34 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+$exit_code = 0;
+
+Dir.chdir File.dirname(__FILE__)
+Dir.glob("./webkitruby/*/*.rb").each do |test|
+ puts %x{ '#{test}' }
+ $exit_code = 1 if $?.exitstatus != 0
+end
+
+exit $exit_code
diff --git a/Tools/Scripts/update-sources-list.py b/Tools/Scripts/update-sources-list.py
deleted file mode 100755
index 2a4a5eff0..000000000
--- a/Tools/Scripts/update-sources-list.py
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright (C) 2007 Kevin Ollivier All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
-# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Make sure any port-independent files added to the Bakefile are
-# added to GTK, QT, etc. so that file updates can happen in one place.
-
-import os, sys
-from xml.dom import minidom
-
-scriptDir = os.path.abspath(sys.path[0])
-wkroot = os.path.abspath(os.path.join(scriptDir, "../.."))
-
-def getWebCoreFilesDict():
- """
- This method parses the WebCoreSources.bkl file, which has a list of all sources not specific
- to any port, and returns the result as a dictionary with items of the form
- (groupName, groupFiles).
- """
- sources = {}
- sources_prefix = "WEBCORE_"
- filepath = os.path.join(wkroot, "Source", "WebCore", "WebCoreSources.bkl")
- assert(os.path.exists(filepath))
-
- doc = minidom.parse(filepath)
- for sourceGroup in doc.getElementsByTagName("set"):
- groupName = ""
- if sourceGroup.attributes.has_key("var"):
- groupName = sourceGroup.attributes["var"].value
- groupName = groupName.replace(sources_prefix, "")
-
- sourcesList = []
- for node in sourceGroup.childNodes:
- if node.nodeType == node.TEXT_NODE:
- sourcesText = node.nodeValue.strip()
- sourcesList = sourcesText.split("\n")
-
- assert(groupName != "")
- assert(sourcesList != [])
-
- sources[groupName] = sourcesList
-
- return sources
-
-def generateWebCoreSourcesGTKAndQT(sources):
- """
- Convert the dictionary obtained from getWebCoreFilesDict() into a Unix makefile syntax,
- which IIUC is suitable for both GTK and QT build systems. To take advantage of this,
- QT and GTK would have to include the file "WebCore/sources.inc" into their makefiles.
- """
- makefileString = ""
-
- for key in sources.keys():
- makefileString += key + "+="
- for source in sources[key]:
- makefileString += " \\\n\t\t" + source.strip()
-
- makefileString += "\n\n"
-
- makefileString += "BASE_SOURCES +="
- for key in sources.keys():
- makefileString += " \\\n\t\t" + key
-
- outfile = os.path.join(wkroot, "Source", "WebCore", "sources.inc")
- sourcefile = open(outfile, "w")
- sourcefile.write(makefileString)
- sourcefile.close()
-
-sources = getWebCoreFilesDict()
-generateWebCoreSourcesGTKAndQT(sources)
-
-# Coming soon - MSVC and hopefully XCode support!
diff --git a/Tools/Scripts/update-webkit b/Tools/Scripts/update-webkit
index 6ba8044f4..485eb748b 100755
--- a/Tools/Scripts/update-webkit
+++ b/Tools/Scripts/update-webkit
@@ -45,12 +45,7 @@ sub runGitUpdate();
# Handle options
my $quiet = '';
my $showHelp;
-my $useGYP = 0;
-my $useMake = 0;
-my $useNinja = 0;
-determineIsChromium();
-determineIsChromiumAndroid();
determineIsQt();
determineIsWinCairo();
@@ -59,33 +54,18 @@ chdirWebKit();
my $getOptionsResult = GetOptions(
'h|help' => \$showHelp,
'q|quiet' => \$quiet,
- 'gyp' => \$useGYP,
- 'make' => \$useMake,
- 'ninja' => \$useNinja,
);
if (!$getOptionsResult || $showHelp) {
print STDERR <<__END__;
Usage: @{[ basename($0) ]} [options]
- --chromium also update dependencies of the chromium port
- --make generate the Makefile-based build system (Chromium only)
- --ninja generate the ninja-based build system (Chromium only)
- --chromium-android also update dependencies of the chromium port for Android
-h|--help show the help message
-q|--quiet pass -q to svn update for quiet updates
- --gyp generate project files from gyp after update
--wincairo also update dependencies of the WinCairo port
__END__
exit 1;
}
-if ($useMake) {
- $ENV{"GYP_GENERATORS"} = "make";
-}
-if ($useNinja) {
- $ENV{"GYP_GENERATORS"} = "ninja";
-}
-
my @svnOptions = ();
push @svnOptions, '-q' if $quiet;
@@ -101,11 +81,6 @@ if (-d "../Internal") {
print "Updating Internal\n" unless $quiet;
runSvnUpdate() if isSVNDirectory(".");
runGitUpdate() if isGitDirectory(".");
-} elsif (isChromium()) {
- my @chromiumUpdateArgs = ("perl", "Tools/Scripts/update-webkit-chromium");
- push @chromiumUpdateArgs, "--chromium-android" if isChromiumAndroid();
- push @chromiumUpdateArgs, "--force" if forceChromiumUpdate();
- system(@chromiumUpdateArgs) == 0 or die $!;
} elsif (isAppleWinWebKit()) {
system("perl", "Tools/Scripts/update-webkit-auxiliary-libs") == 0 or die;
if (isWinCairo()) {
@@ -116,11 +91,6 @@ if (-d "../Internal") {
setupAppleWinEnv() if isAppleWinWebKit();
-if ($useGYP) {
- print "Generating Project Files\n";
- system("perl", "Tools/Scripts/generate-project-files") == 0 or die "Failed to run generate-project-files";
-}
-
exit 0;
sub runSvnUpdate()
diff --git a/Tools/Scripts/update-webkit-chromium b/Tools/Scripts/update-webkit-chromium
deleted file mode 100755
index b21d09ec5..000000000
--- a/Tools/Scripts/update-webkit-chromium
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/perl -w
-# Copyright (C) 2009 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-# 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
-# its contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Update script for the WebKit Chromium Port.
-
-use File::Path;
-use FindBin;
-use Getopt::Long;
-use lib $FindBin::Bin;
-use webkitdirs;
-
-determineIsChromiumAndroid();
-
-chdirWebKit();
-chdir("Source/WebKit/chromium") or die $!;
-
-# Find gclient or install it.
-my $gclientPath;
-if (commandExists('gclient')) {
- $gclientPath = 'gclient';
-} elsif (-e 'depot_tools/gclient') {
- $gclientPath = 'depot_tools/gclient';
-} else {
- print "Installing chromium's depot_tools...\n";
- system("svn co http://src.chromium.org/svn/trunk/tools/depot_tools") == 0 or die $1;
- $gclientPath = 'depot_tools/gclient';
-}
-
-if (! -e ".gclient") {
- # If .gclient configuration file doesn't exist, create it.
- print "Configuring gclient...\n";
- system($gclientPath,
- "config",
- "--spec=solutions=[{'name':'./','url':None}]") == 0 or die $!;
-}
-
-# When building Chromium for Android, the envsetup.sh script needs to be
-# executed prior to project file generation. We need to tell gyp_webkit to do
-# that, as it's a Chromium file and may not be available yet right now.
-if (isChromiumAndroid()) {
- $ENV{WEBKIT_ANDROID_BUILD} = 1;
-}
-
-my $force = 0;
-GetOptions(
- 'force' => \$force,
-);
-
-# Execute gclient sync.
-print "Updating chromium port dependencies using gclient...\n";
-my @gclientArgs = ($gclientPath, "sync");
-push @gclientArgs, "--force" if $force;
-# --reset could delete modified files if necessary to sync.
-push @gclientArgs, "--reset" if $force;
-push @gclientArgs, "--delete_unversioned_trees" if $force;
-push @gclientArgs, "--deps=unix,android" if isChromiumAndroid();
-
-my $cmd = join(" ",@gclientArgs);
-my $max_attempts = 3;
-my $rc = -1;
-
-# The following will call glient up to $max_attempts times before
-# it gives up and fails. We need this because glcient can fail
-# for several reasons, some of which are transient (flakiness).
-
-for (1 .. $max_attempts) {
- $rc = system($cmd);
- print "Re-trying '" . $cmd . "'\n" if $rc != 0;
- last if $rc == 0;
-}
-
-die "Error: '$cmd' failed $max_attempts tries and returned " . $rc if ($rc);
diff --git a/Tools/Scripts/update-webkit-dependency b/Tools/Scripts/update-webkit-dependency
index 71d46728f..2654229cc 100755
--- a/Tools/Scripts/update-webkit-dependency
+++ b/Tools/Scripts/update-webkit-dependency
@@ -67,7 +67,7 @@ my $prefixInZip = shift;
my $sourceDir = sourceDir();
my $file = getLibraryName($libsURL);
my $zipFile = "$file.zip";
-my $webkitLibrariesDir = toUnixPath($ENV{'WEBKITLIBRARIESDIR'}) || "$sourceDir/WebKitLibraries/win";
+my $webkitLibrariesDir = toUnixPath($ENV{'WEBKIT_LIBRARIES'}) || "$sourceDir/WebKitLibraries/win";
my $tmpRelativeDir = File::Temp::tempdir("webkitlibsXXXXXXX", TMPDIR => 1, CLEANUP => 1);
my $tmpAbsDir = File::Spec->rel2abs($tmpRelativeDir);
diff --git a/Tools/Scripts/update-webkit-libs-jhbuild b/Tools/Scripts/update-webkit-libs-jhbuild
index 51605bded..74dd7b6c5 100755
--- a/Tools/Scripts/update-webkit-libs-jhbuild
+++ b/Tools/Scripts/update-webkit-libs-jhbuild
@@ -101,8 +101,6 @@ sub runJhbuild
sub cleanJhbuild()
{
- runJhbuild("clean");
-
# If the configuration changed, dependencies may have been removed.
# Since we lack a granular way of uninstalling those we wipe out the
# jhbuild root and start from scratch.
@@ -110,6 +108,10 @@ sub cleanJhbuild()
if (system("rm -rf $jhbuildPath/Root") ne 0) {
die "Cleaning jhbuild root failed!";
}
+
+ if (system("rm -rf $jhbuildPath/Source") ne 0) {
+ die "Cleaning jhbuild sources failed!";
+ }
}
delete $ENV{AR_FLAGS} if exists $ENV{AR_FLAGS};
@@ -122,7 +124,7 @@ if (-e getJhbuildPath() && jhbuildConfigurationChanged()) {
cleanJhbuild();
}
+saveJhbuildMd5();
+
print "Updating " . $prettyPlatform{$platform} . " port dependencies using jhbuild...\n";
runJhbuild("build");
-
-saveJhbuildMd5();
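The reordering above (saveJhbuildMd5 now runs before the build) leans on a simple change-detection pattern: record a checksum of the jhbuild configuration, compare it with the checksum saved by the previous run, and wipe the dependency root when they differ. The sketch below shows only that general pattern; the helper names, file locations, and checksum choice are illustrative and not the actual implementation in update-webkit-libs-jhbuild:

    use strict;
    use Digest::MD5;

    # Combined MD5 of the given configuration files.
    sub configurationChecksum
    {
        my (@configFiles) = @_;
        my $md5 = Digest::MD5->new;
        foreach my $file (@configFiles) {
            open(my $fh, "<", $file) or next;
            $md5->addfile($fh);
            close($fh);
        }
        return $md5->hexdigest;
    }

    # True if the checksum stored by the previous run differs from the current one.
    sub configurationChanged
    {
        my ($savedChecksumFile, @configFiles) = @_;
        return 1 unless -e $savedChecksumFile;
        open(my $fh, "<", $savedChecksumFile) or return 1;
        chomp(my $previous = <$fh>);
        close($fh);
        return $previous ne configurationChecksum(@configFiles);
    }

    # Store the current checksum so the next run has something to compare against.
    sub saveChecksum
    {
        my ($savedChecksumFile, @configFiles) = @_;
        open(my $fh, ">", $savedChecksumFile) or die "Cannot write $savedChecksumFile: $!";
        print $fh configurationChecksum(@configFiles), "\n";
        close($fh);
    }

Recording the checksum before the build, as the hunk above does, means a build failure does not leave a stale checksum behind, so the next run can retry without wiping the dependency root again.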
diff --git a/Tools/Scripts/update-webkit-localizable-strings b/Tools/Scripts/update-webkit-localizable-strings
index ceb25a58a..8a537bffb 100755
--- a/Tools/Scripts/update-webkit-localizable-strings
+++ b/Tools/Scripts/update-webkit-localizable-strings
@@ -1,6 +1,6 @@
#!/usr/bin/perl -w
-# Copyright (C) 2006, 2007 Apple Inc. All rights reserved.
+# Copyright (C) 2006, 2007, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -35,11 +35,15 @@ use lib $FindBin::Bin;
use webkitdirs;
# WebKit and WebKit2 strings go into WebCore's Localizable.strings.
-my @directoriesToScan = ("Source/WebCore", "Source/WebKit/mac", "Source/WebKit/win", "Source/WebKit2", "-Source/WebCore/icu", "-Source/WebKit/mac/icu");
-my $fileToUpdate = "Source/WebCore/English.lproj/Localizable.strings";
+my @webKitDirectoriesToScan = ("Source/WebCore", "Source/WebKit/mac", "Source/WebKit/win", "Source/WebKit2", "-Source/WebCore/icu", "-Source/WebKit/mac/icu");
+my @webInspectorUIDirectoriesToScan = ("Source/WebInspectorUI/UserInterface");
+
+my $webCoreFileToUpdate = "Source/WebCore/English.lproj/Localizable.strings";
+my $webInspectorUIFileToUpdate = "Source/WebInspectorUI/Localizations/en.lproj/localizedStrings.js";
@ARGV == 0 or die "Usage: " . basename($0) . "\n";
chdirWebKit();
-system "Tools/Scripts/extract-localizable-strings", "-", $fileToUpdate, @directoriesToScan;
+system "Tools/Scripts/extract-localizable-strings", "-", $webCoreFileToUpdate, @webKitDirectoriesToScan;
+system "Tools/Scripts/extract-localizable-js-strings", $webInspectorUIFileToUpdate, @webInspectorUIDirectoriesToScan;
diff --git a/Tools/Scripts/update-webkit-support-libs b/Tools/Scripts/update-webkit-support-libs
index afced1d28..37f3ac9da 100755
--- a/Tools/Scripts/update-webkit-support-libs
+++ b/Tools/Scripts/update-webkit-support-libs
@@ -46,7 +46,7 @@ my $file = "WebKitSupportLibrary";
my $zipFile = "$file.zip";
my $zipDirectory = toUnixPath($ENV{'WEBKITSUPPORTLIBRARIESZIPDIR'}) || $sourceDir;
my $pathToZip = File::Spec->catfile($zipDirectory, $zipFile);
-my $webkitLibrariesDir = toUnixPath($ENV{'WEBKITLIBRARIESDIR'}) || "$sourceDir/WebKitLibraries/win";
+my $webkitLibrariesDir = toUnixPath($ENV{'WEBKIT_LIBRARIES'}) || "$sourceDir/WebKitLibraries/win";
my $versionFile = $file . "Version";
my $pathToVersionFile = File::Spec->catfile($webkitLibrariesDir, $versionFile);
my $tmpRelativeDir = File::Temp::tempdir("webkitlibsXXXXXXX", TMPDIR => 1, CLEANUP => 1);
diff --git a/Tools/Scripts/update-webkit-wincairo-libs b/Tools/Scripts/update-webkit-wincairo-libs
index 52d052e72..4a2bb0f35 100755
--- a/Tools/Scripts/update-webkit-wincairo-libs
+++ b/Tools/Scripts/update-webkit-wincairo-libs
@@ -35,7 +35,7 @@ use FindBin;
my $file = "WinCairoRequirements";
my $zipFile = "$file.zip";
-my $winCairoLibsURL = "http://dl.dropbox.com/u/39598926/$zipFile";
+my $winCairoLibsURL = "https://dl.dropboxusercontent.com/u/39598926/$zipFile";
my $command = "$FindBin::Bin/update-webkit-dependency";
system("perl", $command, $winCairoLibsURL, ".") == 0 or die;
diff --git a/Tools/Scripts/validate-committer-lists b/Tools/Scripts/validate-committer-lists
index 96763d430..62ff9bc52 100755
--- a/Tools/Scripts/validate-committer-lists
+++ b/Tools/Scripts/validate-committer-lists
@@ -31,6 +31,7 @@
# Checks Python's known list of committers against lists.webkit.org and SVN history.
+import logging
import os
import subprocess
import re
@@ -39,7 +40,6 @@ from datetime import date, datetime, timedelta
from optparse import OptionParser
from webkitpy.common.config.committers import CommitterList
-from webkitpy.common.system.deprecated_logging import log, error
from webkitpy.common.checkout.scm import Git
from webkitpy.common.net.bugzilla import Bugzilla
@@ -47,6 +47,7 @@ from webkitpy.common.net.bugzilla import Bugzilla
# so this import should always succeed.
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+_log = logging.getLogger(__name__)
def print_list_if_non_empty(title, list_to_print):
if not list_to_print:
@@ -58,8 +59,8 @@ def print_list_if_non_empty(title, list_to_print):
class CommitterListFromMailingList(object):
- committers_list_url = "http://lists.webkit.org/mailman/roster.cgi/webkit-committers"
- reviewers_list_url = "http://lists.webkit.org/mailman/roster.cgi/webkit-reviewers"
+ committers_list_url = "http://lists.webkit.org/mailman/roster/webkit-committers"
+ reviewers_list_url = "http://lists.webkit.org/mailman/roster/webkit-reviewers"
def _fetch_emails_from_page(self, url):
page = urllib2.urlopen(url)
@@ -101,7 +102,7 @@ class CommitterListFromMailingList(object):
print_list_if_non_empty("Committers missing from %s:" % list_name, missing_from_mailing_list)
users_missing_from_committers = self._emails_not_found_in_committer_list(committer_list.committers(), committer_emails)
- print_list_if_non_empty("Subcribers to %s missing from committer.py:" % list_name, users_missing_from_committers)
+ print_list_if_non_empty("Subcribers to %s missing from contributors.json:" % list_name, users_missing_from_committers)
reviewer_emails = self._fetch_emails_from_page(self.reviewers_list_url)
@@ -111,10 +112,10 @@ class CommitterListFromMailingList(object):
print_list_if_non_empty("Reviewers missing from %s:" % list_name, missing_from_mailing_list)
missing_from_reviewers = self._emails_not_found_in_committer_list(committer_list.reviewers(), reviewer_emails)
- print_list_if_non_empty("Subcribers to %s missing from reviewers in committer.py:" % list_name, missing_from_reviewers)
+ print_list_if_non_empty("Subcribers to %s missing from reviewers in contributors.json:" % list_name, missing_from_reviewers)
missing_from_committers = self._emails_not_found_in_committer_list(committer_list.committers(), reviewer_emails)
- print_list_if_non_empty("Subcribers to %s completely missing from committers.py" % list_name, missing_from_committers)
+ print_list_if_non_empty("Subcribers to %s completely missing from contributors.json:" % list_name, missing_from_committers)
class CommitterListFromGit(object):
@@ -173,7 +174,8 @@ class CommitterListFromGit(object):
match_result = line_regexp.match(output_line)
if not match_result:
- error("Failed to match line: %s" % output_line)
+ _log.error("Failed to match line: %s" % output_line)
+ exit(1)
last_commit_dates[match_result.group('author')] = float(match_result.group('timestamp'))
def _fill_in_emails_for_old_logins(self):
@@ -194,7 +196,7 @@ class CommitterListFromGit(object):
# No known email mapping, likely not an active committer. We could log here.
continue
- # log("%s -> %s" % (author, author_email)) # For sanity checking.
+ # _log.info("%s -> %s" % (author, author_email)) # For sanity checking.
no_email_commit_time = self._last_commit_time_by_author_cache.get(author)
email_commit_time = self._last_commit_time_by_author_cache.get(author_email)
# We compare the timestamps for extra sanity even though we could assume commits before email address were used for login are always going to be older.
@@ -228,11 +230,11 @@ class CommitterListFromGit(object):
self._print_three_column_row(column_widths, (str(last_commit_date), author, committer_record))
def print_committers_missing_from_committer_list(self, committer_list):
- missing_from_committers_py = []
+ missing_from_contributors_json = []
last_commit_time_by_author = self._last_commit_by_author()
for author in last_commit_time_by_author:
if not committer_list.committer_by_email(author):
- missing_from_committers_py.append(author)
+ missing_from_contributors_json.append(author)
never_committed = []
for committer in committer_list.committers():
@@ -242,8 +244,8 @@ class CommitterListFromGit(object):
else:
never_committed.append(committer)
- print_list_if_non_empty("Historical committers missing from committer.py:", missing_from_committers_py)
- print_list_if_non_empty("Committers in committer.py who have never committed:", never_committed)
+ print_list_if_non_empty("Historical committers missing from contributors.json:", missing_from_contributors_json)
+ print_list_if_non_empty("Committers in contributors.json who have never committed:", never_committed)
class CommitterListBugzillaChecker(object):
@@ -272,7 +274,7 @@ def main():
print """\n\nWARNING: validate-committer-lists requires a git checkout.
The following checks are disabled:
- List of committers ordered by last commit
- - List of historical committers missing from committers.py
+ - List of historical committers missing from contributors.json
"""
return 1
svn_committer_list = CommitterListFromGit()
diff --git a/Tools/Scripts/webkit-build-directory b/Tools/Scripts/webkit-build-directory
index d5085b594..d19ce0418 100755
--- a/Tools/Scripts/webkit-build-directory
+++ b/Tools/Scripts/webkit-build-directory
@@ -47,8 +47,6 @@ Usage: $programName [options]
--top-level Show the top-level build directory
--blackberry Find the build directory for the BlackBerry port on Mac/Linux
- --chromium Find the build directory for the Chromium port on Mac/Win/Linux
- --chromium-android Find the build directory for the Chromium port on Android
--efl Find the build directory for the EFL port
--gtk Find the build directory for the GTK+ port
--qt Find the build directory for the Qt port
diff --git a/Tools/Scripts/webkit-tools-completion.sh b/Tools/Scripts/webkit-tools-completion.sh
index d83a5a6ce..a57a1aba5 100755
--- a/Tools/Scripts/webkit-tools-completion.sh
+++ b/Tools/Scripts/webkit-tools-completion.sh
@@ -111,4 +111,4 @@ complete -F _webkit-patch_complete webkit-patch
complete -o default -W "--continue --fix-merged --help --no-continue --no-warnings --warnings -c -f -h -w" resolve-ChangeLogs
complete -o default -W "--bug --diff --git-commit --git-index --git-reviewer --help --no-update --no-write --open --update --write -d -h -o" prepare-ChangeLog
complete -W "--clean --debug --help -h" build-webkit
-complete -o default -W "--add-platform-exceptions --complex-text --configuration --guard-malloc --help --http --ignore-tests --launch-safari --leaks --merge-leak-depth --new-test-results --no-http --no-launch-safari --no-new-test-results --no-sample-on-timeout --no-strip-editing-callbacks --pixel-tests --platform --port --quiet --random --reset-results --results-directory --reverse --root --sample-on-timeout --singly --skipped --slowest --strict --strip-editing-callbacks --threaded --timeout --tolerance --use-remote-links-to-tests --valgrind --verbose -1 -c -g -h -i -l -m -o -p -q -t -v" run-webkit-tests
+complete -o default -W "--add-platform-exceptions --complex-text --configuration --guard-malloc --help --http --ignore-tests --launch-safari --leaks --merge-leak-depth --new-test-results --no-http --no-show-results --no-new-test-results --no-sample-on-timeout --no-strip-editing-callbacks --pixel-tests --platform --port --quiet --random --reset-results --results-directory --reverse --root --sample-on-timeout --singly --skipped --slowest --strict --strip-editing-callbacks --threaded --timeout --tolerance --use-remote-links-to-tests --valgrind --verbose -1 -c -g -h -i -l -m -o -p -q -t -v" run-webkit-tests
diff --git a/Tools/Scripts/webkitdirs.pm b/Tools/Scripts/webkitdirs.pm
index 619a76e19..cc30b16f5 100755
--- a/Tools/Scripts/webkitdirs.pm
+++ b/Tools/Scripts/webkitdirs.pm
@@ -80,6 +80,7 @@ my $numberOfCPUs;
my $baseProductDir;
my @baseProductDirOption;
my $configuration;
+my $xcodeSDK;
my $configurationForVisualStudio;
my $configurationProductDir;
my $sourceDir;
@@ -93,15 +94,8 @@ my $qmakebin = "qmake"; # Allow override of the qmake binary from $PATH
my $isGtk;
my $isWinCE;
my $isWinCairo;
-my $isWx;
my $isEfl;
-my @wxArgs;
my $isBlackBerry;
-my $isChromium;
-my $isChromiumAndroid;
-my $isChromiumMacMake;
-my $isChromiumNinja;
-my $forceChromiumUpdate;
my $isInspectorFrontend;
my $isWK2;
my $shouldTargetWebProcess;
@@ -110,7 +104,10 @@ my $shouldUseGuardMalloc;
my $xcodeVersion;
# Variables for Win32 support
+my $programFilesPath;
my $vcBuildPath;
+my $vsInstallDir;
+my $vsVersion;
my $windowsSourceDir;
my $winVersion;
my $willUseVCExpressWhenBuilding = 0;
@@ -186,7 +183,7 @@ sub determineBaseProductDir
determineSourceDir();
my $setSharedPrecompsDir;
- $baseProductDir = $ENV{"WEBKITOUTPUTDIR"};
+ $baseProductDir = $ENV{"WEBKIT_OUTPUTDIR"};
if (!defined($baseProductDir) and isAppleMacWebKit()) {
# Silently remove ~/Library/Preferences/xcodebuild.plist which can
@@ -221,14 +218,6 @@ sub determineBaseProductDir
}
undef $baseProductDir unless $baseProductDir =~ /^\//;
- } elsif (isChromium()) {
- if (isLinux() || isChromiumAndroid() || isChromiumMacMake()) {
- $baseProductDir = "$sourceDir/out";
- } elsif (isDarwin()) {
- $baseProductDir = "$sourceDir/Source/WebKit/chromium/xcodebuild";
- } elsif (isWindows() || isCygwin()) {
- $baseProductDir = "$sourceDir/Source/WebKit/chromium/build";
- }
}
if (!defined($baseProductDir)) { # Port-specific checks failed, use default
@@ -240,7 +229,7 @@ sub determineBaseProductDir
$baseProductDir = "$baseProductDir/" . $archInfo{"cpuDir"};
}
- if (isGit() && isGitBranchBuild() && !isChromium()) {
+ if (isGit() && isGitBranchBuild()) {
my $branch = gitBranch();
$baseProductDir = "$baseProductDir/$branch";
}
@@ -258,7 +247,7 @@ sub determineBaseProductDir
if (isCygwin()) {
my $dosBuildPath = `cygpath --windows \"$baseProductDir\"`;
chomp $dosBuildPath;
- $ENV{"WEBKITOUTPUTDIR"} = $dosBuildPath;
+ $ENV{"WEBKIT_OUTPUTDIR"} = $dosBuildPath;
my $unixBuildPath = `cygpath --unix \"$baseProductDir\"`;
chomp $unixBuildPath;
$baseProductDir = $unixBuildPath;
@@ -288,8 +277,8 @@ sub determineConfiguration
}
if ($configuration && isWinCairo()) {
- unless ($configuration =~ /_Cairo_CFLite$/) {
- $configuration .= "_Cairo_CFLite";
+ unless ($configuration =~ /_WinCairo$/) {
+ $configuration .= "_WinCairo";
}
}
}
@@ -301,10 +290,11 @@ sub determineArchitecture
$architecture = "";
determineBaseProductDir();
+ determineXcodeSDK();
if (isGtk()) {
determineConfigurationProductDir();
- my $host_triple = `grep -E '^host = ' $configurationProductDir/GNUmakefile`;
+ my $host_triple = `grep -E '^host = ' $configurationProductDir/GNUmakefile 2> /dev/null`;
if ($host_triple =~ m/^host = ([^-]+)-/) {
# We have a configured build tree; use it.
$architecture = $1;
@@ -317,9 +307,15 @@ sub determineArchitecture
if ($architecture) {
chomp $architecture;
} else {
- my $supports64Bit = `sysctl -n hw.optional.x86_64`;
- chomp $supports64Bit;
- $architecture = 'x86_64' if $supports64Bit;
+ if (not defined $xcodeSDK or $xcodeSDK =~ /^(\/$|macosx)/) {
+ my $supports64Bit = `sysctl -n hw.optional.x86_64`;
+ chomp $supports64Bit;
+ $architecture = 'x86_64' if $supports64Bit;
+ } elsif ($xcodeSDK =~ /^iphonesimulator/) {
+ $architecture = 'i386';
+ } elsif ($xcodeSDK =~ /^iphoneos/) {
+ $architecture = 'armv7';
+ }
}
} elsif (isEfl()) {
my $host_processor = "";
@@ -388,14 +384,70 @@ sub argumentsForConfiguration()
push(@args, '--efl') if isEfl();
push(@args, '--wincairo') if isWinCairo();
push(@args, '--wince') if isWinCE();
- push(@args, '--wx') if isWx();
push(@args, '--blackberry') if isBlackBerry();
- push(@args, '--chromium') if isChromium() && !isChromiumAndroid();
- push(@args, '--chromium-android') if isChromiumAndroid();
push(@args, '--inspector-frontend') if isInspectorFrontend();
return @args;
}
+sub determineXcodeSDK
+{
+ return if defined $xcodeSDK;
+ for (my $i = 0; $i <= $#ARGV; $i++) {
+ my $opt = $ARGV[$i];
+ if ($opt =~ /^--sdk$/i) {
+ splice(@ARGV, $i, 1);
+ $xcodeSDK = splice(@ARGV, $i, 1);
+ } elsif ($opt =~ /^--device$/i) {
+ splice(@ARGV, $i, 1);
+ $xcodeSDK = 'iphoneos.internal';
+ } elsif ($opt =~ /^--sim(ulator)?/i) {
+ splice(@ARGV, $i, 1);
+ $xcodeSDK = 'iphonesimulator';
+ }
+ }
+}
+
+sub xcodeSDK
+{
+ determineXcodeSDK();
+ return $xcodeSDK;
+}
+
+sub programFilesPath
+{
+ return $programFilesPath if defined $programFilesPath;
+
+ $programFilesPath = $ENV{'PROGRAMFILES(X86)'} || $ENV{'PROGRAMFILES'} || "C:\\Program Files";
+
+ return $programFilesPath;
+}
+
+sub visualStudioInstallDir
+{
+ return $vsInstallDir if defined $vsInstallDir;
+
+ if ($ENV{'VSINSTALLDIR'}) {
+ $vsInstallDir = $ENV{'VSINSTALLDIR'};
+ $vsInstallDir =~ s|[\\/]$||;
+ } else {
+ $vsInstallDir = File::Spec->catdir(programFilesPath(), "Microsoft Visual Studio 10.0");
+ }
+ chomp($vsInstallDir = `cygpath "$vsInstallDir"`) if isCygwin();
+
+ return $vsInstallDir;
+}
+
+sub visualStudioVersion
+{
+ return $vsVersion if defined $vsVersion;
+
+ my $installDir = visualStudioInstallDir();
+
+ $vsVersion = ($installDir =~ /Microsoft Visual Studio ([0-9]+\.[0-9]*)/) ? $1 : "8";
+
+ return $vsVersion;
+}
+
sub determineConfigurationForVisualStudio
{
return if defined $configurationForVisualStudio;
@@ -408,9 +460,9 @@ sub usesPerConfigurationBuildDirectory
{
# [Gtk] We don't have Release/Debug configurations in straight
# autotool builds (non build-webkit). In this case and if
- # WEBKITOUTPUTDIR exist, use that as our configuration dir. This will
+ # WEBKIT_OUTPUTDIR exists, use that as our configuration dir. This will
# allows us to run run-webkit-tests without using build-webkit.
- return ($ENV{"WEBKITOUTPUTDIR"} && isGtk()) || isAppleWinWebKit();
+ return ($ENV{"WEBKIT_OUTPUTDIR"} && isGtk()) || isAppleWinWebKit();
}
sub determineConfigurationProductDir
@@ -418,8 +470,9 @@ sub determineConfigurationProductDir
return if defined $configurationProductDir;
determineBaseProductDir();
determineConfiguration();
- if (isAppleWinWebKit() && !isWx()) {
- $configurationProductDir = File::Spec->catdir($baseProductDir, configurationForVisualStudio(), "bin");
+ if (isAppleWinWebKit()) {
+ my $binDir = "bin32";
+ $configurationProductDir = File::Spec->catdir($baseProductDir, configurationForVisualStudio(), $binDir);
} else {
if (usesPerConfigurationBuildDirectory()) {
$configurationProductDir = "$baseProductDir";
@@ -520,7 +573,12 @@ sub XcodeOptions
determineBaseProductDir();
determineConfiguration();
determineArchitecture();
- return (@baseProductDirOption, "-configuration", $configuration, "ARCHS=$architecture", argumentsForXcode());
+ determineXcodeSDK();
+
+ my @sdkOption = ($xcodeSDK ? "SDKROOT=$xcodeSDK" : ());
+ my @architectureOption = ($architecture ? "ARCHS=$architecture" : ());
+
+ return (@baseProductDirOption, "-configuration", $configuration, @architectureOption, @sdkOption, argumentsForXcode());
}
sub XcodeOptionString
@@ -538,9 +596,6 @@ sub XcodeCoverageSupportOptions()
my @coverageSupportOptions = ();
push @coverageSupportOptions, "GCC_GENERATE_TEST_COVERAGE_FILES=YES";
push @coverageSupportOptions, "GCC_INSTRUMENT_PROGRAM_FLOW_ARCS=YES";
- push @coverageSupportOptions, "EXTRA_LINK= \$(EXTRA_LINK) -ftest-coverage -fprofile-arcs";
- push @coverageSupportOptions, "OTHER_CFLAGS= \$(OTHER_CFLAGS) -DCOVERAGE -MD";
- push @coverageSupportOptions, "OTHER_LDFLAGS=\$(OTHER_LDFLAGS) -ftest-coverage -fprofile-arcs -lgcov";
return @coverageSupportOptions;
}
@@ -553,22 +608,22 @@ sub determinePassedConfiguration
for my $i (0 .. $#ARGV) {
my $opt = $ARGV[$i];
- if ($opt =~ /^--debug$/i || $opt =~ /^--devel/i) {
+ if ($opt =~ /^--debug$/i) {
splice(@ARGV, $i, 1);
$passedConfiguration = "Debug";
- $passedConfiguration .= "_Cairo_CFLite" if (isWinCairo() && isCygwin());
+ $passedConfiguration .= "_WinCairo" if (isWinCairo() && isCygwin());
return;
}
- if ($opt =~ /^--release$/i || $opt =~ /^--deploy/i) {
+ if ($opt =~ /^--release$/i) {
splice(@ARGV, $i, 1);
$passedConfiguration = "Release";
- $passedConfiguration .= "_Cairo_CFLite" if (isWinCairo() && isCygwin());
+ $passedConfiguration .= "_WinCairo" if (isWinCairo() && isCygwin());
return;
}
if ($opt =~ /^--profil(e|ing)$/i) {
splice(@ARGV, $i, 1);
$passedConfiguration = "Profiling";
- $passedConfiguration .= "_Cairo_CFLite" if (isWinCairo() && isCygwin());
+ $passedConfiguration .= "_WinCairo" if (isWinCairo() && isCygwin());
return;
}
}
@@ -606,7 +661,7 @@ sub determinePassedArchitecture
my $opt = $ARGV[$i];
if ($opt =~ /^--32-bit$/i) {
splice(@ARGV, $i, 1);
- if (isAppleMacWebKit() || isWx()) {
+ if (isAppleMacWebKit()) {
$passedArchitecture = `arch`;
chomp $passedArchitecture;
}
@@ -645,6 +700,11 @@ sub setArchitecture
$architecture = $passedArchitecture if $passedArchitecture;
}
+sub skipSafariExecutableEntitlementChecks
+{
+ return `defaults read /Library/Preferences/org.webkit.BuildConfiguration SkipSafariExecutableEntitlementChecks 2>/dev/null` eq "1\n";
+}
+
sub executableHasEntitlements
{
my $executablePath = shift;
@@ -657,8 +717,11 @@ sub safariPathFromSafariBundle
if (isAppleMacWebKit()) {
my $safariPath = "$safariBundle/Contents/MacOS/Safari";
+ return $safariPath if skipSafariExecutableEntitlementChecks();
+
my $safariForWebKitDevelopmentPath = "$safariBundle/Contents/MacOS/SafariForWebKitDevelopment";
return $safariForWebKitDevelopmentPath if -f $safariForWebKitDevelopmentPath && executableHasEntitlements($safariPath);
+
return $safariPath;
}
return $safariBundle if isAppleWinWebKit();
@@ -714,16 +777,18 @@ sub builtDylibPathForName
{
my $libraryName = shift;
determineConfigurationProductDir();
- if (isChromium()) {
- return "$configurationProductDir/$libraryName";
- }
+
if (isBlackBerry()) {
my $libraryExtension = $libraryName =~ /^WebKit$/i ? ".so" : ".a";
return "$configurationProductDir/$libraryName/lib" . lc($libraryName) . $libraryExtension;
}
if (isQt()) {
my $isSearchingForWebCore = $libraryName =~ "WebCore";
- $libraryName = "QtWebKitWidgets";
+ if (isDarwin()) {
+ $libraryName = "QtWebKitWidgets";
+ } else {
+ $libraryName = "Qt5WebKitWidgets";
+ }
my $result;
if (isDarwin() and -d "$configurationProductDir/lib/$libraryName.framework") {
$result = "$configurationProductDir/lib/$libraryName.framework/$libraryName";
@@ -759,12 +824,9 @@ sub builtDylibPathForName
return $result;
}
- if (isWx()) {
- return "$configurationProductDir/libwxwebkit.dylib";
- }
if (isGtk()) {
# WebKitGTK+ for GTK2, WebKitGTK+ for GTK3, and WebKit2 respectively.
- my @libraries = ("libwebkitgtk-1.0", "libwebkitgtk-3.0", "libwebkit2gtk-1.0");
+ my @libraries = ("libwebkitgtk-1.0", "libwebkitgtk-3.0", "libwebkit2gtk-3.0");
my $extension = isDarwin() ? ".dylib" : ".so";
foreach $libraryName (@libraries) {
@@ -774,6 +836,9 @@ sub builtDylibPathForName
return "NotFound";
}
if (isEfl()) {
+ if (isWK2()) {
+ return "$configurationProductDir/lib/libewebkit2.so";
+ }
return "$configurationProductDir/lib/libewebkit.so";
}
if (isWinCE()) {
@@ -919,8 +984,8 @@ sub determineIsQt()
return;
}
- # The presence of QTDIR only means Qt if --gtk or --wx or --efl or --blackberry or --chromium or --wincairo are not on the command-line
- if (isGtk() || isWx() || isEfl() || isBlackBerry() || isChromium() || isWinCairo()) {
+ # The presence of QTDIR only means Qt if --gtk or --efl or --blackberry or --wincairo are not on the command-line
+ if (isGtk() || isEfl() || isBlackBerry() || isWinCairo()) {
$isQt = 0;
return;
}
@@ -1001,14 +1066,17 @@ sub blackberryCMakeArguments()
push @cmakeExtraOptions, "-DENABLE_GLES2=1" unless $ENV{"DISABLE_GLES2"};
my @includeSystemDirectories;
- push @includeSystemDirectories, File::Spec->catdir($stageInc, "grskia", "skia");
- push @includeSystemDirectories, File::Spec->catdir($stageInc, "grskia");
- push @includeSystemDirectories, File::Spec->catdir($stageInc, "harfbuzz");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "harfbuzzng");
push @includeSystemDirectories, File::Spec->catdir($stageInc, "imf");
+ # We only use jpeg-turbo for device builds
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "jpeg-turbo") if $arch=~/arm/;
push @includeSystemDirectories, $stageInc;
push @includeSystemDirectories, File::Spec->catdir($stageInc, "browser", "platform");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "browser", "platform", "graphics");
push @includeSystemDirectories, File::Spec->catdir($stageInc, "browser", "qsk");
push @includeSystemDirectories, File::Spec->catdir($stageInc, "ots");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "iType", "common");
+ push @includeSystemDirectories, File::Spec->catdir($stageInc, "iType", "port", "nto");
my @cxxFlags;
push @cxxFlags, "-Wl,-rpath-link,$stageLib";
@@ -1092,35 +1160,6 @@ sub determineIsWinCE()
$isWinCE = checkForArgumentAndRemoveFromARGV("--wince");
}
-sub isWx()
-{
- determineIsWx();
- return $isWx;
-}
-
-sub determineIsWx()
-{
- return if defined($isWx);
- $isWx = checkForArgumentAndRemoveFromARGV("--wx");
-}
-
-sub getWxArgs()
-{
- if (!@wxArgs) {
- @wxArgs = ("");
- my $rawWxArgs = "";
- foreach my $opt (@ARGV) {
- if ($opt =~ /^--wx-args/i ) {
- @ARGV = grep(!/^--wx-args/i, @ARGV);
- $rawWxArgs = $opt;
- $rawWxArgs =~ s/--wx-args=//i;
- }
- }
- @wxArgs = split(/,/, $rawWxArgs);
- }
- return @wxArgs;
-}
-
# Determine if this is debian, ubuntu, linspire, or something similar.
sub isDebianBased()
{
@@ -1132,92 +1171,6 @@ sub isFedoraBased()
return -e "/etc/fedora-release";
}
-sub isChromium()
-{
- determineIsChromium();
- determineIsChromiumAndroid();
- return $isChromium || $isChromiumAndroid;
-}
-
-sub determineIsChromium()
-{
- return if defined($isChromium);
- $isChromium = checkForArgumentAndRemoveFromARGV("--chromium");
- if ($isChromium) {
- $forceChromiumUpdate = checkForArgumentAndRemoveFromARGV("--force-update");
- }
-}
-
-sub isChromiumAndroid()
-{
- determineIsChromiumAndroid();
- return $isChromiumAndroid;
-}
-
-sub determineIsChromiumAndroid()
-{
- return if defined($isChromiumAndroid);
- $isChromiumAndroid = checkForArgumentAndRemoveFromARGV("--chromium-android");
-}
-
-sub isChromiumMacMake()
-{
- determineIsChromiumMacMake();
- return $isChromiumMacMake;
-}
-
-sub determineIsChromiumMacMake()
-{
- return if defined($isChromiumMacMake);
-
- my $hasUpToDateMakefile = 0;
- if (-e 'Makefile.chromium') {
- unless (-e 'Source/WebKit/chromium/WebKit.xcodeproj') {
- $hasUpToDateMakefile = 1;
- } else {
- $hasUpToDateMakefile = stat('Makefile.chromium')->mtime > stat('Source/WebKit/chromium/WebKit.xcodeproj')->mtime;
- }
- }
- $isChromiumMacMake = isDarwin() && $hasUpToDateMakefile;
-}
-
-sub isChromiumNinja()
-{
- determineIsChromiumNinja();
- return $isChromiumNinja;
-}
-
-sub determineIsChromiumNinja()
-{
- return if defined($isChromiumNinja);
-
- my $config = configuration();
-
- my $hasUpToDateNinjabuild = 0;
- if (-e "out/$config/build.ninja") {
- my $statNinja = stat("out/$config/build.ninja")->mtime;
-
- my $statXcode = 0;
- if (-e 'Source/WebKit/chromium/WebKit.xcodeproj') {
- $statXcode = stat('Source/WebKit/chromium/WebKit.xcodeproj')->mtime;
- }
-
- my $statMake = 0;
- if (-e 'Makefile.chromium') {
- $statMake = stat('Makefile.chromium')->mtime;
- }
-
- $hasUpToDateNinjabuild = $statNinja > $statXcode && $statNinja > $statMake;
- }
- $isChromiumNinja = $hasUpToDateNinjabuild;
-}
-
-sub forceChromiumUpdate()
-{
- determineIsChromium();
- return $forceChromiumUpdate;
-}
-
sub isWinCairo()
{
determineIsWinCairo();
@@ -1302,7 +1255,7 @@ sub isFreeBSD()
sub isARM()
{
- return $Config{archname} =~ /^arm-/;
+ return $Config{archname} =~ /^arm[v\-]/;
}
sub isCrossCompilation()
@@ -1321,7 +1274,7 @@ sub isCrossCompilation()
sub isAppleWebKit()
{
- return !(isQt() or isGtk() or isWx() or isChromium() or isEfl() or isWinCE() or isBlackBerry());
+ return !(isQt() or isGtk() or isEfl() or isWinCE() or isBlackBerry());
}
sub isAppleMacWebKit()
@@ -1496,7 +1449,7 @@ sub relativeScriptsDir()
sub launcherPath()
{
my $relativeScriptsPath = relativeScriptsDir();
- if (isGtk() || isQt() || isWx() || isEfl() || isWinCE()) {
+ if (isGtk() || isQt() || isEfl() || isWinCE()) {
return "$relativeScriptsPath/run-launcher";
} elsif (isAppleWebKit()) {
return "$relativeScriptsPath/run-safari";
@@ -1509,8 +1462,6 @@ sub launcherName()
return "GtkLauncher";
} elsif (isQt()) {
return "QtTestBrowser";
- } elsif (isWx()) {
- return "wxBrowser";
} elsif (isAppleWebKit()) {
return "Safari";
} elsif (isEfl()) {
@@ -1543,7 +1494,7 @@ sub checkRequiredSystemConfig
print "http://developer.apple.com/tools/xcode\n";
print "*************************************************************\n";
}
- } elsif (isGtk() or isQt() or isWx() or isEfl()) {
+ } elsif (isGtk() or isQt() or isEfl()) {
my @cmds = qw(bison gperf);
if (isQt() and isWindows()) {
push @cmds, "win_flex";
@@ -1587,6 +1538,11 @@ sub windowsSourceDir()
return $windowsSourceDir;
}
+sub windowsSourceSourceDir()
+{
+ return windowsSourceDir() . "\\Source";
+}
+
sub windowsLibrariesDir()
{
return windowsSourceDir() . "\\WebKitLibraries\\win";
@@ -1616,28 +1572,32 @@ sub setupAppleWinEnv()
}
# Those environment variables must be set to be able to build inside Visual Studio.
- $variablesToSet{WEBKITLIBRARIESDIR} = windowsLibrariesDir() unless $ENV{WEBKITLIBRARIESDIR};
- $variablesToSet{WEBKITOUTPUTDIR} = windowsOutputDir() unless $ENV{WEBKITOUTPUTDIR};
+ $variablesToSet{WEBKIT_LIBRARIES} = windowsLibrariesDir() unless $ENV{WEBKIT_LIBRARIES};
+ $variablesToSet{WEBKIT_OUTPUTDIR} = windowsOutputDir() unless $ENV{WEBKIT_OUTPUTDIR};
foreach my $variable (keys %variablesToSet) {
print "Setting the Environment Variable '" . $variable . "' to '" . $variablesToSet{$variable} . "'\n\n";
system qw(regtool -s set), '\\HKEY_CURRENT_USER\\Environment\\' . $variable, $variablesToSet{$variable};
- $restartNeeded ||= $variable eq "WEBKITLIBRARIESDIR" || $variable eq "WEBKITOUTPUTDIR";
+ $restartNeeded ||= $variable eq "WEBKIT_LIBRARIES" || $variable eq "WEBKIT_OUTPUTDIR";
}
if ($restartNeeded) {
print "Please restart your computer before attempting to build inside Visual Studio.\n\n";
}
} else {
- if (!$ENV{'WEBKITLIBRARIESDIR'}) {
- print "Warning: You must set the 'WebKitLibrariesDir' environment variable\n";
- print " to be able build WebKit from within Visual Studio.\n";
- print " Make sure that 'WebKitLibrariesDir' points to the\n";
+ if (!$ENV{'WEBKIT_LIBRARIES'}) {
+ # VS2010 (and newer) version. This will replace the VS2005 version as part of
+ # https://bugs.webkit.org/show_bug.cgi?id=109472.
+ print "Warning: You must set the 'WebKit_Libraries' environment variable\n";
+ print " to be able build WebKit from within Visual Studio 2010 and newer.\n";
+ print " Make sure that 'WebKit_Libraries' points to the\n";
print " 'WebKitLibraries/win' directory, not the 'WebKitLibraries/' directory.\n\n";
}
- if (!$ENV{'WEBKITOUTPUTDIR'}) {
- print "Warning: You must set the 'WebKitOutputDir' environment variable\n";
- print " to be able build WebKit from within Visual Studio.\n\n";
+ if (!$ENV{'WEBKIT_OUTPUTDIR'}) {
+ # VS2010 (and newer) version. This will replace the VS2005 version as part of
+ # https://bugs.webkit.org/show_bug.cgi?id=109472.
+ print "Warning: You must set the 'WebKit_OutputDir' environment variable\n";
+ print " to be able build WebKit from within Visual Studio 2010 and newer.\n\n";
}
}
}
@@ -1647,22 +1607,16 @@ sub setupCygwinEnv()
return if !isCygwin() && !isWindows();
return if $vcBuildPath;
- my $vsInstallDir;
- my $programFilesPath = $ENV{'PROGRAMFILES(X86)'} || $ENV{'PROGRAMFILES'} || "C:\\Program Files";
- if ($ENV{'VSINSTALLDIR'}) {
- $vsInstallDir = $ENV{'VSINSTALLDIR'};
- } else {
- $vsInstallDir = File::Spec->catdir($programFilesPath, "Microsoft Visual Studio 8");
- }
- chomp($vsInstallDir = `cygpath "$vsInstallDir"`) if isCygwin();
- $vcBuildPath = File::Spec->catfile($vsInstallDir, qw(Common7 IDE devenv.com));
+ my $programFilesPath = programFilesPath();
+ $vcBuildPath = File::Spec->catfile(visualStudioInstallDir(), qw(Common7 IDE devenv.com));
if (-e $vcBuildPath) {
- # Visual Studio is installed; we can use pdevenv to build.
- # FIXME: Make pdevenv work with non-Cygwin Perl.
- $vcBuildPath = File::Spec->catfile(sourceDir(), qw(Tools Scripts pdevenv)) if isCygwin();
+ # Visual Studio is installed;
+ if (visualStudioVersion() eq "10") {
+ $vcBuildPath = File::Spec->catfile(visualStudioInstallDir(), qw(Common7 IDE devenv.exe));
+ }
} else {
# Visual Studio not found, try VC++ Express
- $vcBuildPath = File::Spec->catfile($vsInstallDir, qw(Common7 IDE VCExpress.exe));
+ $vcBuildPath = File::Spec->catfile(visualStudioInstallDir(), qw(Common7 IDE VCExpress.exe));
if (! -e $vcBuildPath) {
print "*************************************************************\n";
print "Cannot find '$vcBuildPath'\n";
@@ -1684,32 +1638,37 @@ sub setupCygwinEnv()
print "*************************************************************\n";
die;
}
-
- unless ($ENV{WEBKITLIBRARIESDIR}) {
- $ENV{'WEBKITLIBRARIESDIR'} = File::Spec->catdir($sourceDir, "WebKitLibraries", "win");
- chomp($ENV{WEBKITLIBRARIESDIR} = `cygpath -wa '$ENV{WEBKITLIBRARIESDIR}'`) if isCygwin();
- }
print "Building results into: ", baseProductDir(), "\n";
- print "WEBKITOUTPUTDIR is set to: ", $ENV{"WEBKITOUTPUTDIR"}, "\n";
- print "WEBKITLIBRARIESDIR is set to: ", $ENV{"WEBKITLIBRARIESDIR"}, "\n";
+ print "WEBKIT_OUTPUTDIR is set to: ", $ENV{"WEBKIT_OUTPUTDIR"}, "\n";
+ print "WEBKIT_LIBRARIES is set to: ", $ENV{"WEBKIT_LIBRARIES"}, "\n";
}
sub dieIfWindowsPlatformSDKNotInstalled
{
my $registry32Path = "/proc/registry/";
my $registry64Path = "/proc/registry64/";
- my $windowsPlatformSDKRegistryEntry = "HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/MicrosoftSDK/InstalledSDKs/D2FF9F89-8AA2-4373-8A31-C838BF4DBBE1";
+ my @windowsPlatformSDKRegistryEntries = (
+ "HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Microsoft SDKs/Windows/v8.0A",
+ "HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Microsoft SDKs/Windows/v8.0",
+ "HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Microsoft SDKs/Windows/v7.1A",
+ "HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/Microsoft SDKs/Windows/v7.0A",
+ "HKEY_LOCAL_MACHINE/SOFTWARE/Microsoft/MicrosoftSDK/InstalledSDKs/D2FF9F89-8AA2-4373-8A31-C838BF4DBBE1",
+ );
# FIXME: It would be better to detect whether we are using 32- or 64-bit Windows
# and only check the appropriate entry. But for now we just blindly check both.
- return if (-e $registry32Path . $windowsPlatformSDKRegistryEntry) || (-e $registry64Path . $windowsPlatformSDKRegistryEntry);
+ my $recommendedPlatformSDK = $windowsPlatformSDKRegistryEntries[0];
+
+ while (@windowsPlatformSDKRegistryEntries) {
+ my $windowsPlatformSDKRegistryEntry = shift @windowsPlatformSDKRegistryEntries;
+ return if (-e $registry32Path . $windowsPlatformSDKRegistryEntry) || (-e $registry64Path . $windowsPlatformSDKRegistryEntry);
+ }
print "*************************************************************\n";
- print "Cannot find registry entry '$windowsPlatformSDKRegistryEntry'.\n";
- print "Please download and install the Microsoft Windows Server 2003 R2\n";
- print "Platform SDK from <http://www.microsoft.com/downloads/details.aspx?\n";
- print "familyid=0baf2b35-c656-4969-ace8-e4c0c0716adb&displaylang=en>.\n\n";
+ print "Cannot find registry entry '$recommendedPlatformSDK'.\n";
+ print "Please download and install the Microsoft Windows SDK\n";
+ print "from <http://www.microsoft.com/en-us/download/details.aspx?id=8279>.\n\n";
print "Then follow step 2 in the Windows section of the \"Installing Developer\n";
print "Tools\" instructions at <http://www.webkit.org/building/tools.html>.\n";
print "*************************************************************\n";
@@ -1798,50 +1757,6 @@ sub buildVisualStudioProject
return system @command;
}
-sub downloadWafIfNeeded
-{
- # get / update waf if needed
- my $waf = "$sourceDir/Tools/waf/waf";
- my $wafURL = 'http://wxwebkit.kosoftworks.com/downloads/deps/waf';
- if (!-f $waf) {
- my $result = system "curl -o $waf $wafURL";
- chmod 0755, $waf;
- }
-}
-
-sub buildWafProject
-{
- my ($project, $shouldClean, @options) = @_;
-
- # set the PYTHONPATH for waf
- my $pythonPath = $ENV{'PYTHONPATH'};
- if (!defined($pythonPath)) {
- $pythonPath = '';
- }
- my $sourceDir = sourceDir();
- my $newPythonPath = "$sourceDir/Tools/waf/build:$pythonPath";
- if (isCygwin()) {
- $newPythonPath = `cygpath --mixed --path $newPythonPath`;
- }
- $ENV{'PYTHONPATH'} = $newPythonPath;
-
- print "Building $project\n";
-
- my $wafCommand = "$sourceDir/Tools/waf/waf";
- if ($ENV{'WXWEBKIT_WAF'}) {
- $wafCommand = $ENV{'WXWEBKIT_WAF'};
- }
- if (isCygwin()) {
- $wafCommand = `cygpath --windows "$wafCommand"`;
- chomp($wafCommand);
- }
- if ($shouldClean) {
- return system $wafCommand, "uninstall", "clean", "distclean";
- }
-
- return system $wafCommand, 'configure', 'build', 'install', @options;
-}
-
sub retrieveQMakespecVar
{
my $mkspec = $_[0];
@@ -1901,9 +1816,13 @@ sub autotoolsFlag($$)
sub runAutogenForAutotoolsProjectIfNecessary($@)
{
- my ($dir, $prefix, $sourceDir, $project, @buildArgs) = @_;
+ my ($dir, $prefix, $sourceDir, $project, $joinedOverridableFeatures, @buildArgs) = @_;
+
+ # Always enable introspection when building WebKitGTK+.
+ unshift(@buildArgs, "--enable-introspection");
+
+ my $joinedBuildArgs = join(" ", @buildArgs);
- my $argumentsFile = "previous-autogen-arguments.txt";
if (-e "GNUmakefile") {
# Just assume that build-jsc will never be used to reconfigure JSC. Later
# we can go back and make this more complicated if the demand is there.
@@ -1911,8 +1830,9 @@ sub runAutogenForAutotoolsProjectIfNecessary($@)
return;
}
- # We only run autogen.sh again if the arguments passed have changed.
- if (!mustReRunAutogen($sourceDir, $argumentsFile, @buildArgs)) {
+ # Run autogen.sh again if either the features overridden by build-webkit or the build arguments have changed.
+ if (!mustReRunAutogen($sourceDir, "WebKitFeatureOverrides.txt", $joinedOverridableFeatures)
+ && !mustReRunAutogen($sourceDir, "previous-autogen-arguments.txt", $joinedBuildArgs)) {
return;
}
}
@@ -1923,8 +1843,12 @@ sub runAutogenForAutotoolsProjectIfNecessary($@)
# Only for WebKit, write the autogen.sh arguments to a file so that we can detect
# when they change and automatically re-run it.
if ($project eq 'WebKit') {
- open(AUTOTOOLS_ARGUMENTS, ">$argumentsFile");
- print AUTOTOOLS_ARGUMENTS join(" ", @buildArgs);
+ open(OVERRIDABLE_FEATURES, ">WebKitFeatureOverrides.txt");
+ print OVERRIDABLE_FEATURES $joinedOverridableFeatures;
+ close(OVERRIDABLE_FEATURES);
+
+ open(AUTOTOOLS_ARGUMENTS, ">previous-autogen-arguments.txt");
+ print AUTOTOOLS_ARGUMENTS $joinedBuildArgs;
close(AUTOTOOLS_ARGUMENTS);
}
@@ -1942,10 +1866,7 @@ sub runAutogenForAutotoolsProjectIfNecessary($@)
# Prefix the command with jhbuild run.
unshift(@buildArgs, "$relSourceDir/autogen.sh");
- my $jhbuildWrapperPrefix = jhbuildWrapperPrefixIfNeeded();
- if ($jhbuildWrapperPrefix) {
- unshift(@buildArgs, $jhbuildWrapperPrefix);
- }
+ unshift(@buildArgs, jhbuildWrapperPrefixIfNeeded());
if (system(@buildArgs) ne 0) {
die "Calling autogen.sh failed!\n";
}
@@ -1953,29 +1874,34 @@ sub runAutogenForAutotoolsProjectIfNecessary($@)
sub getJhbuildPath()
{
- return join('/', baseProductDir(), "Dependencies");
+ my @jhbuildPath = File::Spec->splitdir(baseProductDir());
+ if (isGit() && isGitBranchBuild() && gitBranch()) {
+ pop(@jhbuildPath);
+ }
+ push(@jhbuildPath, "Dependencies");
+ return File::Spec->catdir(@jhbuildPath);
}
sub mustReRunAutogen($@)
{
- my ($sourceDir, $filename, @currentArguments) = @_;
+ my ($sourceDir, $filename, $currentContents) = @_;
if (! -e $filename) {
return 1;
}
- open(AUTOTOOLS_ARGUMENTS, $filename);
- chomp(my $previousArguments = <AUTOTOOLS_ARGUMENTS>);
- close(AUTOTOOLS_ARGUMENTS);
+ open(CONTENTS_FILE, $filename);
+ chomp(my $previousContents = <CONTENTS_FILE>);
+ close(CONTENTS_FILE);
# We only care about the WebKit2 argument when we are building WebKit itself.
# build-jsc never passes --enable-webkit2, so if we didn't do this, autogen.sh
# would run for every single build on the bots, since it runs both build-webkit
# and build-jsc.
- my $joinedCurrentArguments = join(" ", @currentArguments);
- if ($previousArguments ne $joinedCurrentArguments) {
- print "Previous autogen arguments were: $previousArguments\n\n";
- print "New autogen arguments are: $joinedCurrentArguments\n";
+ if ($previousContents ne $currentContents) {
+ print "Contents for file $filename have changed.\n";
+ print "Previous contents were: $previousContents\n\n";
+ print "New contents are: $currentContents\n";
return 1;
}
@@ -1984,12 +1910,11 @@ sub mustReRunAutogen($@)
sub buildAutotoolsProject($@)
{
- my ($project, $clean, @buildParams) = @_;
+ my ($project, $clean, $prefix, $makeArgs, $noWebKit1, $noWebKit2, @features) = @_;
my $make = 'make';
my $dir = productDir();
my $config = passedConfiguration() || configuration();
- my $prefix;
# Use rm to clean the build directory since distclean may miss files
if ($clean && -d $dir) {
@@ -2005,19 +1930,45 @@ sub buildAutotoolsProject($@)
return 0;
}
- my @buildArgs = ();
- my $makeArgs = $ENV{"WebKitMakeArguments"} || "";
- for my $i (0 .. $#buildParams) {
- my $opt = $buildParams[$i];
- if ($opt =~ /^--makeargs=(.*)/i ) {
- $makeArgs = $makeArgs . " " . $1;
- } elsif ($opt =~ /^--prefix=(.*)/i ) {
- $prefix = $1;
- } else {
- push @buildArgs, $opt;
+ my @buildArgs = @ARGV;
+ if ($noWebKit1) {
+ unshift(@buildArgs, "--disable-webkit1");
+ }
+ if ($noWebKit2) {
+ unshift(@buildArgs, "--disable-webkit2");
+ }
+
+ # Configurable features listed here should be kept in sync with the
+ # features for which there exists a configuration option in configure.ac.
+ my %configurableFeatures = (
+ "battery-status" => 1,
+ "gamepad" => 1,
+ "geolocation" => 1,
+ "svg" => 1,
+ "svg-fonts" => 1,
+ "video" => 1,
+ "webgl" => 1,
+ "web-audio" => 1,
+ );
+
+ # These features are ones which build-webkit cannot control, typically because
+ # they can only be active when we have the proper dependencies.
+ my %unsetFeatures = (
+ "accelerated-2d-canvas" => 1,
+ );
+
+ my @overridableFeatures = ();
+ foreach (@features) {
+ if ($configurableFeatures{$_->{option}}) {
+ push @buildArgs, autotoolsFlag(${$_->{value}}, $_->{option});
+ } elsif (!$unsetFeatures{$_->{option}}) {
+ push @overridableFeatures, $_->{define} . "=" . (${$_->{value}} ? "1" : "0");
}
}
+ $makeArgs = $makeArgs || "";
+ $makeArgs = $makeArgs . " " . $ENV{"WebKitMakeArguments"} if $ENV{"WebKitMakeArguments"};
+
# Automatically determine the number of CPUs for make only
# if make arguments haven't already been specified.
if ($makeArgs eq "") {
@@ -2042,9 +1993,6 @@ sub buildAutotoolsProject($@)
push @buildArgs, "--disable-debug";
}
- # Enable unstable features when building through build-webkit.
- push @buildArgs, "--enable-unstable-features";
-
if (checkForArgumentAndRemoveFromArrayRef("--update-gtk", \@buildArgs)) {
# Force autogen to run, to catch the possibly updated libraries.
system("rm -f previous-autogen-arguments.txt");
@@ -2055,22 +2003,21 @@ sub buildAutotoolsProject($@)
# If GNUmakefile exists, don't run autogen.sh unless its arguments
# have changed. The makefile should be smart enough to track autotools
# dependencies and re-run autogen.sh when build files change.
- runAutogenForAutotoolsProjectIfNecessary($dir, $prefix, $sourceDir, $project, @buildArgs);
+ my $joinedOverridableFeatures = join(" ", @overridableFeatures);
+ runAutogenForAutotoolsProjectIfNecessary($dir, $prefix, $sourceDir, $project, $joinedOverridableFeatures, @buildArgs);
- my $runWithJhbuild = jhbuildWrapperPrefixIfNeeded();
+ my $runWithJhbuild = join(" ", jhbuildWrapperPrefixIfNeeded());
if (system("$runWithJhbuild $make $makeArgs") ne 0) {
die "\nFailed to build WebKit using '$make'!\n";
}
chdir ".." or die;
- if ($project eq 'WebKit' && !isCrossCompilation()) {
+ if ($project eq 'WebKit' && !isCrossCompilation() && !($noWebKit1 && $noWebKit2)) {
my @docGenerationOptions = ("$sourceDir/Tools/gtk/generate-gtkdoc", "--skip-html");
push(@docGenerationOptions, productDir());
- if ($runWithJhbuild) {
- unshift(@docGenerationOptions, $runWithJhbuild);
- }
+ unshift(@docGenerationOptions, jhbuildWrapperPrefixIfNeeded());
if (system(@docGenerationOptions)) {
die "\n gtkdoc did not build without warnings\n";
@@ -2083,14 +2030,18 @@ sub buildAutotoolsProject($@)
sub jhbuildWrapperPrefixIfNeeded()
{
if (-e getJhbuildPath()) {
+ my @prefix = (File::Spec->catfile(sourceDir(), "Tools", "jhbuild", "jhbuild-wrapper"));
if (isEfl()) {
- return File::Spec->catfile(sourceDir(), "Tools", "efl", "run-with-jhbuild");
+ push(@prefix, "--efl");
} elsif (isGtk()) {
- return File::Spec->catfile(sourceDir(), "Tools", "gtk", "run-with-jhbuild");
+ push(@prefix, "--gtk");
}
+ push(@prefix, "run");
+
+ return @prefix;
}
- return "";
+ return ();
}
sub removeCMakeCache()
@@ -2117,6 +2068,8 @@ sub generateBuildSystemFromCMakeProject
} elsif ($config =~ /debug/i) {
push @args, "-DCMAKE_BUILD_TYPE=Debug";
}
+ # Don't warn about variables which aren't used by cmake ports.
+ push @args, "--no-warn-unused-cli";
push @args, @cmakeArgs if @cmakeArgs;
push @args, $additionalCMakeArgs if $additionalCMakeArgs;
@@ -2131,7 +2084,7 @@ sub generateBuildSystemFromCMakeProject
# We call system("cmake @args") instead of system("cmake", @args) so that @args is
# parsed for shell metacharacters.
- my $wrapper = jhbuildWrapperPrefixIfNeeded() . " ";
+ my $wrapper = join(" ", jhbuildWrapperPrefixIfNeeded()) . " ";
my $returnCode = system($wrapper . "cmake @args");
chdir($originalWorkingDirectory);
@@ -2151,7 +2104,7 @@ sub buildCMakeGeneratedProject($)
# We call system("cmake @args") instead of system("cmake", @args) so that @args is
# parsed for shell metacharacters. In particular, $makeArgs may contain such metacharacters.
- my $wrapper = jhbuildWrapperPrefixIfNeeded() . " ";
+ my $wrapper = join(" ", jhbuildWrapperPrefixIfNeeded()) . " ";
return system($wrapper . "cmake @args");
}
@@ -2190,7 +2143,7 @@ sub buildCMakeProjectOrExit($$$$@)
sub cmakeBasedPortArguments()
{
return blackberryCMakeArguments() if isBlackBerry();
- return ('-DCMAKE_WINCE_SDK="STANDARDSDK_500 (ARMV4I)"') if isWinCE();
+ return ('-G "Visual Studio 8 2005 STANDARDSDK_500 (ARMV4I)"') if isWinCE();
return ();
}
@@ -2241,7 +2194,7 @@ sub buildQMakeProjects
}
# Automatically determine the number of CPUs for make only if this make argument haven't already been specified.
- if ($make eq "make" && $makeargs !~ /-j\s*\d+/i && (!defined $ENV{"MAKEFLAGS"} || ($ENV{"MAKEFLAGS"} !~ /-j\s*\d+/i ))) {
+ if ($make eq "make" && $makeargs !~ /-[^\s]*?j\s*\d+/i && (!defined $ENV{"MAKEFLAGS"} || ($ENV{"MAKEFLAGS"} !~ /-[^\s]*?j\s*\d+/i ))) {
$makeargs .= " -j" . numberOfCPUs();
}
@@ -2324,6 +2277,11 @@ sub buildQMakeProjects
my $maybeNeedsCleanBuild = 0;
my $needsIncrementalBuild = 0;
+ # Full incremental build (run qmake) needed on buildbots and EWS bots always.
+ if (grep(/CONFIG\+=buildbot/,@buildParams)) {
+ $needsIncrementalBuild = 1;
+ }
+
if ($svnRevision ne $previousSvnRevision) {
print "Last built revision was " . $previousSvnRevision .
", now at revision $svnRevision. Full incremental build needed.\n";
@@ -2401,129 +2359,13 @@ EOF
sub buildGtkProject
{
- my ($project, $clean, @buildArgs) = @_;
+ my ($project, $clean, $prefix, $makeArgs, $noWebKit1, $noWebKit2, @features) = @_;
if ($project ne "WebKit" and $project ne "JavaScriptCore" and $project ne "WTF") {
die "Unsupported project: $project. Supported projects: WebKit, JavaScriptCore, WTF\n";
}
- return buildAutotoolsProject($project, $clean, @buildArgs);
-}
-
-sub buildChromiumMakefile($$@)
-{
- my ($target, $clean, @options) = @_;
- if ($clean) {
- return system qw(rm -rf out);
- }
- my $config = configuration();
- my $numCpus = numberOfCPUs();
- my $makeArgs;
- for (@options) {
- $makeArgs = $1 if /^--makeargs=(.*)/i;
- }
- $makeArgs = "-j$numCpus" if not $makeArgs;
- my $command .= "make -fMakefile.chromium $makeArgs BUILDTYPE=$config $target";
-
- print "$command\n";
- return system $command;
-}
-
-sub buildChromiumNinja($$@)
-{
- # rm -rf out requires rerunning gyp, so don't support --clean for now.
- my ($target, @options) = @_;
- my $config = configuration();
- my $makeArgs = "";
- for (@options) {
- $makeArgs = $1 if /^--makeargs=(.*)/i;
- }
- my $command = "";
-
- # Find ninja.
- my $ninjaPath;
- if (commandExists('ninja')) {
- $ninjaPath = 'ninja';
- } elsif (-e 'Source/WebKit/chromium/depot_tools/ninja') {
- $ninjaPath = 'Source/WebKit/chromium/depot_tools/ninja';
- } else {
- die "ninja not found. Install chromium's depot_tools by running update-webkit first\n";
- }
-
- $command .= "$ninjaPath -C out/$config $target $makeArgs";
-
- print "$command\n";
- return system $command;
-}
-
-sub buildChromiumVisualStudioProject($$)
-{
- my ($projectPath, $clean) = @_;
-
- my $config = configuration();
- my $action = "/build";
- $action = "/clean" if $clean;
-
- # Find Visual Studio installation.
- my $vsInstallDir;
- my $programFilesPath = $ENV{'PROGRAMFILES'} || "C:\\Program Files";
- if ($ENV{'VSINSTALLDIR'}) {
- $vsInstallDir = $ENV{'VSINSTALLDIR'};
- } else {
- $vsInstallDir = "$programFilesPath/Microsoft Visual Studio 8";
- }
- $vsInstallDir =~ s,\\,/,g;
- $vsInstallDir = `cygpath "$vsInstallDir"` if isCygwin();
- chomp $vsInstallDir;
- $vcBuildPath = "$vsInstallDir/Common7/IDE/devenv.com";
- if (! -e $vcBuildPath) {
- # Visual Studio not found, try VC++ Express
- $vcBuildPath = "$vsInstallDir/Common7/IDE/VCExpress.exe";
- if (! -e $vcBuildPath) {
- print "*************************************************************\n";
- print "Cannot find '$vcBuildPath'\n";
- print "Please execute the file 'vcvars32.bat' from\n";
- print "'$programFilesPath\\Microsoft Visual Studio 8\\VC\\bin\\'\n";
- print "to setup the necessary environment variables.\n";
- print "*************************************************************\n";
- die;
- }
- }
-
- # Create command line and execute it.
- my @command = ($vcBuildPath, $projectPath, $action, $config);
- print "Building results into: ", baseProductDir(), "\n";
- print join(" ", @command), "\n";
- return system @command;
-}
-
-sub buildChromium($@)
-{
- my ($clean, @options) = @_;
-
- # We might need to update DEPS or re-run GYP if things have changed.
- if (checkForArgumentAndRemoveFromArrayRef("--update-chromium", \@options)) {
- my @updateCommand = ("perl", "Tools/Scripts/update-webkit-chromium", "--force");
- push @updateCommand, "--chromium-android" if isChromiumAndroid();
- system(@updateCommand) == 0 or die $!;
- }
-
- my $result = 1;
- if (isDarwin() && !isChromiumAndroid() && !isChromiumMacMake() && !isChromiumNinja()) {
- # Mac build - builds the root xcode project.
- $result = buildXCodeProject("Source/WebKit/chromium/All", $clean, "-configuration", configuration(), @options);
- } elsif (isCygwin() || isWindows()) {
- # Windows build - builds the root visual studio solution.
- $result = buildChromiumVisualStudioProject("Source/WebKit/chromium/All.sln", $clean);
- } elsif (isChromiumNinja() && !isChromiumAndroid()) {
- $result = buildChromiumNinja("all", $clean, @options);
- } elsif (isLinux() || isChromiumAndroid() || isChromiumMacMake()) {
- # Linux build - build using make.
- $result = buildChromiumMakefile("all", $clean, @options);
- } else {
- print STDERR "This platform is not supported by chromium.\n";
- }
- return $result;
+ return buildAutotoolsProject($project, $clean, $prefix, $makeArgs, $noWebKit1, $noWebKit2, @features);
}
sub appleApplicationSupportPath
@@ -2640,7 +2482,7 @@ sub execMacWebKitAppForDebugging($)
die "Targetting the Web Process is not compatible with using an XPC Service for the Web Process at this time.";
}
- my $webProcessShimPath = File::Spec->catfile($productDir, "WebProcessShim.dylib");
+ my $webProcessShimPath = File::Spec->catfile($productDir, "SecItemShim.dylib");
my $webProcessPath = File::Spec->catdir($productDir, "WebProcess.app");
my $webKit2ExecutablePath = File::Spec->catfile($productDir, "WebKit2.framework", "WebKit2");
@@ -2658,15 +2500,6 @@ sub debugSafari
execMacWebKitAppForDebugging(safariPath());
}
- if (isAppleWinWebKit()) {
- setupCygwinEnv();
- my $productDir = productDir();
- chomp($ENV{WEBKITNIGHTLY} = `cygpath -wa "$productDir"`);
- my $safariPath = safariPath();
- chomp($safariPath = `cygpath -wa "$safariPath"`);
- return system { $vcBuildPath } $vcBuildPath, "/debugexe", "\"$safariPath\"", @ARGV;
- }
-
return 1; # Unsupported platform; can't debug Safari on this platform.
}
@@ -2680,7 +2513,7 @@ sub runSafari
if (isAppleWinWebKit()) {
my $result;
my $productDir = productDir();
- my $webKitLauncherPath = File::Spec->catfile(productDir(), "WebKit.exe");
+ my $webKitLauncherPath = File::Spec->catfile(productDir(), "WinLauncher.exe");
return system { $webKitLauncherPath } $webKitLauncherPath, @ARGV;
}
diff --git a/Tools/Scripts/webkitperl/FeatureList.pm b/Tools/Scripts/webkitperl/FeatureList.pm
index c104d1f31..0e39102ed 100644
--- a/Tools/Scripts/webkitperl/FeatureList.pm
+++ b/Tools/Scripts/webkitperl/FeatureList.pm
@@ -44,35 +44,42 @@ BEGIN {
my (
$threeDRenderingSupport,
$accelerated2DCanvasSupport,
- $animationAPISupport,
$batteryStatusSupport,
$blobSupport,
+ $canvasPathSupport,
+ $canvasProxySupport,
$channelMessagingSupport,
$cspNextSupport,
- $css3BackgroundSupport,
$css3ConditionalRulesSupport,
$css3TextSupport,
+ $css3TextLineBreakSupport,
+ $css4ImagesSupport,
$cssBoxDecorationBreakSupport,
$cssDeviceAdaptation,
$cssExclusionsSupport,
$cssFiltersSupport,
- $cssHierarchiesSupport,
$cssImageOrientationSupport,
$cssImageResolutionSupport,
+ $cssImageSetSupport,
$cssRegionsSupport,
$cssShadersSupport,
+ $cssShapesSupport,
+ $cssStickyPositionSupport,
$cssCompositingSupport,
+ $cssAnimationsTransformsUnprefixedSupport,
$cssVariablesSupport,
$customSchemeHandlerSupport,
$dataTransferItemsSupport,
- $datalistSupport,
- $detailsSupport,
+ $datalistElementSupport,
+ $detailsElementSupport,
$deviceOrientationSupport,
$dialogElementSupport,
$directoryUploadSupport,
+ $dom4EventsConstructor,
$downloadAttributeSupport,
$fileSystemSupport,
$filtersSupport,
+ $fontLoadEventsSupport,
$ftpDirSupport,
$fullscreenAPISupport,
$gamepadSupport,
@@ -80,7 +87,6 @@ my (
$highDPICanvasSupport,
$icondatabaseSupport,
$iframeSeamlessSupport,
- $imageResizerSupport,
$indexedDatabaseSupport,
$inputSpeechSupport,
$inputTypeColorSupport,
@@ -102,72 +108,79 @@ my (
$mediaSourceSupport,
$mediaStatisticsSupport,
$mediaStreamSupport,
- $meterTagSupport,
+ $meterElementSupport,
$mhtmlSupport,
$microdataSupport,
- $mutationObserversSupport,
+ $mouseCursorScaleSupport,
$netscapePluginAPISupport,
$networkInfoSupport,
+ $nosniffSupport,
$notificationsSupport,
$orientationEventsSupport,
$pageVisibilityAPISupport,
- $progressTagSupport,
+ $performanceTimelineSupport,
+ $progressElementSupport,
$proximityEventsSupport,
$quotaSupport,
$resolutionMediaQuerySupport,
$registerProtocolHandlerSupport,
$requestAnimationFrameSupport,
+ $resourceTimingSupport,
$scriptedSpeechSupport,
+ $seccompFiltersSupport,
$shadowDOMSupport,
$sharedWorkersSupport,
$sqlDatabaseSupport,
$styleScopedSupport,
+ $suidLinuxSandbox,
$svgDOMObjCBindingsSupport,
$svgFontsSupport,
$svgSupport,
$systemMallocSupport,
- $templateTagSupport,
+ $templateElementSupport,
$textAutosizingSupport,
$tiledBackingStoreSupport,
+ $threadedHTMLParserSupport,
$touchEventsSupport,
$touchSliderSupport,
$touchIconLoadingSupport,
+ $userTimingSupport,
$vibrationSupport,
$videoSupport,
$videoTrackSupport,
$webglSupport,
$webAudioSupport,
- $webIntentsSupport,
- $webIntentsTagSupport,
$webSocketsSupport,
$webTimingSupport,
$workersSupport,
- $xhrResponseBlobSupport,
$xhrTimeoutSupport,
$xsltSupport,
);
my @features = (
{ option => "3d-rendering", desc => "Toggle 3D Rendering support",
- define => "ENABLE_3D_RENDERING", default => (isAppleMacWebKit() || isQt()), value => \$threeDRenderingSupport },
+ define => "ENABLE_3D_RENDERING", default => (isAppleMacWebKit() || isGtk() || isEfl()), value => \$threeDRenderingSupport },
{ option => "accelerated-2d-canvas", desc => "Toggle Accelerated 2D Canvas support",
define => "ENABLE_ACCELERATED_2D_CANVAS", default => 0, value => \$accelerated2DCanvasSupport },
- { option => "animation-api", desc => "Toggle Animation API support",
- define => "ENABLE_ANIMATION_API", default => (isBlackBerry() || isEfl()), value => \$animationAPISupport },
-
{ option => "battery-status", desc => "Toggle Battery Status support",
define => "ENABLE_BATTERY_STATUS", default => (isEfl() || isBlackBerry()), value => \$batteryStatusSupport },
{ option => "blob", desc => "Toggle Blob support",
- define => "ENABLE_BLOB", default => (isAppleMacWebKit() || isGtk() || isChromium() || isBlackBerry() || isEfl()), value => \$blobSupport },
+ define => "ENABLE_BLOB", default => (isAppleMacWebKit() || isGtk() || isBlackBerry() || isEfl()), value => \$blobSupport },
+
+ { option => "canvas-path", desc => "Toggle Canvas Path support",
+ define => "ENABLE_CANVAS_PATH", default => 1, value => \$canvasPathSupport },
+
+ { option => "canvas-proxy", desc => "Toggle CanvasProxy support",
+ define => "ENABLE_CANVAS_PROXY", default => 0, value => \$canvasProxySupport },
{ option => "channel-messaging", desc => "Toggle Channel Messaging support",
define => "ENABLE_CHANNEL_MESSAGING", default => 1, value => \$channelMessagingSupport },
{ option => "csp-next", desc => "Toggle Content Security Policy 1.1 support",
- define => "ENABLE_CSP_NEXT", default => 0, value => \$cspNextSupport },
+ define => "ENABLE_CSP_NEXT", default => isGtk(), value => \$cspNextSupport },
{ option => "css-device-adaptation", desc => "Toggle CSS Device Adaptation support",
define => "ENABLE_CSS_DEVICE_ADAPTATION", default => isEfl(), value => \$cssDeviceAdaptation },
@@ -175,26 +188,32 @@ my @features = (
{ option => "css-exclusions", desc => "Toggle CSS Exclusions support",
define => "ENABLE_CSS_EXCLUSIONS", default => 1, value => \$cssExclusionsSupport },
+ { option => "css-shapes", desc => "Toggle CSS Shapes support",
+ define => "ENABLE_CSS_SHAPES", default => 1, value => \$cssShapesSupport },
+
{ option => "css-filters", desc => "Toggle CSS Filters support",
define => "ENABLE_CSS_FILTERS", default => isAppleWebKit() || isBlackBerry(), value => \$cssFiltersSupport },
- { option => "css3-background", desc => "Toggle CSS3 Background support",
- define => "ENABLE_CSS3_BACKGROUND", default => 0, value => \$css3BackgroundSupport },
-
{ option => "css3-conditional-rules", desc => "Toggle CSS3 Conditional Rules support (i.e. \@supports)",
define => "ENABLE_CSS3_CONDITIONAL_RULES", default => 0, value => \$css3ConditionalRulesSupport },
{ option => "css3-text", desc => "Toggle CSS3 Text support",
- define => "ENABLE_CSS3_TEXT", default => isEfl(), value => \$css3TextSupport },
+ define => "ENABLE_CSS3_TEXT", default => (isEfl() || isGtk()), value => \$css3TextSupport },
+
+ { option => "css3-text-line-break", desc => "Toggle CSS3 Text Line Break support",
+ define => "ENABLE_CSS3_TEXT_LINE_BREAK", default => 0, value => \$css3TextLineBreakSupport },
{ option => "css-box-decoration-break", desc => "Toggle CSS box-decoration-break support",
define => "ENABLE_CSS_BOX_DECORATION_BREAK", default => 1, value => \$cssBoxDecorationBreakSupport },
{ option => "css-image-orientation", desc => "Toggle CSS image-orientation support",
- define => "ENABLE_CSS_IMAGE_ORIENTATION", default => 0, value => \$cssImageOrientationSupport },
+ define => "ENABLE_CSS_IMAGE_ORIENTATION", default => isGtk(), value => \$cssImageOrientationSupport },
{ option => "css-image-resolution", desc => "Toggle CSS image-resolution support",
- define => "ENABLE_CSS_IMAGE_RESOLUTION", default => 0, value => \$cssImageResolutionSupport },
+ define => "ENABLE_CSS_IMAGE_RESOLUTION", default => (isBlackBerry() || isGtk()), value => \$cssImageResolutionSupport },
+
+ { option => "css-image-set", desc => "Toggle CSS image-set support",
+ define => "ENABLE_CSS_IMAGE_SET", default => (isEfl() || isGtk()), value => \$cssImageSetSupport },
{ option => "css-regions", desc => "Toggle CSS Regions support",
define => "ENABLE_CSS_REGIONS", default => 1, value => \$cssRegionsSupport },
@@ -202,33 +221,42 @@ my @features = (
{ option => "css-shaders", desc => "Toggle CSS Shaders support",
define => "ENABLE_CSS_SHADERS", default => isAppleMacWebKit(), value => \$cssShadersSupport },
+ { option => "css-sticky-position", desc => "Toggle CSS sticky position support",
+ define => "ENABLE_CSS_STICKY_POSITION", default => (isGtk() || isEfl()), value => \$cssStickyPositionSupport },
+
{ option => "css-compositing", desc => "Toggle CSS Compositing support",
define => "ENABLE_CSS_COMPOSITING", default => isAppleWebKit(), value => \$cssCompositingSupport },
+ { option => "css-transforms-animations-unprefixed", desc => "Toggle support for unprefixed CSS animations and transforms",
+ define => "ENABLE_CSS_TRANSFORMS_ANIMATIONS_UNPREFIXED", default => 1, value => \$cssAnimationsTransformsUnprefixedSupport },
+
{ option => "css-variables", desc => "Toggle CSS Variable support",
- define => "ENABLE_CSS_VARIABLES", default => (isBlackBerry() || isEfl()), value => \$cssVariablesSupport },
+ define => "ENABLE_CSS_VARIABLES", default => (isBlackBerry() || isEfl() || isGtk()), value => \$cssVariablesSupport },
{ option => "custom-scheme-handler", desc => "Toggle Custom Scheme Handler support",
define => "ENABLE_CUSTOM_SCHEME_HANDLER", default => (isBlackBerry() || isEfl()), value => \$customSchemeHandlerSupport },
- { option => "datalist", desc => "Toggle Datalist support",
- define => "ENABLE_DATALIST_ELEMENT", default => isEfl(), value => \$datalistSupport },
+ { option => "datalist-element", desc => "Toggle Datalist Element support",
+ define => "ENABLE_DATALIST_ELEMENT", default => isEfl(), value => \$datalistElementSupport },
{ option => "data-transfer-items", desc => "Toggle Data Transfer Items support",
define => "ENABLE_DATA_TRANSFER_ITEMS", default => 0, value => \$dataTransferItemsSupport },
- { option => "details", desc => "Toggle Details support",
- define => "ENABLE_DETAILS_ELEMENT", default => 1, value => \$detailsSupport },
+ { option => "details-element", desc => "Toggle Details Element support",
+ define => "ENABLE_DETAILS_ELEMENT", default => 1, value => \$detailsElementSupport },
{ option => "device-orientation", desc => "Toggle Device Orientation support",
define => "ENABLE_DEVICE_ORIENTATION", default => isBlackBerry(), value => \$deviceOrientationSupport },
- { option => "dialog", desc => "Toggle Dialog Element support",
+ { option => "dialog-element", desc => "Toggle Dialog Element support",
define => "ENABLE_DIALOG_ELEMENT", default => 0, value => \$dialogElementSupport },
{ option => "directory-upload", desc => "Toggle Directory Upload support",
define => "ENABLE_DIRECTORY_UPLOAD", default => 0, value => \$directoryUploadSupport },
+ { option => "dom4-events-constructor", desc => "Expose DOM4 Events constructors",
+ define => "ENABLE_DOM4_EVENTS_CONSTRUCTOR", default => (isAppleWebKit() || isGtk() || isEfl()), value => \$dom4EventsConstructor },
+
{ option => "download-attribute", desc => "Toggle Download Attribute support",
define => "ENABLE_DOWNLOAD_ATTRIBUTE", default => (isBlackBerry() || isEfl()), value => \$downloadAttributeSupport },
@@ -236,16 +264,19 @@ my @features = (
define => "ENABLE_FILE_SYSTEM", default => isBlackBerry(), value => \$fileSystemSupport },
{ option => "filters", desc => "Toggle Filters support",
- define => "ENABLE_FILTERS", default => (isAppleWebKit() || isGtk() || isQt() || isEfl() || isBlackBerry()), value => \$filtersSupport },
+ define => "ENABLE_FILTERS", default => (isAppleWebKit() || isGtk() || isEfl() || isBlackBerry()), value => \$filtersSupport },
+
+ { option => "font-load-events", desc => "Toggle Font Load Events support",
+ define => "ENABLE_FONT_LOAD_EVENTS", default => 0, value => \$fontLoadEventsSupport },
{ option => "ftpdir", desc => "Toggle FTP Directory support",
define => "ENABLE_FTPDIR", default => !isWinCE(), value => \$ftpDirSupport },
{ option => "fullscreen-api", desc => "Toggle Fullscreen API support",
- define => "ENABLE_FULLSCREEN_API", default => (isAppleMacWebKit() || isEfl() || isGtk() || isBlackBerry() || isQt()), value => \$fullscreenAPISupport },
+ define => "ENABLE_FULLSCREEN_API", default => (isAppleMacWebKit() || isEfl() || isGtk() || isBlackBerry()), value => \$fullscreenAPISupport },
{ option => "gamepad", desc => "Toggle Gamepad support",
- define => "ENABLE_GAMEPAD", default => (isEfl() || isGtk() || isQt()), value => \$gamepadSupport },
+ define => "ENABLE_GAMEPAD", default => (isEfl() || isGtk()), value => \$gamepadSupport },
{ option => "geolocation", desc => "Toggle Geolocation support",
define => "ENABLE_GEOLOCATION", default => (isAppleWebKit() || isGtk() || isBlackBerry()), value => \$geolocationSupport },
@@ -260,19 +291,19 @@ my @features = (
define => "ENABLE_IFRAME_SEAMLESS", default => 1, value => \$iframeSeamlessSupport },
{ option => "indexed-database", desc => "Toggle Indexed Database support",
- define => "ENABLE_INDEXED_DATABASE", default => 0, value => \$indexedDatabaseSupport },
+ define => "ENABLE_INDEXED_DATABASE", default => (isGtk() || isEfl()), value => \$indexedDatabaseSupport },
{ option => "input-speech", desc => "Toggle Input Speech support",
define => "ENABLE_INPUT_SPEECH", default => 0, value => \$inputSpeechSupport },
{ option => "input-type-color", desc => "Toggle Input Type Color support",
- define => "ENABLE_INPUT_TYPE_COLOR", default => (isBlackBerry() || isEfl() || isQt()), value => \$inputTypeColorSupport },
+ define => "ENABLE_INPUT_TYPE_COLOR", default => (isBlackBerry() || isEfl()), value => \$inputTypeColorSupport },
{ option => "input-type-date", desc => "Toggle Input Type Date support",
define => "ENABLE_INPUT_TYPE_DATE", default => 0, value => \$inputTypeDateSupport },
- { option => "input-type-datetime", desc => "Toggle Input Type Datetime support",
- define => "ENABLE_INPUT_TYPE_DATETIME", default => 0, value => \$inputTypeDatetimeSupport },
+ { option => "input-type-datetime", desc => "Toggle broken Input Type Datetime support",
+ define => "ENABLE_INPUT_TYPE_DATETIME_INCOMPLETE", default => 0, value => \$inputTypeDatetimeSupport },
{ option => "input-type-datetimelocal", desc => "Toggle Input Type Datetimelocal support",
define => "ENABLE_INPUT_TYPE_DATETIMELOCAL", default => 0, value => \$inputTypeDatetimelocalSupport },
@@ -296,7 +327,7 @@ my @features = (
define => "ENABLE_LEGACY_NOTIFICATIONS", default => isBlackBerry(), value => \$legacyNotificationsSupport },
{ option => "legacy-vendor-prefixes", desc => "Toggle Legacy Vendor Prefix support",
- define => "ENABLE_LEGACY_VENDOR_PREFIXES", default => !isChromium(), value => \$legacyVendorPrefixSupport },
+ define => "ENABLE_LEGACY_VENDOR_PREFIXES", default => 1, value => \$legacyVendorPrefixSupport },
{ option => "legacy-web-audio", desc => "Toggle Legacy Web Audio support",
define => "ENABLE_LEGACY_WEB_AUDIO", default => 1, value => \$legacyWebAudioSupport },
@@ -304,11 +335,8 @@ my @features = (
{ option => "link-prefetch", desc => "Toggle Link Prefetch support",
define => "ENABLE_LINK_PREFETCH", default => (isGtk() || isEfl()), value => \$linkPrefetchSupport },
- { option => "link-prerender", desc => "Toggle Link Prerender support",
- define => "ENABLE_LINK_PRERENDER", default => 0, value => \$linkPrerenderSupport },
-
{ option => "mathml", desc => "Toggle MathML support",
- define => "ENABLE_MATHML", default => 1, value => \$mathmlSupport },
+ define => "ENABLE_MATHML", default => !isBlackBerry(), value => \$mathmlSupport },
{ option => "media-capture", desc => "Toggle Media Capture support",
define => "ENABLE_MEDIA_CAPTURE", default => isEfl(), value => \$mediaCaptureSupport },
@@ -320,19 +348,19 @@ my @features = (
define => "ENABLE_MEDIA_STATISTICS", default => 0, value => \$mediaStatisticsSupport },
{ option => "media-stream", desc => "Toggle Media Stream support",
- define => "ENABLE_MEDIA_STREAM", default => (isChromium() || isGtk() || isBlackBerry()), value => \$mediaStreamSupport },
+ define => "ENABLE_MEDIA_STREAM", default => isBlackBerry(), value => \$mediaStreamSupport },
- { option => "meter-tag", desc => "Toggle Meter Tag support",
- define => "ENABLE_METER_ELEMENT", default => !isAppleWinWebKit(), value => \$meterTagSupport },
+ { option => "meter-element", desc => "Toggle Meter Element support",
+ define => "ENABLE_METER_ELEMENT", default => !isAppleWinWebKit(), value => \$meterElementSupport },
{ option => "mhtml", desc => "Toggle MHTML support",
- define => "ENABLE_MHTML", default => isGtk(), value => \$mhtmlSupport },
+ define => "ENABLE_MHTML", default => (isGtk() || isEfl()), value => \$mhtmlSupport },
{ option => "microdata", desc => "Toggle Microdata support",
- define => "ENABLE_MICRODATA", default => (isEfl() || isBlackBerry()), value => \$microdataSupport },
+ define => "ENABLE_MICRODATA", default => (isEfl() || isBlackBerry() || isGtk()), value => \$microdataSupport },
- { option => "mutation-observers", desc => "Toggle Mutation Observers support",
- define => "ENABLE_MUTATION_OBSERVERS", default => 1, value => \$mutationObserversSupport },
+ { option => "mouse-cursor-scale", desc => "Toggle Scaled mouse cursor support",
+ define => "ENABLE_MOUSE_CURSOR_SCALE", default => isEfl(), value => \$mouseCursorScaleSupport },
{ option => "navigator-content-utils", desc => "Toggle Navigator Content Utils support",
define => "ENABLE_NAVIGATOR_CONTENT_UTILS", default => (isBlackBerry() || isEfl()), value => \$registerProtocolHandlerSupport },
@@ -343,6 +371,9 @@ my @features = (
{ option => "network-info", desc => "Toggle Network Info support",
define => "ENABLE_NETWORK_INFO", default => (isEfl() || isBlackBerry()), value => \$networkInfoSupport },
+ { option => "nosniff", desc => "Toggle support for 'X-Content-Type-Options: nosniff'",
+ define => "ENABLE_NOSNIFF", default => 0, value => \$nosniffSupport },
+
{ option => "notifications", desc => "Toggle Notifications support",
define => "ENABLE_NOTIFICATIONS", default => isBlackBerry(), value => \$notificationsSupport },
@@ -350,10 +381,13 @@ my @features = (
define => "ENABLE_ORIENTATION_EVENTS", default => isBlackBerry(), value => \$orientationEventsSupport },
{ option => "page-visibility-api", desc => "Toggle Page Visibility API support",
- define => "ENABLE_PAGE_VISIBILITY_API", default => (isBlackBerry() || isEfl()), value => \$pageVisibilityAPISupport },
+ define => "ENABLE_PAGE_VISIBILITY_API", default => (isBlackBerry() || isEfl() || isGtk()), value => \$pageVisibilityAPISupport },
+
+ { option => "performance-timeline", desc => "Toggle Performance Timeline support",
+ define => "ENABLE_PERFORMANCE_TIMELINE", default => isGtk(), value => \$performanceTimelineSupport },
- { option => "progress-tag", desc => "Toggle Progress Tag support",
- define => "ENABLE_PROGRESS_ELEMENT", default => 1, value => \$progressTagSupport },
+ { option => "progress-element", desc => "Toggle Progress Element support",
+ define => "ENABLE_PROGRESS_ELEMENT", default => 1, value => \$progressElementSupport },
{ option => "proximity-events", desc => "Toggle Proximity Events support",
define => "ENABLE_PROXIMITY_EVENTS", default => 0, value => \$proximityEventsSupport },
@@ -362,16 +396,22 @@ my @features = (
define => "ENABLE_QUOTA", default => 0, value => \$quotaSupport },
{ option => "resolution-media-query", desc => "Toggle resolution media query support",
- define => "ENABLE_RESOLUTION_MEDIA_QUERY", default => (isEfl() || isQt()), value => \$resolutionMediaQuerySupport },
+ define => "ENABLE_RESOLUTION_MEDIA_QUERY", default => isEfl(), value => \$resolutionMediaQuerySupport },
+
+ { option => "resource-timing", desc => "Toggle Resource Timing support",
+ define => "ENABLE_RESOURCE_TIMING", default => isGtk(), value => \$resourceTimingSupport },
{ option => "request-animation-frame", desc => "Toggle Request Animation Frame support",
define => "ENABLE_REQUEST_ANIMATION_FRAME", default => (isAppleMacWebKit() || isGtk() || isEfl() || isBlackBerry()), value => \$requestAnimationFrameSupport },
+ { option => "seccomp-filters", desc => "Toggle Seccomp Filter sandbox",
+ define => "ENABLE_SECCOMP_FILTERS", default => 0, value => \$seccompFiltersSupport },
+
{ option => "scripted-speech", desc => "Toggle Scripted Speech support",
define => "ENABLE_SCRIPTED_SPEECH", default => 0, value => \$scriptedSpeechSupport },
{ option => "shadow-dom", desc => "Toggle Shadow DOM support",
- define => "ENABLE_SHADOW_DOM", default => (isGtk() || isEfl()), value => \$shadowDOMSupport },
+ define => "ENABLE_SHADOW_DOM", default => 0, value => \$shadowDOMSupport },
{ option => "shared-workers", desc => "Toggle Shared Workers support",
define => "ENABLE_SHARED_WORKERS", default => (isAppleWebKit() || isGtk() || isBlackBerry() || isEfl()), value => \$sharedWorkersSupport },
@@ -380,31 +420,34 @@ my @features = (
define => "ENABLE_SQL_DATABASE", default => 1, value => \$sqlDatabaseSupport },
{ option => "style-scoped", desc => "Toggle Style Scoped support",
- define => "ENABLE_STYLE_SCOPED", default => isBlackBerry(), value => \$styleScopedSupport },
+ define => "ENABLE_STYLE_SCOPED", default => (isBlackBerry() || isGtk()), value => \$styleScopedSupport },
+
+ { option => "suid-linux-sandbox", desc => "Toggle suid sandbox for linux",
+ define => "ENABLE_SUID_SANDBOX_LINUX", default => 0, value => \$suidLinuxSandbox },
{ option => "svg", desc => "Toggle SVG support",
define => "ENABLE_SVG", default => 1, value => \$svgSupport },
- { option => "svg-dom-objc-bindings", desc => "Toggle SVG DOM ObjC Bindings support",
- define => "ENABLE_SVG_DOM_OBJC_BINDINGS", default => isAppleMacWebKit(), value => \$svgDOMObjCBindingsSupport },
-
{ option => "svg-fonts", desc => "Toggle SVG Fonts support",
define => "ENABLE_SVG_FONTS", default => 1, value => \$svgFontsSupport },
{ option => "system-malloc", desc => "Toggle system allocator instead of TCmalloc",
- define => "USE_SYSTEM_MALLOC", default => isWinCE(), value => \$systemMallocSupport },
+ define => "USE_SYSTEM_MALLOC", default => (isBlackBerry() || isWinCE()), value => \$systemMallocSupport },
- { option => "template-tag", desc => "Toggle Templates Tag support",
- define => "ENABLE_TEMPLATE_ELEMENT", default => !isAppleWinWebKit(), value => \$templateTagSupport },
+ { option => "template-element", desc => "Toggle HTMLTemplateElement support",
+ define => "ENABLE_TEMPLATE_ELEMENT", default => (isEfl() || isGtk()), value => \$templateElementSupport },
{ option => "text-autosizing", desc => "Toggle Text Autosizing support",
- define => "ENABLE_TEXT_AUTOSIZING", default => 0, value => \$textAutosizingSupport },
+ define => "ENABLE_TEXT_AUTOSIZING", default => isBlackBerry(), value => \$textAutosizingSupport },
{ option => "tiled-backing-store", desc => "Toggle Tiled Backing Store support",
- define => "WTF_USE_TILED_BACKING_STORE", default => (isQt() || isEfl()), value => \$tiledBackingStoreSupport },
+ define => "WTF_USE_TILED_BACKING_STORE", default => isEfl(), value => \$tiledBackingStoreSupport },
+
+ { option => "threaded-html-parser", desc => "Toggle threaded HTML parser support",
+ define => "ENABLE_THREADED_HTML_PARSER", default => 0, value => \$threadedHTMLParserSupport },
{ option => "touch-events", desc => "Toggle Touch Events support",
- define => "ENABLE_TOUCH_EVENTS", default => (isQt() || isBlackBerry() || isEfl()), value => \$touchEventsSupport },
+ define => "ENABLE_TOUCH_EVENTS", default => (isBlackBerry() || isEfl()), value => \$touchEventsSupport },
{ option => "touch-slider", desc => "Toggle Touch Slider support",
define => "ENABLE_TOUCH_SLIDER", default => isBlackBerry(), value => \$touchSliderSupport },
@@ -412,6 +455,9 @@ my @features = (
{ option => "touch-icon-loading", desc => "Toggle Touch Icon Loading Support",
define => "ENABLE_TOUCH_ICON_LOADING", default => 0, value => \$touchIconLoadingSupport },
+ { option => "user-timing", desc => "Toggle User Timing support",
+ define => "ENABLE_USER_TIMING", default => isGtk(), value => \$userTimingSupport },
+
{ option => "vibration", desc => "Toggle Vibration support",
define => "ENABLE_VIBRATION", default => (isEfl() || isBlackBerry()), value => \$vibrationSupport },
@@ -422,16 +468,10 @@ my @features = (
define => "ENABLE_VIDEO_TRACK", default => (isAppleWebKit() || isGtk() || isEfl() || isBlackBerry()), value => \$videoTrackSupport },
{ option => "webgl", desc => "Toggle WebGL support",
- define => "ENABLE_WEBGL", default => (isAppleMacWebKit() || isGtk()), value => \$webglSupport },
+ define => "ENABLE_WEBGL", default => (isAppleMacWebKit() || isGtk() || isEfl()), value => \$webglSupport },
{ option => "web-audio", desc => "Toggle Web Audio support",
- define => "ENABLE_WEB_AUDIO", default => 0, value => \$webAudioSupport },
-
- { option => "web-intents", desc => "Toggle Web Intents support",
- define => "ENABLE_WEB_INTENTS", default => isEfl(), value => \$webIntentsSupport },
-
- { option => "web-intents-tag", desc => "Toggle Web Intents Tag support",
- define => "ENABLE_WEB_INTENTS_TAG", default => isEfl(), value => \$webIntentsTagSupport },
+ define => "ENABLE_WEB_AUDIO", default => (isEfl() || isGtk()), value => \$webAudioSupport },
{ option => "web-sockets", desc => "Toggle Web Sockets support",
define => "ENABLE_WEB_SOCKETS", default => 1, value => \$webSocketsSupport },
@@ -442,9 +482,6 @@ my @features = (
{ option => "workers", desc => "Toggle Workers support",
define => "ENABLE_WORKERS", default => (isAppleWebKit() || isGtk() || isBlackBerry() || isEfl()), value => \$workersSupport },
- { option => "xhr-response-blob", desc => "Toggle XHR Response BLOB support",
- define => "ENABLE_XHR_RESPONSE_BLOB", default => isBlackBerry(), value => \$xhrResponseBlobSupport },
-
{ option => "xhr-timeout", desc => "Toggle XHR Timeout support",
define => "ENABLE_XHR_TIMEOUT", default => (isEfl() || isGtk() || isAppleMacWebKit()), value => \$xhrTimeoutSupport },
diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl
index d4165620b..5b30fcbb8 100644
--- a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl
+++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiff.pl
@@ -980,11 +980,11 @@ index f5d5e74..3b6aa92 100644
END
expectedReturn => [
[{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: Makefile
index f5d5e74..3b6aa92 100644
---- Makefile
-+++ Makefile
+--- Makefile\t(revision 0)
++++ Makefile\t(working copy)
@@ -1,1 +1,1 @@ public:
END
indexPath => "Makefile",
@@ -1008,11 +1008,11 @@ index 863339f..db418b2 100644
END
expectedReturn => [
[{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo
index 863339f..db418b2 100644
---- foo
-+++ foo
+--- foo\t(revision 0)
++++ foo\t(working copy)
@@ -1 +1,2 @@
Passed
+
@@ -1039,12 +1039,12 @@ index d45dd40..3494526 100644
END
expectedReturn => [
[{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo.h
new file mode 100644
index 0000000..3c9f114
---- foo.h
-+++ foo.h
+--- foo.h\t(revision 0)
++++ foo.h\t(working copy)
@@ -0,0 +1,34 @@
+<html>
END
@@ -1071,12 +1071,12 @@ index d45dd40..3494526 100644
END
expectedReturn => [
[{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo
deleted file mode 100644
index 1e50d1d..0000000
---- foo
-+++ foo
+--- foo\t(revision 0)
++++ foo\t(working copy)
@@ -1,1 +0,0 @@
-line1
END
@@ -1103,11 +1103,11 @@ Index: Makefile_new
END
expectedReturn => [
[{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: Makefile
index f5d5e74..3b6aa92 100644
---- Makefile
-+++ Makefile
+--- Makefile\t(revision 0)
++++ Makefile\t(working copy)
@@ -1,1 +1,1 @@ public:
Index: Makefile_new
===================================================================
@@ -1199,14 +1199,14 @@ END
indexPath => "foo_new",
isGit => 1,
numTextChunks => 1,
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo_new
similarity index 99%
rename from foo
rename to foo_new
index 1e50d1d..1459d21 100644
---- foo_new
-+++ foo_new
+--- foo_new\t(revision 0)
++++ foo_new\t(working copy)
@@ -15,3 +15,4 @@ release r deployment dep deploy:
line1
line2
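The recurring switch from <<'END' to <<"END" in the expectations above is what lets the \t sequences become real tab characters: a single-quoted heredoc is taken literally, while a double-quoted heredoc interpolates escapes and variables. A minimal sketch of the difference:

# Single-quoted heredoc: \t stays as the two characters backslash and t.
my $literal = <<'END';
--- foo.h\t(revision 0)
END
# Double-quoted heredoc: \t is interpolated into a real tab character.
my $interpolated = <<"END";
--- foo.h\t(revision 0)
END
print $literal eq $interpolated ? "same\n" : "different\n";   # prints "different"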
diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl
index ee9fff903..2b1d1a201 100644
--- a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl
+++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseDiffWithMockFiles.pl
@@ -1,6 +1,7 @@
#!/usr/bin/perl -w
#
# Copyright (C) 2011 Research In Motion Limited. All rights reserved.
+# Copyright (C) 2013 Apple Inc. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
@@ -26,6 +27,33 @@ use POSIX qw/getcwd/;
use Test::More;
use VCSUtils;
+# We should consider moving escapeNewLineCharacters() and toMacLineEndings()
+# to VCSUtils.pm if they're useful in other places.
+sub escapeNewLineCharacters($)
+{
+ my ($text) = @_;
+ my @characters = split(//, $text);
+ my $result = "";
+ foreach (@characters) {
+ if (/^\r$/) {
+ $result .= '\r';
+ next;
+ }
+ if (/^\n$/) {
+ $result .= '\n';
+ }
+ $result .= $_;
+ }
+ return $result;
+}
+
+sub toMacLineEndings($)
+{
+ my ($text) = @_;
+ $text =~ s/\n/\r/g;
+ return $text;
+}
+
my $gitDiffHeaderForNewFile = <<EOF;
diff --git a/Makefile b/Makefile
new file mode 100644
@@ -43,20 +71,20 @@ index 756e864..04d2ae1 100644
@@ -1,3 +1,4 @@
EOF
-my $svnConvertedGitDiffHeader = <<EOF;
+my $svnConvertedGitDiffHeader = <<"EOF";
Index: Makefile
index 756e864..04d2ae1 100644
---- Makefile
-+++ Makefile
+--- Makefile\t(revision 0)
++++ Makefile\t(working copy)
@@ -1,3 +1,4 @@
EOF
-my $svnConvertedGitDiffHeaderForNewFile = <<EOF;
+my $svnConvertedGitDiffHeaderForNewFile = <<"EOF";
Index: Makefile
new file mode 100644
index 0000000..756e864
---- Makefile
-+++ Makefile
+--- Makefile\t(revision 0)
++++ Makefile\t(working copy)
@@ -0,0 +1,17 @@
EOF
@@ -92,6 +120,7 @@ EOF
my $mockDir = File::Temp->tempdir("parseDiffXXXX", CLEANUP => 1);
writeToFile(File::Spec->catfile($mockDir, "MakefileWithUnixEOL"), $MakefileContents);
writeToFile(File::Spec->catfile($mockDir, "MakefileWithWindowsEOL"), toWindowsLineEndings($MakefileContents));
+writeToFile(File::Spec->catfile($mockDir, "MakefileWithMacEOL"), toMacLineEndings($MakefileContents));
# The array of test cases.
my @testCaseHashRefs = (
@@ -145,6 +174,76 @@ undef],
},
{
# New test
+ diffName => "SVN: Patch adds Windows newline to EOF and IndexPath has Windows line endings",
+ inputText => <<"EOF",
+Index: MakefileWithWindowsEOL
+===================================================================
+--- MakefileWithWindowsEOL (revision 53052)
++++ MakefileWithWindowsEOL (working copy)
+@@ -1,3 +1,4 @@\r
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r
+ \r
+-all:
+\\ No newline at end of file
++all:\r
++\r
+EOF
+ expectedReturn => [
+[{
+ # Same as input text
+ svnConvertedText => <<"EOF",
+Index: MakefileWithWindowsEOL
+===================================================================
+--- MakefileWithWindowsEOL (revision 53052)
++++ MakefileWithWindowsEOL (working copy)
+@@ -1,3 +1,4 @@\r
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r
+ \r
+-all:
+\\ No newline at end of file
++all:\r
++\r
+EOF
+ indexPath => "MakefileWithWindowsEOL",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => 53052
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "SVN: Patch adds Mac newline to EOF and IndexPath has Mac line endings",
+ inputText => <<"EOF",
+Index: MakefileWithMacEOL
+===================================================================
+--- MakefileWithMacEOL (revision 53052)
++++ MakefileWithMacEOL (working copy)
+@@ -1,3 +1,4 @@\r MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r \r-all:
+\\ No newline at end of file
++all:\r+\r
+EOF
+ expectedReturn => [
+[{
+ # Same as input text
+ svnConvertedText => q(Index: MakefileWithMacEOL
+===================================================================
+--- MakefileWithMacEOL (revision 53052)
++++ MakefileWithMacEOL (working copy)
+@@ -1,3 +1,4 @@\r MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r \r-all:
+\\ No newline at end of file
++all:\r+\r),
+ indexPath => "MakefileWithMacEOL",
+ isSvn => 1,
+ numTextChunks => 1,
+ sourceRevision => 53052
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
diffName => "SVN: Patch with Unix line endings and IndexPath has Windows line endings",
inputText => substituteString($svnDiffHeader, "Makefile", "MakefileWithWindowsEOL") . $diffBody,
expectedReturn => [
@@ -235,6 +334,74 @@ undef],
},
{
# New test
+ diffName => "Git: Patch adds newline to EOF with Windows line endings and IndexPath has Windows line endings",
+ inputText => <<"EOF",
+diff --git a/MakefileWithWindowsEOL b/MakefileWithWindowsEOL
+index e7e8475..ae16fc3 100644
+--- a/MakefileWithWindowsEOL
++++ b/MakefileWithWindowsEOL
+@@ -1,3 +1,4 @@\r
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r
+ \r
+-all:
+\\ No newline at end of file
++all:\r
++\r
+EOF
+ expectedReturn => [
+[{
+ # Same as input text
+ svnConvertedText => <<"EOF",
+Index: MakefileWithWindowsEOL
+index e7e8475..ae16fc3 100644
+--- MakefileWithWindowsEOL\t(revision 0)
++++ MakefileWithWindowsEOL\t(working copy)
+@@ -1,3 +1,4 @@\r
+ MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r
+ \r
+-all:
+\\ No newline at end of file
++all:\r
++\r
+EOF
+ indexPath => "MakefileWithWindowsEOL",
+ isGit => 1,
+ numTextChunks => 1
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
+ diffName => "Git: Patch adds Mac newline to EOF and IndexPath has Mac line endings",
+ inputText => <<"EOF",
+diff --git a/MakefileWithMacEOL b/MakefileWithMacEOL
+index e7e8475..ae16fc3 100644
+--- a/MakefileWithMacEOL
++++ b/MakefileWithMacEOL
+@@ -1,3 +1,4 @@\r MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r \r-all:
+\\ No newline at end of file
++all:\r+\r
+EOF
+ expectedReturn => [
+[{
+ # Same as input text
+ svnConvertedText => qq(Index: MakefileWithMacEOL
+index e7e8475..ae16fc3 100644
+--- MakefileWithMacEOL\t(revision 0)
++++ MakefileWithMacEOL\t(working copy)
+@@ -1,3 +1,4 @@\r MODULES = JavaScriptCore JavaScriptGlue WebCore WebKit WebKitTools\r \r-all:
+\\ No newline at end of file
++all:\r+\r),
+ indexPath => "MakefileWithMacEOL",
+ isGit => 1,
+ numTextChunks => 1
+}],
+undef],
+ expectedNextLine => undef,
+},
+{
+ # New test
diffName => "Git: Patch with Unix line endings and IndexPath has Windows line endings",
inputText => substituteString($gitDiffHeader, "Makefile", "MakefileWithWindowsEOL") . $diffBody,
expectedReturn => [
@@ -294,6 +461,8 @@ foreach my $testCase (@testCaseHashRefs) {
my @got = VCSUtils::parseDiff($fileHandle, $line);
my $expectedReturn = $testCase->{expectedReturn};
+ $got[0][0]->{svnConvertedText} = escapeNewLineCharacters($got[0][0]->{svnConvertedText});
+ $expectedReturn->[0][0]->{svnConvertedText} = escapeNewLineCharacters($expectedReturn->[0][0]->{svnConvertedText});
is_deeply(\@got, $expectedReturn, "$testNameStart return value.");
my $gotNextLine = <$fileHandle>;
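The escapeNewLineCharacters() and toMacLineEndings() helpers added above exist so the new Mac-EOL test cases can be compared and reported readably: toMacLineEndings() turns every LF into a CR, and escapeNewLineCharacters() makes a CR visible as the two characters \r (dropping the control character itself), while an LF gets a visible \n but keeps the real newline so the output stays line-broken. A small usage sketch, assuming the helpers behave exactly as written above:

# Usage sketch for the helpers defined above.
my $unixText = "all:\nclean:\n";
my $macText  = toMacLineEndings($unixText);          # "all:\rclean:\r"
my $readable = escapeNewLineCharacters($macText);    # CRs now show up literally as "\r"
print "$readable\n";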
diff --git a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl
index bc0d4d41c..841e28387 100644
--- a/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl
+++ b/Tools/Scripts/webkitperl/VCSUtils_unittest/parseGitDiffHeader.pl
@@ -45,11 +45,35 @@ index f5d5e74..3b6aa92 100644
END
expectedReturn => [
{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo.h
index f5d5e74..3b6aa92 100644
---- foo.h
-+++ foo.h
+--- foo.h\t(revision 0)
++++ foo.h\t(working copy)
+END
+ indexPath => "foo.h",
+},
+"@@ -1 +1 @@\n"],
+ expectedNextLine => "-file contents\n",
+},
+{
+ diffName => "Modified file using --src-prefix and --dst-prefix option",
+ inputText => <<'END',
+diff --git s/foo.h d/foo.h
+index f5d5e74..3b6aa92 100644
+--- s/foo.h
++++ d/foo.h
+@@ -1 +1 @@
+-file contents
++new file contents
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<"END",
+Index: foo.h
+index f5d5e74..3b6aa92 100644
+--- foo.h\t(revision 0)
++++ foo.h\t(working copy)
END
indexPath => "foo.h",
},
@@ -57,6 +81,54 @@ END
expectedNextLine => "-file contents\n",
},
{ # New test
+ diffName => "Modified file which have space characters in path",
+ inputText => <<'END',
+diff --git a/foo bar.h b/foo bar.h
+index f5d5e74..3b6aa92 100644
+--- a/foo bar.h
++++ b/foo bar.h
+@@ -1 +1 @@
+-file contents
++new file contents
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<"END",
+Index: foo bar.h
+index f5d5e74..3b6aa92 100644
+--- foo bar.h\t(revision 0)
++++ foo bar.h\t(working copy)
+END
+ indexPath => "foo bar.h",
+},
+"@@ -1 +1 @@\n"],
+ expectedNextLine => "-file contents\n",
+},
+{ # New test
+ diffName => "Modified file which have space characters in path using --no-prefix",
+ inputText => <<'END',
+diff --git directory/foo bar.h directory/foo bar.h
+index f5d5e74..3b6aa92 100644
+--- directory/foo bar.h
++++ directory/foo bar.h
+@@ -1 +1 @@
+-file contents
++new file contents
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<"END",
+Index: directory/foo bar.h
+index f5d5e74..3b6aa92 100644
+--- directory/foo bar.h\t(revision 0)
++++ directory/foo bar.h\t(working copy)
+END
+ indexPath => "directory/foo bar.h",
+},
+"@@ -1 +1 @@\n"],
+ expectedNextLine => "-file contents\n",
+},
+{ # New test
diffName => "new file",
inputText => <<'END',
diff --git a/foo.h b/foo.h
@@ -69,12 +141,12 @@ index 0000000..3c9f114
END
expectedReturn => [
{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo.h
new file mode 100644
index 0000000..3c9f114
---- foo.h
-+++ foo.h
+--- foo.h\t(revision 0)
++++ foo.h\t(working copy)
END
indexPath => "foo.h",
isNew => 1,
@@ -97,12 +169,12 @@ index d45dd40..3494526 100644
END
expectedReturn => [
{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo
deleted file mode 100644
index 1e50d1d..0000000
---- foo
-+++ foo
+--- foo\t(revision 0)
++++ foo\t(working copy)
END
indexPath => "foo",
isDeletion => 1,
@@ -110,6 +182,34 @@ END
"@@ -1,1 +0,0 @@\n"],
expectedNextLine => "-line1\n",
},
+{
+ diffName => "delete file which have space characters in path using --no-prefix",
+ inputText => <<'END',
+diff --git directory/foo bar.h directory/foo bar.h
+deleted file mode 100644
+index 1e50d1d..0000000
+--- directory/foo bar.h
++++ /dev/null
+@@ -1,1 +0,0 @@
+-line1
+diff --git a/configure.ac b/configure.ac
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<"END",
+Index: directory/foo bar.h
+deleted file mode 100644
+index 1e50d1d..0000000
+--- directory/foo bar.h\t(revision 0)
++++ directory/foo bar.h\t(working copy)
+END
+ indexPath => "directory/foo bar.h",
+ isDeletion => 1,
+},
+"@@ -1,1 +0,0 @@\n"],
+ expectedNextLine => "-line1\n",
+},
{ # New test
diffName => "using --no-prefix",
inputText => <<'END',
@@ -122,11 +222,11 @@ index c925780..9e65c43 100644
END
expectedReturn => [
{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo.h
index c925780..9e65c43 100644
---- foo.h
-+++ foo.h
+--- foo.h\t(revision 0)
++++ foo.h\t(working copy)
END
indexPath => "foo.h",
},
@@ -160,6 +260,30 @@ END
"diff --git a/bar b/bar\n"],
expectedNextLine => "index d45dd40..3494526 100644\n",
},
+{
+ diffName => "copy file which have space characters in path using --no-prefix (with similarity index 100%)",
+ inputText => <<'END',
+diff --git directory/foo bar.h directory/foo baz.h
+similarity index 100%
+copy from directory/foo bar.h
+copy to directory/foo baz.h
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: directory/foo baz.h
+similarity index 100%
+copy from directory/foo bar.h
+copy to directory/foo baz.h
+END
+ copiedFromPath => "directory/foo bar.h",
+ indexPath => "directory/foo baz.h",
+},
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
{ # New test
diffName => "copy (with similarity index < 100%)",
inputText => <<'END',
@@ -210,6 +334,31 @@ END
"diff --git a/bar b/bar\n"],
expectedNextLine => "index d45dd40..3494526 100644\n",
},
+{
+ diffName => "rename file which have space characters in path using --no-prefix (with similarity index 100%)",
+ inputText => <<'END',
+diff --git directory/foo bar.h directory/foo baz.h
+similarity index 100%
+rename from directory/foo bar.h
+rename to directory/foo baz.h
+diff --git a/bar b/bar
+index d45dd40..3494526 100644
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<'END',
+Index: directory/foo baz.h
+similarity index 100%
+rename from directory/foo bar.h
+rename to directory/foo baz.h
+END
+ copiedFromPath => "directory/foo bar.h",
+ indexPath => "directory/foo baz.h",
+ shouldDeleteSource => 1,
+},
+"diff --git a/bar b/bar\n"],
+ expectedNextLine => "index d45dd40..3494526 100644\n",
+},
{ # New test
diffName => "rename (with similarity index < 100%)",
inputText => <<'END',
@@ -230,14 +379,14 @@ index d45dd40..3494526 100644
END
expectedReturn => [
{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo_new
similarity index 99%
rename from foo
rename to foo_new
index 1e50d1d..1459d21 100644
---- foo_new
-+++ foo_new
+--- foo_new\t(revision 0)
++++ foo_new\t(working copy)
END
copiedFromPath => "foo",
indexPath => "foo_new",
@@ -359,11 +508,11 @@ index d03e242..435ad3a 100755
END
expectedReturn => [
{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo
index d03e242..435ad3a 100755
---- foo
-+++ foo
+--- foo\t(revision 0)
++++ foo\t(working copy)
END
indexPath => "foo",
},
@@ -429,12 +578,12 @@ index 0000000..d03e242
END
expectedReturn => [
{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo
new file mode 100755
index 0000000..d03e242
---- foo
-+++ foo
+--- foo\t(revision 0)
++++ foo\t(working copy)
END
executableBitDelta => 1,
indexPath => "foo",
@@ -458,12 +607,12 @@ index d03e242..0000000
END
expectedReturn => [
{
- svnConvertedText => <<'END',
+ svnConvertedText => <<"END",
Index: foo
deleted file mode 100755
index d03e242..0000000
---- foo
-+++ foo
+--- foo\t(revision 0)
++++ foo\t(working copy)
END
executableBitDelta => -1,
indexPath => "foo",
@@ -472,6 +621,37 @@ END
"@@ -1 +0,0 @@\n"],
expectedNextLine => "-file contents\n",
},
+{
+ # svn-apply rejected https://bug-111042-attachments.webkit.org/attachment.cgi?id=190663
+ diffName => "Modified file which have space characters in path. svn-apply rejected attachment #190663.",
+ inputText => <<'END',
+diff --git a/WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme b/WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme
+index 72d60effb9bbba0520e520ec3c1aa43f348c6997..b7924b96d5978c1ad1053dca7e554023235d9a16 100644
+--- a/WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme
++++ b/WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme
+@@ -161,7 +161,7 @@
+ <EnvironmentVariables>
+ <EnvironmentVariable
+ key = "DYLD_INSERT_LIBRARIES"
+- value = "$(BUILT_PRODUCTS_DIR)/WebProcessShim.dylib"
++ value = "$(BUILT_PRODUCTS_DIR)/SecItemShim.dylib"
+ isEnabled = "YES">
+ </EnvironmentVariable>
+ </EnvironmentVariables>
+END
+ expectedReturn => [
+{
+ svnConvertedText => <<"END",
+Index: WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme
+index 72d60effb9bbba0520e520ec3c1aa43f348c6997..b7924b96d5978c1ad1053dca7e554023235d9a16 100644
+--- WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme\t(revision 0)
++++ WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme\t(working copy)
+END
+ indexPath => "WebKit.xcworkspace/xcshareddata/xcschemes/All Source (target WebProcess).xcscheme",
+},
+"@@ -161,7 +161,7 @@\n"],
+ expectedNextLine => " <EnvironmentVariables>\n",
+},
);
my $testCasesCount = @testCaseHashRefs;
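The new test cases above cover the "diff --git" header variants produced by --src-prefix/--dst-prefix, --no-prefix, and paths containing spaces. A minimal sketch, not the actual parseGitDiffHeader() logic, of the header shapes involved and one way a prefix can be stripped to recover the index path:

# Sketch only: header shapes exercised by the tests above.
my @headers = (
    'diff --git a/foo.h b/foo.h',                         # default a/ b/ prefixes
    'diff --git s/foo.h d/foo.h',                         # --src-prefix=s/ --dst-prefix=d/
    'diff --git directory/foo bar.h directory/foo bar.h', # --no-prefix with a space in the path
);
foreach my $header (@headers) {
    if ($header =~ m{^diff --git \w/(.+) \w/\1$} || $header =~ m{^diff --git (.+) \1$}) {
        print "indexPath: $1\n";
    }
}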
diff --git a/Tools/Scripts/webkitperl/filter-build-webkit_unittest/shouldIgnoreLine_unittests.pl b/Tools/Scripts/webkitperl/filter-build-webkit_unittest/shouldIgnoreLine_unittests.pl
new file mode 100644
index 000000000..0970e9aa2
--- /dev/null
+++ b/Tools/Scripts/webkitperl/filter-build-webkit_unittest/shouldIgnoreLine_unittests.pl
@@ -0,0 +1,120 @@
+#!/usr/bin/perl -w
+#
+# Copyright (C) 2013 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script tests parts of filter-build-webkit.
+# This script runs the unittests specified in @testFiles.
+
+use strict;
+use warnings;
+
+use English;
+use FindBin;
+use Test::More;
+use lib File::Spec->catdir($FindBin::Bin, "..");
+use LoadAsModule qw(FilterBuildWebKit filter-build-webkit);
+
+sub description($);
+
+@FilterBuildWebKit::EXPORT_OK = qw(shouldIgnoreLine);
+FilterBuildWebKit->import(@FilterBuildWebKit::EXPORT_OK);
+
+#
+# Test whitespace
+#
+is(shouldIgnoreLine("", ""), 1, "Ignored: empty line");
+is(shouldIgnoreLine("", " "), 1, "Ignored: one space");
+is(shouldIgnoreLine("", "\t"), 1, "Ignored: one tab");
+
+#
+# Test input that should be ignored regardless of previous line
+#
+my @expectIgnoredLines = split(/$INPUT_RECORD_SEPARATOR/, <<'END');
+make: Nothing to be done for `all'.
+JavaScriptCore/create_hash_table JavaScriptCore/runtime/ArrayConstructor.cpp -i > ArrayConstructor.lut.h
+Creating hashtable for JavaScriptCore/runtime/ArrayConstructor.cpp
+Wrote output to /Volumes/Data/Build/Release/DerivedSources/WebCore/ExportFileGenerator.cpp
+/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/libtool: file: /Volumes/Data/Build/ANGLE.build/Release/ANGLE.build/Objects-normal/i386/debug.o has no symbols
+Showing first 200 notices only
+printf "WebCore/Modules/encryptedmedia/MediaKeyMessageEvent.idl\nWebCore/Modules/encryptedmedia/MediaKeyNeededEvent.idl\nWebCore/Modules/encryptedmedia/MediaKeySession.idl\nWebCore/Modules/encryptedmedia/MediaKeys.idl\nWebCore/Modules/filesystem/DOMFileSystem.idl\nWebCore/Modules/filesystem/DOMFileSystemSync.idl\nWebCore/Modules/filesystem/DOMWindowFileSystem.idl\nWebCore/Modules/filesystem/DirectoryEntry.idl\nWebCore/Modules/filesystem/DirectoryEntrySync.idl\nWebCore/Modules/filesystem/DirectoryReader.idl\nWebCore/Modules/filesystem/DirectoryReaderSync.idl\nWebCore/Modules/filesystem/EntriesCallback.idl\nWebCore/Modules/filesystem/Entry.idl\nWebCore/Modules/filesystem/EntryArray.idl\nWebCore/Modules/filesystem/EntryArraySync.idl\nWebCore/Modules/filesystem/EntryCallback.idl\nWebCore/Modules/filesystem/EntrySync.idl\nWebCore/Modules/filesystem/ErrorCallback.idl\nWebCore/Modules/filesystem/FileCallback.idl\nWebCore/Modules/filesystem/FileEntry.idl\nWebCore/Modules/filesystem/FileEntrySync.idl\nWebCore/Modules/filesystem/FileSystemCallback.idl\nWebCore/Modules/filesystem/FileWriter.idl\nWebCore/Modules/filesystem/FileWriterCallback.idl\nWebCore/Modules/filesystem/FileWriterSync.idl\nWebCore/Modules/filesystem/Metadata.idl\nWebCore/Modules/filesystem/MetadataCallback.idl\nWebCore/Modules/filesystem/WorkerContextFileSystem.idl\nWebCore/Modules/geolocation/Coordinates.idl\nWebCore/Modules/geolocation/Geolocation.idl\nWebCore/Modules/geolocation/Geoposition.idl\nWebCore/Modules/geolocation/NavigatorGeolocation.idl\nWebCore/Modules/geolocation/PositionCallback.idl\nWebCore/Modules/geolocation/PositionError.idl\nWebCore/Modules/geolocation/PositionErrorCallback.idl\nWebCore/Modules/indexeddb/DOMWindowIndexedDatabase.idl\nWebCore/Modules/indexeddb/IDBAny.idl\nWebCore/Modules/indexeddb/IDBCursor.idl\nWebCore/Modules/indexeddb/IDBDatabase.idl\nWebCore/Modules/indexeddb/IDBFactory.idl\nWebCore/Modules/indexeddb/IDBIndex.idl\nWebCore/Modules/indexeddb/IDBKeyRange.idl\nWebCore/Modules/indexeddb/IDBObjectStore.idl\nWebCore/Modules/indexeddb/IDBRequest.idl\nWebCore/Modules/indexeddb/IDBTransaction.idl\nWebCore/Modules/indexeddb/IDBVersionChangeEvent.idl\nWebCore/Modules/indexeddb/WorkerContextIndexedDatabase.idl\nWebCore/Modules/mediasource/MediaSource.idl\nWebCore/Modules/mediasource/SourceBuffer.idl\nWebCore/Modules/mediasource/SourceBufferList.idl\nWebCore/Modules/notifications/DOMWindowNotifications.idl\nWebCore/Modules/notifications/Notification.idl\nWebCore/Modules/notifications/NotificationCenter.idl\nWebCore/Modules/notifications/NotificationPermissionCallback.idl\nWebCore/Modules/notifications/WorkerContextNotifications.idl\nWebCore/Modules/quota/DOMWindowQuota.idl\nWebCore/Modules/quota/NavigatorStorageQuota.idl\nWebCore/Modules/quota/StorageInfo.idl\nWebCore/Modules/quota/StorageErrorCallback.idl\nWebCore/Modules/quota/StorageQuota.idl\nWebCore/Modules/quota/StorageQuotaCallback.idl\nWebCore/Modules/quota/StorageUsageCallback.idl\nWebCore/Modules/quota/WorkerNavigatorStorageQuota.idl\nWebCore/Modules/speech/DOMWindowSpeechSynthesis.idl\nWebCore/Modules/speech/SpeechSynthesis.idl\nWebCore/Modules/speech/SpeechSynthesisEvent.idl\nWebCore/Modules/speech/SpeechSynthesisUtterance.idl\nWebCore/Modules/speech/SpeechSynthesisVoice.idl\nWebCore/Modules/webaudio/AudioBuffer.idl\nWebCore/Modules/webaudio/AudioBufferCallback.idl\nWebCore/Modules/webaudio/AudioBufferSourceNode.idl\nWebCore/Modules/webaudio/ChannelMergerNode.idl\nWebCore/Modules/webaudio/ChannelSplitterNode.idl\nWebCore/Modules/webaudio/Audi
oContext.idl\nWebCore/Modules/webaudio/AudioDestinationNode.idl\nWebCore/Modules/webaudio/GainNode.idl\nWebCore/Modules/webaudio/AudioListener.idl\nWebCore/Modules/webaudio/AudioNode.idl\nWebCore/Modules/webaudio/PannerNode.idl\nWebCore/Modules/webaudio/AudioParam.idl\nWebCore/Modules/webaudio/AudioProcessingEvent.idl\nWebCore/Modules/webaudio/BiquadFilterNode.idl\nWebCore/Modules/webaudio/ConvolverNode.idl\nWebCore/Modules/webaudio/DOMWindowWebAudio.idl\nWebCore/Modules/webaudio/DelayNode.idl\nWebCore/Modules/webaudio/DynamicsCompressorNode.idl\nWebCore/Modules/webaudio/ScriptProcessorNode.idl\nWebCore/Modules/webaudio/MediaElementAudioSourceNode.idl\nWebCore/Modules/webaudio/MediaStreamAudioSourceNode.idl\nWebCore/Modules/webaudio/OscillatorNode.idl\nWebCore/Modules/webaudio/OfflineAudioContext.idl\nWebCore/Modules/webaudio/OfflineAudioCompletionEvent.idl\nWebCore/Modules/webaudio/AnalyserNode.idl\nWebCore/Modules/webaudio/WaveShaperNode.idl\nWebCore/Modules/webaudio/WaveTable.idl\nWebCore/Modules/webdatabase/DOMWindowWebDatabase.idl\nWebCore/Modules/webdatabase/Database.idl\nWebCore/Modules/webdatabase/DatabaseCallback.idl\nWebCore/Modules/webdatabase/DatabaseSync.idl\nWebCore/Modules/webdatabase/SQLError.idl\nWebCore/Modules/webdatabase/SQLException.idl\nWebCore/Modules/webdatabase/SQLResultSet.idl\nWebCore/Modules/webdatabase/SQLResultSetRowList.idl\nWebCore/Modules/webdatabase/SQLStatementCallback.idl\nWebCore/Modules/webdatabase/SQLStatementErrorCallback.idl\nWebCore/Modules/webdatabase/SQLTransaction.idl\nWebCore/Modules/webdatabase/SQLTransactionCallback.idl\nWebCore/Modules/webdatabase/SQLTransactionErrorCallback.idl\nWebCore/Modules/webdatabase/SQLTransactionSync.idl\nWebCore/Modules/webdatabase/SQLTransactionSyncCallback.idl\nWebCore/Modules/webdatabase/WorkerContextWebDatabase.idl\nWebCore/Modules/websockets/CloseEvent.idl\nWebCore/Modules/websockets/DOMWindowWebSocket.idl\nWebCore/Modules/websockets/WebSocket.idl\nWebCore/Modules/websockets/WorkerContextWebSocket.idl\nWebCore/css/CSSCharsetRule.idl\nWebCore/css/CSSFontFaceLoadEvent.idl\nWebCore/css/CSSFontFaceRule.idl\nWebCore/css/CSSHostRule.idl\nWebCore/css/CSSImportRule.idl\nWebCore/css/CSSMediaRule.idl\nWebCore/css/CSSPageRule.idl\nWebCore/css/CSSPrimitiveValue.idl\nWebCore/css/CSSRule.idl\nWebCore/css/CSSRuleList.idl\nWebCore/css/CSSStyleDeclaration.idl\nWebCore/css/CSSStyleRule.idl\nWebCore/css/CSSStyleSheet.idl\nWebCore/css/CSSSupportsRule.idl\nWebCore/css/CSSUnknownRule.idl\nWebCore/css/CSSValue.idl\nWebCore/css/CSSValueList.idl\nWebCore/css/Counter.idl\nWebCore/css/DOMWindowCSS.idl\nWebCore/css/FontLoader.idl\nWebCore/css/MediaList.idl\nWebCore/css/MediaQueryList.idl\nWebCore/css/MediaQueryListListener.idl\nWebCore/css/RGBColor.idl\nWebCore/css/Rect.idl\nWebCore/css/StyleMedia.idl\nWebCore/css/StyleSheet.idl\nWebCore/css/StyleSheetList.idl\nWebCore/css/WebKitCSSFilterValue.idl\nWebCore/css/WebKitCSSFilterRule.idl\nWebCore/css/WebKitCSSKeyframeRule.idl\nWebCore/css/WebKitCSSKeyframesRule.idl\nWebCore/css/WebKitCSSMatrix.idl\nWebCore/css/WebKitCSSMixFunctionValue.idl\nWebCore/css/WebKitCSSRegionRule.idl\nWebCore/css/WebKitCSSTransformValue.idl\nWebCore/css/WebKitCSSViewportRule.idl\nWebCore/dom/Attr.idl\nWebCore/dom/BeforeLoadEvent.idl\nWebCore/dom/CDATASection.idl\nWebCore/dom/CharacterData.idl\nWebCore/dom/ClientRect.idl\nWebCore/dom/ClientRectList.idl\nWebCore/dom/Clipboard.idl\nWebCore/dom/Comment.idl\nWebCore/dom/CompositionEvent.idl\nWebCore/dom/CustomElementConstructor.idl\nWebCore/dom/CustomEvent.idl\nWebCore/do
m/DOMCoreException.idl\nWebCore/dom/DOMError.idl\nWebCore/dom/DOMImplementation.idl\nWebCore/dom/DOMStringList.idl\nWebCore/dom/DOMStringMap.idl\nWebCore/dom/DataTransferItem.idl\nWebCore/dom/DataTransferItemList.idl\nWebCore/dom/DeviceMotionEvent.idl\nWebCore/dom/DeviceOrientationEvent.idl\nWebCore/dom/Document.idl\nWebCore/dom/DocumentFragment.idl\nWebCore/dom/DocumentType.idl\nWebCore/dom/Element.idl\nWebCore/dom/Entity.idl\nWebCore/dom/EntityReference.idl\nWebCore/dom/ErrorEvent.idl\nWebCore/dom/Event.idl\nWebCore/dom/EventException.idl\nWebCore/dom/EventListener.idl\nWebCore/dom/EventTarget.idl\nWebCore/dom/FocusEvent.idl\nWebCore/dom/HashChangeEvent.idl\nWebCore/dom/KeyboardEvent.idl\nWebCore/dom/MessageChannel.idl\nWebCore/dom/MessageEvent.idl\nWebCore/dom/MessagePort.idl\nWebCore/dom/MouseEvent.idl\nWebCore/dom/MutationEvent.idl\nWebCore/dom/MutationObserver.idl\nWebCore/dom/MutationRecord.idl\nWebCore/dom/DOMNamedFlowCollection.idl\nWebCore/dom/NamedNodeMap.idl\nWebCore/dom/Node.idl\nWebCore/dom/NodeFilter.idl\nWebCore/dom/NodeIterator.idl\nWebCore/dom/NodeList.idl\nWebCore/dom/Notation.idl\nWebCore/dom/OverflowEvent.idl\nWebCore/dom/PageTransitionEvent.idl\nWebCore/dom/PopStateEvent.idl\nWebCore/dom/ProcessingInstruction.idl\nWebCore/dom/ProgressEvent.idl\nWebCore/dom/ProgressEvent.idl\nWebCore/dom/PropertyNodeList.idl\nWebCore/dom/Range.idl\nWebCore/dom/RangeException.idl\nWebCore/dom/RequestAnimationFrameCallback.idl\nWebCore/dom/ShadowRoot.idl\nWebCore/dom/StringCallback.idl\nWebCore/dom/Text.idl\nWebCore/dom/TextEvent.idl\nWebCore/dom/Touch.idl\nWebCore/dom/TouchEvent.idl\nWebCore/dom/TouchList.idl\nWebCore/dom/TransitionEvent.idl\nWebCore/dom/TreeWalker.idl\nWebCore/dom/UIEvent.idl\nWebCore/dom/WebKitAnimationEvent.idl\nWebCore/dom/WebKitNamedFlow.idl\nWebCore/dom/WebKitTransitionEvent.idl\nWebCore/dom/WheelEvent.idl\nWebCore/fileapi/Blob.idl\nWebCore/fileapi/File.idl\nWebCore/fileapi/FileError.idl\nWebCore/fileapi/FileException.idl\nWebCore/fileapi/FileList.idl\nWebCore/fileapi/FileReader.idl\nWebCore/fileapi/FileReaderSync.idl\nWebCore/html/DOMFormData.idl\nWebCore/html/DOMSettableTokenList.idl\nWebCore/html/DOMTokenList.idl\nWebCore/html/DOMURL.idl\nWebCore/html/HTMLAllCollection.idl\nWebCore/html/HTMLAnchorElement.idl\nWebCore/html/HTMLAppletElement.idl\nWebCore/html/HTMLAreaElement.idl\nWebCore/html/HTMLAudioElement.idl\nWebCore/html/HTMLBRElement.idl\nWebCore/html/HTMLBaseElement.idl\nWebCore/html/HTMLBaseFontElement.idl\nWebCore/html/HTMLBodyElement.idl\nWebCore/html/HTMLButtonElement.idl\nWebCore/html/HTMLCanvasElement.idl\nWebCore/html/HTMLCollection.idl\nWebCore/html/HTMLDListElement.idl\nWebCore/html/HTMLDataListElement.idl\nWebCore/html/HTMLDetailsElement.idl\nWebCore/html/HTMLDialogElement.idl\nWebCore/html/HTMLDirectoryElement.idl\nWebCore/html/HTMLDivElement.idl\nWebCore/html/HTMLDocument.idl\nWebCore/html/HTMLElement.idl\nWebCore/html/HTMLEmbedElement.idl\nWebCore/html/HTMLFieldSetElement.idl\nWebCore/html/HTMLFontElement.idl\nWebCore/html/HTMLFormControlsCollection.idl\nWebCore/html/HTMLFormElement.idl\nWebCore/html/HTMLFrameElement.idl\nWebCore/html/HTMLFrameSetElement.idl\nWebCore/html/HTMLHRElement.idl\nWebCore/html/HTMLHeadElement.idl\nWebCore/html/HTMLHeadingElement.idl\nWebCore/html/HTMLHtmlElement.idl\nWebCore/html/HTMLIFrameElement.idl\nWebCore/html/HTMLImageElement.idl\nWebCore/html/HTMLInputElement.idl\nWebCore/html/HTMLKeygenElement.idl\nWebCore/html/HTMLLIElement.idl\nWebCore/html/HTMLLabelElement.idl\nWebCore/html/HTMLLegendElement.idl\nWebCore/h
tml/HTMLLinkElement.idl\nWebCore/html/HTMLMapElement.idl\nWebCore/html/HTMLMarqueeElement.idl\nWebCore/html/HTMLMediaElement.idl\nWebCore/html/HTMLMenuElement.idl\nWebCore/html/HTMLMetaElement.idl\nWebCore/html/HTMLMeterElement.idl\nWebCore/html/HTMLModElement.idl\nWebCore/html/HTMLOListElement.idl\nWebCore/html/HTMLObjectElement.idl\nWebCore/html/HTMLOptGroupElement.idl\nWebCore/html/HTMLOptionElement.idl\nWebCore/html/HTMLOptionsCollection.idl\nWebCore/html/HTMLOutputElement.idl\nWebCore/html/HTMLParagraphElement.idl\nWebCore/html/HTMLParamElement.idl\nWebCore/html/HTMLPreElement.idl\nWebCore/html/HTMLProgressElement.idl\nWebCore/html/HTMLPropertiesCollection.idl\nWebCore/html/HTMLQuoteElement.idl\nWebCore/html/HTMLScriptElement.idl\nWebCore/html/HTMLSelectElement.idl\nWebCore/html/HTMLSourceElement.idl\nWebCore/html/HTMLSpanElement.idl\nWebCore/html/HTMLStyleElement.idl\nWebCore/html/HTMLTableCaptionElement.idl\nWebCore/html/HTMLTableCellElement.idl\nWebCore/html/HTMLTableColElement.idl\nWebCore/html/HTMLTableElement.idl\nWebCore/html/HTMLTableRowElement.idl\nWebCore/html/HTMLTableSectionElement.idl\nWebCore/html/HTMLTemplateElement.idl\nWebCore/html/HTMLTextAreaElement.idl\nWebCore/html/HTMLTitleElement.idl\nWebCore/html/HTMLTrackElement.idl\nWebCore/html/HTMLUListElement.idl\nWebCore/html/HTMLUnknownElement.idl\nWebCore/html/HTMLVideoElement.idl\nWebCore/html/ImageData.idl\nWebCore/html/MediaController.idl\nWebCore/html/MediaError.idl\nWebCore/html/MediaKeyError.idl\nWebCore/html/MediaKeyEvent.idl\nWebCore/html/MicroDataItemValue.idl\nWebCore/html/RadioNodeList.idl\nWebCore/html/TextMetrics.idl\nWebCore/html/TimeRanges.idl\nWebCore/html/ValidityState.idl\nWebCore/html/VoidCallback.idl\nWebCore/html/canvas/ArrayBuffer.idl\nWebCore/html/canvas/ArrayBufferView.idl\nWebCore/html/canvas/CanvasGradient.idl\nWebCore/html/canvas/CanvasPattern.idl\nWebCore/html/canvas/CanvasProxy.idl\nWebCore/html/canvas/CanvasRenderingContext.idl\nWebCore/html/canvas/CanvasRenderingContext2D.idl\nWebCore/html/canvas/DataView.idl\nWebCore/html/canvas/DOMPath.idl\nWebCore/html/canvas/EXTDrawBuffers.idl\nWebCore/html/canvas/EXTTextureFilterAnisotropic.idl\nWebCore/html/canvas/Float32Array.idl\nWebCore/html/canvas/Float64Array.idl\nWebCore/html/canvas/Int16Array.idl\nWebCore/html/canvas/Int32Array.idl\nWebCore/html/canvas/Int8Array.idl\nWebCore/html/canvas/OESElementIndexUint.idl\nWebCore/html/canvas/OESStandardDerivatives.idl\nWebCore/html/canvas/OESTextureFloat.idl\nWebCore/html/canvas/OESTextureHalfFloat.idl\nWebCore/html/canvas/OESVertexArrayObject.idl\nWebCore/html/canvas/Uint16Array.idl\nWebCore/html/canvas/Uint32Array.idl\nWebCore/html/canvas/Uint8Array.idl\nWebCore/html/canvas/Uint8ClampedArray.idl\nWebCore/html/canvas/WebGLActiveInfo.idl\nWebCore/html/canvas/WebGLBuffer.idl\nWebCore/html/canvas/WebGLCompressedTextureATC.idl\nWebCore/html/canvas/WebGLCompressedTexturePVRTC.idl\nWebCore/html/canvas/WebGLCompressedTextureS3TC.idl\nWebCore/html/canvas/WebGLContextAttributes.idl\nWebCore/html/canvas/WebGLContextEvent.idl\nWebCore/html/canvas/WebGLDepthTexture.idl\nWebCore/html/canvas/WebGLFramebuffer.idl\nWebCore/html/canvas/WebGLLoseContext.idl\nWebCore/html/canvas/WebGLProgram.idl\nWebCore/html/canvas/WebGLRenderbuffer.idl\nWebCore/html/canvas/WebGLRenderingContext.idl\nWebCore/html/canvas/WebGLShader.idl\nWebCore/html/canvas/WebGLShaderPrecisionFormat.idl\nWebCore/html/canvas/WebGLTexture.idl\nWebCore/html/canvas/WebGLUniformLocation.idl\nWebCore/html/canvas/WebGLVertexArrayObjectOES.idl\nWebCore/html/shado
w/HTMLContentElement.idl\nWebCore/html/shadow/HTMLShadowElement.idl\nWebCore/html/track/TextTrack.idl\nWebCore/html/track/TextTrackCue.idl\nWebCore/html/track/TextTrackCueList.idl\nWebCore/html/track/TextTrackList.idl\nWebCore/html/track/TrackEvent.idl\nWebCore/inspector/InjectedScriptHost.idl\nWebCore/inspector/InspectorFrontendHost.idl\nWebCore/inspector/ScriptProfile.idl\nWebCore/inspector/ScriptProfileNode.idl\nWebCore/loader/appcache/DOMApplicationCache.idl\nWebCore/page/AbstractView.idl\nWebCore/page/BarInfo.idl\nWebCore/page/Console.idl\nWebCore/page/Crypto.idl\nWebCore/page/DOMSecurityPolicy.idl\nWebCore/page/DOMSelection.idl\nWebCore/page/DOMWindow.idl\nWebCore/page/EventSource.idl\nWebCore/page/History.idl\nWebCore/page/Location.idl\nWebCore/page/Navigator.idl\nWebCore/page/Performance.idl\nWebCore/page/PerformanceNavigation.idl\nWebCore/page/PerformanceTiming.idl\nWebCore/page/Screen.idl\nWebCore/page/SpeechInputEvent.idl\nWebCore/page/SpeechInputResult.idl\nWebCore/page/SpeechInputResultList.idl\nWebCore/page/WebKitPoint.idl\nWebCore/page/WorkerNavigator.idl\nWebCore/plugins/DOMMimeType.idl\nWebCore/plugins/DOMMimeTypeArray.idl\nWebCore/plugins/DOMPlugin.idl\nWebCore/plugins/DOMPluginArray.idl\nWebCore/storage/Storage.idl\nWebCore/storage/StorageEvent.idl\nWebCore/svg/ElementTimeControl.idl\nWebCore/svg/SVGAElement.idl\nWebCore/svg/SVGAltGlyphDefElement.idl\nWebCore/svg/SVGAltGlyphElement.idl\nWebCore/svg/SVGAltGlyphItemElement.idl\nWebCore/svg/SVGAngle.idl\nWebCore/svg/SVGAnimateColorElement.idl\nWebCore/svg/SVGAnimateElement.idl\nWebCore/svg/SVGAnimateMotionElement.idl\nWebCore/svg/SVGAnimateTransformElement.idl\nWebCore/svg/SVGAnimatedAngle.idl\nWebCore/svg/SVGAnimatedBoolean.idl\nWebCore/svg/SVGAnimatedEnumeration.idl\nWebCore/svg/SVGAnimatedInteger.idl\nWebCore/svg/SVGAnimatedLength.idl\nWebCore/svg/SVGAnimatedLengthList.idl\nWebCore/svg/SVGAnimatedNumber.idl\nWebCore/svg/SVGAnimatedNumberList.idl\nWebCore/svg/SVGAnimatedPreserveAspectRatio.idl\nWebCore/svg/SVGAnimatedRect.idl\nWebCore/svg/SVGAnimatedString.idl\nWebCore/svg/SVGAnimatedTransformList.idl\nWebCore/svg/SVGAnimationElement.idl\nWebCore/svg/SVGCircleElement.idl\nWebCore/svg/SVGClipPathElement.idl\nWebCore/svg/SVGColor.idl\nWebCore/svg/SVGComponentTransferFunctionElement.idl\nWebCore/svg/SVGCursorElement.idl\nWebCore/svg/SVGDefsElement.idl\nWebCore/svg/SVGDescElement.idl\nWebCore/svg/SVGDocument.idl\nWebCore/svg/SVGElement.idl\nWebCore/svg/SVGElementInstance.idl\nWebCore/svg/SVGElementInstanceList.idl\nWebCore/svg/SVGEllipseElement.idl\nWebCore/svg/SVGException.idl\nWebCore/svg/SVGExternalResourcesRequired.idl\nWebCore/svg/SVGFEBlendElement.idl\nWebCore/svg/SVGFEColorMatrixElement.idl\nWebCore/svg/SVGFEComponentTransferElement.idl\nWebCore/svg/SVGFECompositeElement.idl\nWebCore/svg/SVGFEConvolveMatrixElement.idl\nWebCore/svg/SVGFEDiffuseLightingElement.idl\nWebCore/svg/SVGFEDisplacementMapElement.idl\nWebCore/svg/SVGFEDistantLightElement.idl\nWebCore/svg/SVGFEDropShadowElement.idl\nWebCore/svg/SVGFEFloodElement.idl\nWebCore/svg/SVGFEFuncAElement.idl\nWebCore/svg/SVGFEFuncBElement.idl\nWebCore/svg/SVGFEFuncGElement.idl\nWebCore/svg/SVGFEFuncRElement.idl\nWebCore/svg/SVGFEGaussianBlurElement.idl\nWebCore/svg/SVGFEImageElement.idl\nWebCore/svg/SVGFEMergeElement.idl\nWebCore/svg/SVGFEMergeNodeElement.idl\nWebCore/svg/SVGFEMorphologyElement.idl\nWebCore/svg/SVGFEOffsetElement.idl\nWebCore/svg/SVGFEPointLightElement.idl\nWebCore/svg/SVGFESpecularLightingElement.idl\nWebCore/svg/SVGFESpotLightElement.idl\nWebCore/svg/SVG
FETileElement.idl\nWebCore/svg/SVGFETurbulenceElement.idl\nWebCore/svg/SVGFilterElement.idl\nWebCore/svg/SVGFilterPrimitiveStandardAttributes.idl\nWebCore/svg/SVGFitToViewBox.idl\nWebCore/svg/SVGFontElement.idl\nWebCore/svg/SVGFontFaceElement.idl\nWebCore/svg/SVGFontFaceFormatElement.idl\nWebCore/svg/SVGFontFaceNameElement.idl\nWebCore/svg/SVGFontFaceSrcElement.idl\nWebCore/svg/SVGFontFaceUriElement.idl\nWebCore/svg/SVGForeignObjectElement.idl\nWebCore/svg/SVGGElement.idl\nWebCore/svg/SVGGlyphElement.idl\nWebCore/svg/SVGGlyphRefElement.idl\nWebCore/svg/SVGGradientElement.idl\nWebCore/svg/SVGHKernElement.idl\nWebCore/svg/SVGImageElement.idl\nWebCore/svg/SVGLangSpace.idl\nWebCore/svg/SVGLength.idl\nWebCore/svg/SVGLengthList.idl\nWebCore/svg/SVGLineElement.idl\nWebCore/svg/SVGLinearGradientElement.idl\nWebCore/svg/SVGLocatable.idl\nWebCore/svg/SVGMPathElement.idl\nWebCore/svg/SVGMarkerElement.idl\nWebCore/svg/SVGMaskElement.idl\nWebCore/svg/SVGMatrix.idl\nWebCore/svg/SVGMetadataElement.idl\nWebCore/svg/SVGMissingGlyphElement.idl\nWebCore/svg/SVGNumber.idl\nWebCore/svg/SVGNumberList.idl\nWebCore/svg/SVGPaint.idl\nWebCore/svg/SVGPathElement.idl\nWebCore/svg/SVGPathSeg.idl\nWebCore/svg/SVGPathSegArcAbs.idl\nWebCore/svg/SVGPathSegArcRel.idl\nWebCore/svg/SVGPathSegClosePath.idl\nWebCore/svg/SVGPathSegCurvetoCubicAbs.idl\nWebCore/svg/SVGPathSegCurvetoCubicRel.idl\nWebCore/svg/SVGPathSegCurvetoCubicSmoothAbs.idl\nWebCore/svg/SVGPathSegCurvetoCubicSmoothRel.idl\nWebCore/svg/SVGPathSegCurvetoQuadraticAbs.idl\nWebCore/svg/SVGPathSegCurvetoQuadraticRel.idl\nWebCore/svg/SVGPathSegCurvetoQuadraticSmoothAbs.idl\nWebCore/svg/SVGPathSegCurvetoQuadraticSmoothRel.idl\nWebCore/svg/SVGPathSegLinetoAbs.idl\nWebCore/svg/SVGPathSegLinetoHorizontalAbs.idl\nWebCore/svg/SVGPathSegLinetoHorizontalRel.idl\nWebCore/svg/SVGPathSegLinetoRel.idl\nWebCore/svg/SVGPathSegLinetoVerticalAbs.idl\nWebCore/svg/SVGPathSegLinetoVerticalRel.idl\nWebCore/svg/SVGPathSegList.idl\nWebCore/svg/SVGPathSegMovetoAbs.idl\nWebCore/svg/SVGPathSegMovetoRel.idl\nWebCore/svg/SVGPatternElement.idl\nWebCore/svg/SVGPoint.idl\nWebCore/svg/SVGPointList.idl\nWebCore/svg/SVGPolygonElement.idl\nWebCore/svg/SVGPolylineElement.idl\nWebCore/svg/SVGPreserveAspectRatio.idl\nWebCore/svg/SVGRadialGradientElement.idl\nWebCore/svg/SVGRect.idl\nWebCore/svg/SVGRectElement.idl\nWebCore/svg/SVGRenderingIntent.idl\nWebCore/svg/SVGSVGElement.idl\nWebCore/svg/SVGScriptElement.idl\nWebCore/svg/SVGSetElement.idl\nWebCore/svg/SVGStopElement.idl\nWebCore/svg/SVGStringList.idl\nWebCore/svg/SVGStyleElement.idl\nWebCore/svg/SVGStyledElement.idl\nWebCore/svg/SVGSwitchElement.idl\nWebCore/svg/SVGSymbolElement.idl\nWebCore/svg/SVGTRefElement.idl\nWebCore/svg/SVGTSpanElement.idl\nWebCore/svg/SVGTests.idl\nWebCore/svg/SVGTextContentElement.idl\nWebCore/svg/SVGTextElement.idl\nWebCore/svg/SVGTextPathElement.idl\nWebCore/svg/SVGTextPositioningElement.idl\nWebCore/svg/SVGTitleElement.idl\nWebCore/svg/SVGTransform.idl\nWebCore/svg/SVGTransformList.idl\nWebCore/svg/SVGTransformable.idl\nWebCore/svg/SVGURIReference.idl\nWebCore/svg/SVGUnitTypes.idl\nWebCore/svg/SVGUseElement.idl\nWebCore/svg/SVGVKernElement.idl\nWebCore/svg/SVGViewElement.idl\nWebCore/svg/SVGViewSpec.idl\nWebCore/svg/SVGZoomAndPan.idl\nWebCore/svg/SVGZoomEvent.idl\nWebCore/testing/Internals.idl\nWebCore/testing/InternalSettings.idl\nWebCore/testing/MallocStatistics.idl\nWebCore/testing/MemoryInfo.idl\nWebCore/testing/TypeConversions.idl\nWebCore/workers/AbstractWorker.idl\nWebCore/workers/DedicatedWorkerContext.idl\nWebCore
/workers/SharedWorker.idl\nWebCore/workers/SharedWorkerContext.idl\nWebCore/workers/Worker.idl\nWebCore/workers/WorkerContext.idl\nWebCore/workers/WorkerLocation.idl\nWebCore/xml/DOMParser.idl\nWebCore/xml/XMLHttpRequest.idl\nWebCore/xml/XMLHttpRequestException.idl\nWebCore/xml/XMLHttpRequestProgressEvent.idl\nWebCore/xml/XMLHttpRequestUpload.idl\nWebCore/xml/XMLSerializer.idl\nWebCore/xml/XPathEvaluator.idl\nWebCore/xml/XPathException.idl\nWebCore/xml/XPathExpression.idl\nWebCore/xml/XPathNSResolver.idl\nWebCore/xml/XPathResult.idl\nWebCore/xml/XSLTProcessor.idl\nInternalSettingsGenerated.idl\nWebCore/inspector/JavaScriptCallFrame.idl\n" > ./idl_files.tmp
+perl JavaScriptCore/docs/make-bytecode-docs.pl JavaScriptCore/interpreter/Interpreter.cpp docs/bytecode.html
+cat WebCore/css/CSSPropertyNames.in WebCore/css/SVGCSSPropertyNames.in > CSSPropertyNames.in
+rm -f ./idl_files.tmp
+python JavaScriptCore/KeywordLookupGenerator.py JavaScriptCore/parser/Keywords.table > KeywordLookup.h
+sed -e s/\<WebCore/\<WebKit/ -e s/DOMDOMImplementation/DOMImplementation/ /Volumes/Data/Build/Release/WebCore.framework/PrivateHeaders/DOM.h > /Volumes/Data/Build/Release/WebKit.framework/Versions/A/Headers/DOM.h
+END
+
+for my $line (@expectIgnoredLines) {
+ is(shouldIgnoreLine("", $line), 1, description("Ignored: " . $line));
+}
+
+#
+# Test input starting with four spaces
+#
+my @buildSettingsLines = split(/$INPUT_RECORD_SEPARATOR/, <<'END');
+Build settings from command line:
+ ARCHS = i386 x86_64
+ OBJROOT = /Volumes/Data/Build
+ ONLY_ACTIVE_ARCH = NO
+ SHARED_PRECOMPS_DIR = /Volumes/Data/Build/PrecompiledHeaders
+ SYMROOT = /Volumes/Data/Build
+END
+
+for my $i (0..scalar(@buildSettingsLines) - 1) {
+ my $previousLine = $i ? $buildSettingsLines[$i - 1] : "";
+ my $line = $buildSettingsLines[$i];
+ is(shouldIgnoreLine($previousLine, $line), 1, description("Ignored: " . $line));
+}
+
+#
+# Test input for undefined symbols error message
+#
+my @undefinedSymbolsLines = split(/$INPUT_RECORD_SEPARATOR/, <<'END');
+Undefined symbols for architecture x86_64:
+ "__ZN6WebKit12WebPageProxy28exposedRectChangedTimerFiredEPN7WebCore5TimerIS0_EE", referenced from:
+ __ZN6WebKit12WebPageProxyC2EPNS_10PageClientEN3WTF10PassRefPtrINS_15WebProcessProxyEEEPNS_12WebPageGroupEy in WebPageProxy.o
+ld: symbol(s) not found for architecture x86_64
+clang: error: linker command failed with exit code 1 (use -v to see invocation)
+END
+
+for my $i (0..scalar(@undefinedSymbolsLines) - 1) {
+ my $previousLine = $i ? $undefinedSymbolsLines[$i - 1] : "";
+ my $line = $undefinedSymbolsLines[$i];
+ is(shouldIgnoreLine($previousLine, $line), 0, description("Printed: " . $line));
+}
+
+done_testing();
+
+sub description($)
+{
+ my ($line) = @_;
+
+ my $maxLineLength = 200;
+ my $ellipsis = "...";
+ my $truncateLength = $maxLineLength - length($ellipsis);
+
+ my $description = length($line) > $maxLineLength ? substr($line, 0, $truncateLength) : $line;
+ $description .= $ellipsis if length($line) != length($description);
+
+ return $description;
+}
diff --git a/Tools/Scripts/webkitperl/httpd.pm b/Tools/Scripts/webkitperl/httpd.pm
index 58ff108d3..f61dfa00b 100644
--- a/Tools/Scripts/webkitperl/httpd.pm
+++ b/Tools/Scripts/webkitperl/httpd.pm
@@ -90,6 +90,14 @@ sub hasHTTPD
return system(@command) == 0;
}
+sub getApacheVersion
+{
+ my $httpdPath = getHTTPDPath();
+ my $version = `$httpdPath -v`;
+ $version =~ s/.*Server version: Apache\/(\d+\.\d+).*/$1/s;
+ return $version;
+}
+
sub getDefaultConfigForTestDirectory
{
my ($testDirectory) = @_;
@@ -112,11 +120,14 @@ sub getDefaultConfigForTestDirectory
"-c", "TypesConfig \"$typesConfig\"",
# Apache wouldn't run CGIs with permissions==700 otherwise
"-c", "User \"#$<\"",
- "-c", "LockFile \"$httpdLockFile\"",
"-c", "PidFile \"$httpdPidFile\"",
"-c", "ScoreBoardFile \"$httpdScoreBoardFile\"",
);
+ if (getApacheVersion() eq "2.2") {
+ push(@httpdArgs, "-c", "LockFile \"$httpdLockFile\"");
+ }
+
# FIXME: Enable this on Windows once <rdar://problem/5345985> is fixed
# The version of Apache we use with Cygwin does not support SSL
my $sslCertificate = "$testDirectory/http/conf/webkit-httpd.pem";
@@ -134,6 +145,7 @@ sub getHTTPDConfigPathForTestDirectory
my $httpdConfig;
my $httpdPath = getHTTPDPath();
my $httpdConfDirectory = "$testDirectory/http/conf/";
+ my $apacheVersion = getApacheVersion();
if (isCygwin()) {
my $libPHP4DllPath = "/usr/lib/apache/libphp4.dll";
@@ -144,9 +156,9 @@ sub getHTTPDConfigPathForTestDirectory
}
$httpdConfig = "cygwin-httpd.conf"; # This is an apache 1.3 config.
} elsif (isDebianBased()) {
- $httpdConfig = "apache2-debian-httpd.conf";
+ $httpdConfig = "debian-httpd-$apacheVersion.conf";
} elsif (isFedoraBased()) {
- $httpdConfig = "fedora-httpd.conf"; # This is an apache2 config, despite the name.
+ $httpdConfig = "fedora-httpd-$apacheVersion.conf";
} else {
# All other ports use apache2, so just use our default apache2 config.
$httpdConfig = "apache2-httpd.conf";
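
The new getApacheVersion() helper above drives which httpd.conf variant gets picked and whether the Apache 2.2-only LockFile directive is passed. A rough Python sketch of the same parsing idea (not part of the patch; the binary name and the "Server version: Apache/x.y" banner are assumptions based on Apache's usual -v output):

    import re
    import subprocess

    def get_apache_version(httpd_path="httpd"):
        # "httpd -v" prints e.g. "Server version: Apache/2.2.24 (Unix)";
        # keep only the major.minor part, as the Perl helper above does.
        output = subprocess.check_output([httpd_path, "-v"]).decode("utf-8", "replace")
        match = re.search(r"Server version: Apache/(\d+\.\d+)", output)
        return match.group(1) if match else None

With that version string in hand, the script can pick a per-version config such as debian-httpd-2.2.conf and only emit LockFile when the detected version is 2.2.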
diff --git a/Tools/Scripts/webkitperl/prepare-ChangeLog_unittest/resources/cpp_unittests-expected.txt b/Tools/Scripts/webkitperl/prepare-ChangeLog_unittest/resources/cpp_unittests-expected.txt
index d94b07406..c58560e98 100644
--- a/Tools/Scripts/webkitperl/prepare-ChangeLog_unittest/resources/cpp_unittests-expected.txt
+++ b/Tools/Scripts/webkitperl/prepare-ChangeLog_unittest/resources/cpp_unittests-expected.txt
@@ -138,11 +138,6 @@
'Class2::func25'
],
[
- '158',
- '159',
- 'Class1'
- ],
- [
'162',
'164',
'Class1::func26'
@@ -183,11 +178,6 @@
'Class103::Class103'
],
[
- '204',
- '205',
- 'Struct1'
- ],
- [
'208',
'210',
'Struct1::func29'
@@ -198,159 +188,34 @@
'Struct2::func30'
],
[
- '219',
- '219',
- 'NameSpace1'
- ],
- [
'220',
'222',
'NameSpace1::func30'
],
[
- '223',
- '223',
- 'NameSpace1'
- ],
- [
- '228',
- '228',
- 'NameSpace2'
- ],
- [
'229',
'231',
'NameSpace1::NameSpace2::func31'
],
[
- '232',
- '232',
- 'NameSpace2'
- ],
- [
- '237',
- '240',
- 'Class104'
- ],
- [
- '244',
- '249',
- 'Class105'
- ],
- [
- '253',
- '254',
- 'Class106'
- ],
- [
'255',
'259',
'Class106::func32'
],
[
- '260',
- '261',
- 'Class106'
- ],
- [
'262',
'266',
'Class106::func33'
],
[
- '267',
- '268',
- 'Class106'
- ],
- [
- '272',
- '273',
- 'NameSpace3'
- ],
- [
- '275',
- '276',
- 'NameSpace4'
- ],
- [
- '278',
- '279',
- 'NameSpace3'
- ],
- [
- '283',
- '284',
- 'NameSpace5'
- ],
- [
- '286',
- '287',
- 'NameSpace6'
- ],
- [
- '289',
- '290',
- 'Class107'
- ],
- [
'291',
'295',
'NameSpace5::NameSpace6::Class107::func34'
],
[
- '296',
- '297',
- 'Class107'
- ],
- [
- '299',
- '300',
- 'NameSpace6'
- ],
- [
- '302',
- '303',
- 'NameSpace5'
- ],
- [
- '307',
- '307',
- 'Class108'
- ],
- [
'308',
'320',
'Class108::func35'
],
- [
- '321',
- '321',
- 'Class108'
- ],
- [
- '340',
- '354',
- 'NameSpace7'
- ],
- [
- '356',
- '369',
- 'NameSpace8'
- ],
- [
- '371',
- '371',
- 'NameSpace7'
- ],
- [
- '373',
- '386',
- 'Class109'
- ],
- [
- '388',
- '388',
- 'NameSpace7'
- ]
]
}
diff --git a/Tools/Scripts/webkitpy/bindings/main.py b/Tools/Scripts/webkitpy/bindings/main.py
index 15884bb73..9c82b9ae6 100644
--- a/Tools/Scripts/webkitpy/bindings/main.py
+++ b/Tools/Scripts/webkitpy/bindings/main.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -62,7 +61,7 @@ class BindingsTests:
exit_code = e.exit_code
return exit_code
- def generate_supplemental_dependency(self, input_directory, supplemental_dependency_file):
+ def generate_supplemental_dependency(self, input_directory, supplemental_dependency_file, window_constructors_file, workerglobalscope_constructors_file, sharedworkerglobalscope_constructors_file, dedicatedworkerglobalscope_constructors_file):
idl_files_list = tempfile.mkstemp()
for input_file in os.listdir(input_directory):
(name, extension) = os.path.splitext(input_file)
@@ -77,7 +76,10 @@ class BindingsTests:
'--idlFilesList', idl_files_list[1],
'--defines', '',
'--supplementalDependencyFile', supplemental_dependency_file,
- '--idlAttributesFile', 'WebCore/bindings/scripts/IDLAttributes.txt']
+ '--windowConstructorsFile', window_constructors_file,
+ '--workerGlobalScopeConstructorsFile', workerglobalscope_constructors_file,
+ '--sharedWorkerGlobalScopeConstructorsFile', sharedworkerglobalscope_constructors_file,
+ '--dedicatedWorkerGlobalScopeConstructorsFile', dedicatedworkerglobalscope_constructors_file]
exit_code = 0
try:
@@ -152,9 +154,17 @@ class BindingsTests:
input_directory = os.path.join('WebCore', 'bindings', 'scripts', 'test')
supplemental_dependency_file = tempfile.mkstemp()[1]
- if self.generate_supplemental_dependency(input_directory, supplemental_dependency_file):
+ window_constructors_file = tempfile.mkstemp()[1]
+ workerglobalscope_constructors_file = tempfile.mkstemp()[1]
+ sharedworkerglobalscope_constructors_file = tempfile.mkstemp()[1]
+ dedicatedworkerglobalscope_constructors_file = tempfile.mkstemp()[1]
+ if self.generate_supplemental_dependency(input_directory, supplemental_dependency_file, window_constructors_file, workerglobalscope_constructors_file, sharedworkerglobalscope_constructors_file, dedicatedworkerglobalscope_constructors_file):
print 'Failed to generate a supplemental dependency file.'
os.remove(supplemental_dependency_file)
+ os.remove(window_constructors_file)
+ os.remove(workerglobalscope_constructors_file)
+ os.remove(sharedworkerglobalscope_constructors_file)
+ os.remove(dedicatedworkerglobalscope_constructors_file)
return -1
for generator in self.generators:
@@ -164,6 +174,10 @@ class BindingsTests:
all_tests_passed = False
os.remove(supplemental_dependency_file)
+ os.remove(window_constructors_file)
+ os.remove(workerglobalscope_constructors_file)
+ os.remove(sharedworkerglobalscope_constructors_file)
+ os.remove(dedicatedworkerglobalscope_constructors_file)
print ''
if all_tests_passed:
print 'All tests PASS!'
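
The bindings test driver now has to juggle five mkstemp() files instead of one, and every exit path removes each of them by hand. A hedged sketch of that lifecycle folded into a single helper (the helper name is made up for illustration; like the code above, it keeps the descriptor returned by mkstemp() open and only uses the path):

    import os
    import tempfile

    def with_temp_files(count, action):
        # Create the throwaway files the same way the driver does
        # (tempfile.mkstemp()[1]), then remove every one of them
        # whether action() succeeds or fails.
        paths = [tempfile.mkstemp()[1] for _ in range(count)]
        try:
            return action(paths)
        finally:
            for path in paths:
                os.remove(path)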
diff --git a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
index a5fd06568..dcd649a5a 100644
--- a/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/baselineoptimizer_unittest.py
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.system.filesystem_mock import MockFileSystem
@@ -61,25 +61,24 @@ class BaselineOptimizerTest(unittest.TestCase):
def test_move_baselines(self):
host = MockHost()
- host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/chromium-win/another/test-expected.txt', 'result A')
- host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/chromium-mac/another/test-expected.txt', 'result A')
- host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/chromium/another/test-expected.txt', 'result B')
+ host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/mac-lion/another/test-expected.txt', 'result A')
+ host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/mac-lion-wk2/another/test-expected.txt', 'result A')
+ host.filesystem.write_binary_file('/mock-checkout/LayoutTests/platform/mac/another/test-expected.txt', 'result B')
baseline_optimizer = BaselineOptimizer(host, host.port_factory.all_port_names())
baseline_optimizer._move_baselines('another/test-expected.txt', {
- 'LayoutTests/platform/chromium-win': 'aaa',
- 'LayoutTests/platform/chromium-mac': 'aaa',
- 'LayoutTests/platform/chromium': 'bbb',
+ 'LayoutTests/platform/mac-lion': 'aaa',
+ 'LayoutTests/platform/mac-lion-wk2': 'aaa',
+ 'LayoutTests/platform/mac': 'bbb',
}, {
- 'LayoutTests/platform/chromium': 'aaa',
+ 'LayoutTests/platform/mac': 'aaa',
})
- self.assertEqual(host.filesystem.read_binary_file('/mock-checkout/LayoutTests/platform/chromium/another/test-expected.txt'), 'result A')
+ self.assertEqual(host.filesystem.read_binary_file('/mock-checkout/LayoutTests/platform/mac/another/test-expected.txt'), 'result A')
- def test_chromium_linux_redundant_with_win(self):
+ def test_efl(self):
self._assertOptimization({
- 'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
- 'LayoutTests/platform/chromium-linux': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+ 'LayoutTests/platform/efl': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
}, {
- 'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
+ 'LayoutTests/platform/efl': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
})
def test_no_add_mac_future(self):
@@ -87,21 +86,10 @@ class BaselineOptimizerTest(unittest.TestCase):
'LayoutTests/platform/mac': '29a1715a6470d5dd9486a142f609708de84cdac8',
'LayoutTests/platform/win-xp': '453e67177a75b2e79905154ece0efba6e5bfb65d',
'LayoutTests/platform/mac-lion': 'c43eaeb358f49d5e835236ae23b7e49d7f2b089f',
- 'LayoutTests/platform/chromium-mac': 'a9ba153c700a94ae1b206d8e4a75a621a89b4554',
}, {
'LayoutTests/platform/mac': '29a1715a6470d5dd9486a142f609708de84cdac8',
'LayoutTests/platform/win-xp': '453e67177a75b2e79905154ece0efba6e5bfb65d',
'LayoutTests/platform/mac-lion': 'c43eaeb358f49d5e835236ae23b7e49d7f2b089f',
- 'LayoutTests/platform/chromium-mac': 'a9ba153c700a94ae1b206d8e4a75a621a89b4554',
- })
-
- def test_chromium_covers_mac_win_linux(self):
- self._assertOptimization({
- 'LayoutTests/platform/chromium-mac': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
- 'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
- 'LayoutTests/platform/chromium-linux': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
- }, {
- 'LayoutTests/platform/chromium': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
})
def test_mac_future(self):
@@ -124,13 +112,11 @@ class BaselineOptimizerTest(unittest.TestCase):
'LayoutTests/platform/mac': '2',
'LayoutTests/platform/gtk': '3',
'LayoutTests/platform/qt': '4',
- 'LayoutTests/platform/chromium': '5',
}, {
'LayoutTests/platform/win': '1',
'LayoutTests/platform/mac': '2',
'LayoutTests/platform/gtk': '3',
'LayoutTests/platform/qt': '4',
- 'LayoutTests/platform/chromium': '5',
})
def test_common_directory_includes_root(self):
@@ -139,17 +125,12 @@ class BaselineOptimizerTest(unittest.TestCase):
self._assertOptimizationFailed({
'LayoutTests/platform/gtk': 'e8608763f6241ddacdd5c1ef1973ba27177d0846',
'LayoutTests/platform/qt': 'bcbd457d545986b7abf1221655d722363079ac87',
- 'LayoutTests/platform/chromium-win': '3764ac11e1f9fbadd87a90a2e40278319190a0d3',
'LayoutTests/platform/mac': 'e8608763f6241ddacdd5c1ef1973ba27177d0846',
})
self._assertOptimization({
- 'LayoutTests/platform/chromium-win': '23a30302a6910f8a48b1007fa36f3e3158341834',
'LayoutTests': '9c876f8c3e4cc2aef9519a6c1174eb3432591127',
- 'LayoutTests/platform/chromium-mac': '23a30302a6910f8a48b1007fa36f3e3158341834',
- 'LayoutTests/platform/chromium': '1',
}, {
- 'LayoutTests/platform/chromium': '23a30302a6910f8a48b1007fa36f3e3158341834',
'LayoutTests': '9c876f8c3e4cc2aef9519a6c1174eb3432591127',
})
@@ -159,35 +140,22 @@ class BaselineOptimizerTest(unittest.TestCase):
if sys.platform == 'win32':
return
self._assertOptimization({
- 'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
'LayoutTests/platform/mac': '5daa78e55f05d9f0d1bb1f32b0cd1bc3a01e9364',
- 'LayoutTests/platform/chromium-win-xp': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
'LayoutTests/platform/mac-lion': '7ad045ece7c030e2283c5d21d9587be22bcba56e',
- 'LayoutTests/platform/chromium-win': 'f83af9732ce74f702b8c9c4a3d9a4c6636b8d3bd',
'LayoutTests/platform/win-xp': '5b1253ef4d5094530d5f1bc6cdb95c90b446bec7',
- 'LayoutTests/platform/chromium-linux': 'f52fcdde9e4be8bd5142171cd859230bd4471036',
}, {
- 'LayoutTests/platform/chromium-win': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
'LayoutTests/platform/mac': '5daa78e55f05d9f0d1bb1f32b0cd1bc3a01e9364',
- 'LayoutTests/platform/chromium-win-xp': '462d03b9c025db1b0392d7453310dbee5f9a9e74',
'LayoutTests/platform/mac-lion': '7ad045ece7c030e2283c5d21d9587be22bcba56e',
- 'LayoutTests/platform/chromium-win': 'f83af9732ce74f702b8c9c4a3d9a4c6636b8d3bd',
'LayoutTests/platform/win-xp': '5b1253ef4d5094530d5f1bc6cdb95c90b446bec7',
- 'LayoutTests/platform/chromium-linux': 'f52fcdde9e4be8bd5142171cd859230bd4471036'
})
def test_virtual_ports_filtered(self):
self._assertOptimization({
- 'LayoutTests/platform/chromium-mac': '1',
- 'LayoutTests/platform/chromium-mac-snowleopard': '1',
- 'LayoutTests/platform/chromium-win': '2',
'LayoutTests/platform/gtk': '3',
'LayoutTests/platform/efl': '3',
'LayoutTests/platform/qt': '4',
'LayoutTests/platform/mac': '5',
}, {
- 'LayoutTests/platform/chromium-mac': '1',
- 'LayoutTests/platform/chromium-win': '2',
'LayoutTests': '3',
'LayoutTests/platform/qt': '4',
'LayoutTests/platform/mac': '5',
diff --git a/Tools/Scripts/webkitpy/common/checkout/changelog.py b/Tools/Scripts/webkitpy/common/checkout/changelog.py
index c5cf42c79..47c6b64c5 100644
--- a/Tools/Scripts/webkitpy/common/checkout/changelog.py
+++ b/Tools/Scripts/webkitpy/common/checkout/changelog.py
@@ -28,14 +28,13 @@
#
# WebKit's Python module for parsing and modifying ChangeLog files
-import codecs
-import fileinput # inplace file editing for set_reviewer_in_changelog
import logging
import re
+from StringIO import StringIO
import textwrap
from webkitpy.common.config.committers import CommitterList
-from webkitpy.common.config.committers import Account
+from webkitpy.common.system.filesystem import FileSystem
import webkitpy.common.config.urls as config_urls
_log = logging.getLogger(__name__)
@@ -64,6 +63,8 @@ class ChangeLogEntry(object):
# e.g. * Source/WebCore/page/EventHandler.cpp: Implement FooBarQuux.
touched_files_regexp = r'^\s*\*\s*(?P<file>[A-Za-z0-9_\-\./\\]+)\s*\:'
+ # e.g. (ChangeLogEntry.touched_functions): Added.
+ touched_functions_regexp = r'^\s*\((?P<function>[^)]*)\):'
# e.g. Reviewed by Darin Adler.
# (Discard everything after the first period to match more invalid lines.)
@@ -102,14 +103,16 @@ class ChangeLogEntry(object):
# e.g. git-svn-id: http://svn.webkit.org/repository/webkit/trunk@96161 268f45cc-cd09-0410-ab3c-d52691b4dbfc
svn_id_regexp = r'git-svn-id: http://svn.webkit.org/repository/webkit/trunk@(?P<svnid>\d+) '
+ split_names_regexp = r'\s*(?:,(?:\s+and\s+|&)?|(?:^|\s+)and\s+|&&|[/+&])\s*'
+
def __init__(self, contents, committer_list=CommitterList(), revision=None):
self._contents = contents
self._committer_list = committer_list
self._revision = revision
self._parse_entry()
- @staticmethod
- def _parse_reviewer_text(text):
+ @classmethod
+ def _parse_reviewer_text(cls, text):
match = re.search(ChangeLogEntry.reviewed_by_regexp, text, re.MULTILINE | re.IGNORECASE)
if not match:
# There are cases where people omit "by". We match it only if reviewer part looked nice
@@ -129,7 +132,7 @@ class ChangeLogEntry(object):
if not len(reviewer_text):
return None, None
- reviewer_list = ChangeLogEntry._split_contributor_names(reviewer_text)
+ reviewer_list = ChangeLogEntry._split_reviewer_names(reviewer_text)
# Get rid of "reviewers" like "even though this is just a..." in "Reviewed by Sam Weinig, even though this is just a..."
# and "who wrote the original code" in "Noam Rosenthal, who wrote the original code"
@@ -137,9 +140,17 @@ class ChangeLogEntry(object):
return reviewer_text, reviewer_list
- @staticmethod
- def _split_contributor_names(text):
- return re.split(r'\s*(?:,(?:\s+and\s+|&)?|(?:^|\s+)and\s+|&&|[/+&])\s*', text)
+ @classmethod
+ def _split_reviewer_names(cls, text):
+ return re.split(ChangeLogEntry.split_names_regexp, text)
+
+ @classmethod
+ def _split_author_names_with_emails(cls, text):
+ regex = '>' + ChangeLogEntry.split_names_regexp
+ names = re.split(regex, text)
+ if len(names) > 1:
+ names = [name + ">" for name in names[:-1]] + [names[-1]]
+ return names
def _fuzz_match_reviewers(self, reviewers_text_list):
if not reviewers_text_list:
@@ -148,24 +159,54 @@ class ChangeLogEntry(object):
# Flatten lists and get rid of any reviewers with more than one candidate.
return [reviewers[0] for reviewers in list_of_reviewers if len(reviewers) == 1]
- @staticmethod
- def _parse_author_name_and_email(author_name_and_email):
+ @classmethod
+ def _parse_author_name_and_email(cls, author_name_and_email):
match = re.match(r'(?P<name>.+?)\s+<(?P<email>[^>]+)>', author_name_and_email)
return {'name': match.group("name"), 'email': match.group("email")}
- @staticmethod
- def _parse_author_text(text):
+ @classmethod
+ def _parse_author_text(cls, text):
if not text:
return []
- authors = ChangeLogEntry._split_contributor_names(text)
+ authors = cls._split_author_names_with_emails(text)
assert(authors and len(authors) >= 1)
- return [ChangeLogEntry._parse_author_name_and_email(author) for author in authors]
+ return [cls._parse_author_name_and_email(author) for author in authors]
+
+ @classmethod
+ def _parse_touched_functions(cls, text):
+ result = {}
+ cur_file = None
+ for line in text.splitlines():
+ file_match = re.match(cls.touched_files_regexp, line)
+ if file_match:
+ cur_file = file_match.group("file")
+ result[cur_file] = []
+ func_match = re.match(cls.touched_functions_regexp, line)
+ if func_match and cur_file:
+ result[cur_file].append(func_match.group("function"))
+ return result
+
+ @classmethod
+ def _parse_bug_description(cls, text):
+ # If line 4 is a bug url, line 3 is the bug description.
+ # It's too hard to guess in other cases, so we return None.
+ lines = text.splitlines()
+ if len(lines) < 4:
+ return None
+ for bug_url in (config_urls.bug_url_short, config_urls.bug_url_long):
+ if re.match("^\s*" + bug_url + "$", lines[3]):
+ return lines[2].strip()
+ return None
def _parse_entry(self):
match = re.match(self.date_line_regexp, self._contents, re.MULTILINE)
if not match:
_log.warning("Creating invalid ChangeLogEntry:\n%s" % self._contents)
+ self._date_line = match.group()
+ self._date = match.group("date")
+ self._bug_description = self._parse_bug_description(self._contents)
+
# FIXME: group("name") does not seem to be Unicode? Probably due to self._contents not being unicode.
self._author_text = match.group("authors") if match else None
self._authors = ChangeLogEntry._parse_author_text(self._author_text)
@@ -175,6 +216,13 @@ class ChangeLogEntry(object):
self._author = self._committer_list.contributor_by_email(self.author_email()) or self._committer_list.contributor_by_name(self.author_name())
self._touched_files = re.findall(self.touched_files_regexp, self._contents, re.MULTILINE)
+ self._touched_functions = self._parse_touched_functions(self._contents)
+
+ def date_line(self):
+ return self._date_line
+
+ def date(self):
+ return self._date
def author_text(self):
return self._author_text
@@ -220,69 +268,75 @@ class ChangeLogEntry(object):
def bug_id(self):
return parse_bug_id_from_changelog(self._contents)
+ def bug_description(self):
+ return self._bug_description
+
def touched_files(self):
return self._touched_files
+ # Returns a dict from file name to lists of function names.
+ def touched_functions(self):
+ return self._touched_functions
+
+ def touched_files_text(self):
+ match = re.search(self.touched_files_regexp, self._contents, re.MULTILINE)
+ return self._contents[match.start():].lstrip("\n\r") if match else ""
+
+ # Determine if any text has been added to the section on touched files
+ def is_touched_files_text_clean(self):
+ file_line_end = r"( (Added|Removed|(Copied|Renamed) from [A-Za-z0-9_\-./\\]+).)?$"
+ for line in self.touched_files_text().splitlines():
+ if re.match(self.touched_files_regexp + file_line_end, line):
+ continue
+ if re.match(self.touched_functions_regexp + "$", line):
+ continue
+ return False
+ return True
# FIXME: Various methods on ChangeLog should move into ChangeLogEntry instead.
class ChangeLog(object):
- def __init__(self, path):
+ def __init__(self, path, filesystem=None):
self.path = path
+ self._filesystem = filesystem or FileSystem()
_changelog_indent = " " * 8
- @staticmethod
- def parse_latest_entry_from_file(changelog_file):
- """changelog_file must be a file-like object which returns
- unicode strings. Use codecs.open or StringIO(unicode())
- to pass file objects to this class."""
- date_line_regexp = re.compile(ChangeLogEntry.date_line_regexp)
- rolled_over_regexp = re.compile(ChangeLogEntry.rolled_over_regexp)
- entry_lines = []
- # The first line should be a date line.
- first_line = changelog_file.readline()
- assert(isinstance(first_line, unicode))
- if not date_line_regexp.match(first_line):
+ @classmethod
+ def parse_latest_entry_from_file(cls, changelog_file):
+ try:
+ return next(cls.parse_entries_from_file(changelog_file))
+ except StopIteration, e:
return None
- entry_lines.append(first_line)
-
- for line in changelog_file:
- # If we've hit the next entry, return.
- if date_line_regexp.match(line) or rolled_over_regexp.match(line):
- # Remove the extra newline at the end
- return ChangeLogEntry(''.join(entry_lines[:-1]))
- entry_lines.append(line)
- return None # We never found a date line!
svn_blame_regexp = re.compile(r'^(\s*(?P<revision>\d+) [^ ]+)\s*(?P<line>.*?\n)')
- @staticmethod
- def _separate_revision_and_line(line):
- match = ChangeLog.svn_blame_regexp.match(line)
+ @classmethod
+ def _separate_revision_and_line(cls, line):
+ match = cls.svn_blame_regexp.match(line)
if not match:
return None, line
return int(match.group('revision')), match.group('line')
- @staticmethod
- def parse_entries_from_file(changelog_file):
+ @classmethod
+ def parse_entries_from_file(cls, changelog_file):
"""changelog_file must be a file-like object which returns
- unicode strings. Use codecs.open or StringIO(unicode())
- to pass file objects to this class."""
+ unicode strings, e.g. from StringIO(unicode()) or
+ fs.open_text_file_for_reading()"""
date_line_regexp = re.compile(ChangeLogEntry.date_line_regexp)
rolled_over_regexp = re.compile(ChangeLogEntry.rolled_over_regexp)
# The first line should be a date line.
- revision, first_line = ChangeLog._separate_revision_and_line(changelog_file.readline())
+ revision, first_line = cls._separate_revision_and_line(changelog_file.readline())
assert(isinstance(first_line, unicode))
- if not date_line_regexp.match(ChangeLog.svn_blame_regexp.sub('', first_line)):
+ if not date_line_regexp.match(cls.svn_blame_regexp.sub('', first_line)):
raise StopIteration
entry_lines = [first_line]
revisions_in_entry = {revision: 1} if revision != None else None
for line in changelog_file:
if revisions_in_entry:
- revision, line = ChangeLog._separate_revision_and_line(line)
+ revision, line = cls._separate_revision_and_line(line)
if rolled_over_regexp.match(line):
break
@@ -303,7 +357,7 @@ class ChangeLog(object):
def latest_entry(self):
# ChangeLog files are always UTF-8, we read them in as such to support Reviewers with unicode in their names.
- changelog_file = codecs.open(self.path, "r", "utf-8")
+ changelog_file = self._filesystem.open_text_file_for_reading(self.path)
try:
return self.parse_latest_entry_from_file(changelog_file)
finally:
@@ -331,20 +385,22 @@ class ChangeLog(object):
first_boilerplate_line_regexp = re.compile(
"%sNeed a short description \(OOPS!\)\." % self._changelog_indent)
removing_boilerplate = False
- # inplace=1 creates a backup file and re-directs stdout to the file
- for line in fileinput.FileInput(self.path, inplace=1):
- if first_boilerplate_line_regexp.search(line):
- message_lines = self._wrap_lines(message)
- print first_boilerplate_line_regexp.sub(message_lines, line),
- # Remove all the ChangeLog boilerplate before the first changed
- # file.
- removing_boilerplate = True
- elif removing_boilerplate:
- if line.find('*') >= 0: # each changed file is preceded by a *
- removing_boilerplate = False
-
- if not removing_boilerplate:
- print line,
+ result = StringIO()
+ with self._filesystem.open_text_file_for_reading(self.path) as file:
+ for line in file:
+ if first_boilerplate_line_regexp.search(line):
+ message_lines = self._wrap_lines(message)
+ result.write(first_boilerplate_line_regexp.sub(message_lines, line))
+ # Remove all the ChangeLog boilerplate before the first changed
+ # file.
+ removing_boilerplate = True
+ elif removing_boilerplate:
+ if line.find('*') >= 0: # each changed file is preceded by a *
+ removing_boilerplate = False
+
+ if not removing_boilerplate:
+ result.write(line)
+ self._filesystem.write_text_file(self.path, result.getvalue())
def set_reviewer(self, reviewer):
latest_entry = self.latest_entry()
@@ -355,25 +411,49 @@ class ChangeLog(object):
if not found_nobody and not reviewer_text:
bug_url_number_of_items = len(re.findall(config_urls.bug_url_long, latest_entry_contents, re.MULTILINE))
bug_url_number_of_items += len(re.findall(config_urls.bug_url_short, latest_entry_contents, re.MULTILINE))
- for line in fileinput.FileInput(self.path, inplace=1):
- found_bug_url = re.search(config_urls.bug_url_long, line)
- if not found_bug_url:
- found_bug_url = re.search(config_urls.bug_url_short, line)
- print line,
- if found_bug_url:
- if bug_url_number_of_items == 1:
- print "\n Reviewed by %s." % (reviewer.encode("utf-8"))
- bug_url_number_of_items -= 1
+ result = StringIO()
+ with self._filesystem.open_text_file_for_reading(self.path) as file:
+ for line in file:
+ found_bug_url = re.search(config_urls.bug_url_long, line)
+ if not found_bug_url:
+ found_bug_url = re.search(config_urls.bug_url_short, line)
+ result.write(line)
+ if found_bug_url:
+ if bug_url_number_of_items == 1:
+ result.write("\n Reviewed by %s.\n" % reviewer)
+ bug_url_number_of_items -= 1
+ self._filesystem.write_text_file(self.path, result.getvalue())
else:
- # inplace=1 creates a backup file and re-directs stdout to the file
- for line in fileinput.FileInput(self.path, inplace=1):
- # Trailing comma suppresses printing newline
- print line.replace("NOBODY (OOPS!)", reviewer.encode("utf-8")),
+ data = self._filesystem.read_text_file(self.path)
+ newdata = data.replace("NOBODY (OOPS!)", reviewer)
+ self._filesystem.write_text_file(self.path, newdata)
def set_short_description_and_bug_url(self, short_description, bug_url):
message = "%s\n%s%s" % (short_description, self._changelog_indent, bug_url)
bug_boilerplate = "%sNeed the bug URL (OOPS!).\n" % self._changelog_indent
- for line in fileinput.FileInput(self.path, inplace=1):
- line = line.replace("Need a short description (OOPS!).", message.encode("utf-8"))
- if line != bug_boilerplate:
- print line,
+ result = StringIO()
+ with self._filesystem.open_text_file_for_reading(self.path) as file:
+ for line in file:
+ line = line.replace("Need a short description (OOPS!).", message)
+ if line != bug_boilerplate:
+ result.write(line)
+ self._filesystem.write_text_file(self.path, result.getvalue())
+
+ def delete_entries(self, num_entries):
+ date_line_regexp = re.compile(ChangeLogEntry.date_line_regexp)
+ rolled_over_regexp = re.compile(ChangeLogEntry.rolled_over_regexp)
+ entries = 0
+ result = StringIO()
+ with self._filesystem.open_text_file_for_reading(self.path) as file:
+ for line in file:
+ if date_line_regexp.match(line):
+ entries += 1
+ elif rolled_over_regexp.match(line):
+ entries = num_entries + 1
+ if entries > num_entries:
+ result.write(line)
+ self._filesystem.write_text_file(self.path, result.getvalue())
+
+ def prepend_text(self, text):
+ data = self._filesystem.read_text_file(self.path)
+ self._filesystem.write_text_file(self.path, text + data)
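
The new touched_functions() support is driven entirely by the two patterns added near the top of ChangeLogEntry: touched_files_regexp recognises the "* path/to/file:" lines and touched_functions_regexp recognises the "(Function.name):" lines beneath them. A small self-contained sketch of how _parse_touched_functions() groups the two (the sample entry text is invented for illustration):

    import re

    # Copied from the ChangeLogEntry additions above.
    touched_files_regexp = r'^\s*\*\s*(?P<file>[A-Za-z0-9_\-\./\\]+)\s*\:'
    touched_functions_regexp = r'^\s*\((?P<function>[^)]*)\):'

    entry_text = """\
        * Scripts/webkitpy/common/checkout/changelog.py:
        (ChangeLogEntry.touched_functions): Added.
    """

    result = {}
    current_file = None
    for line in entry_text.splitlines():
        file_match = re.match(touched_files_regexp, line)
        if file_match:
            current_file = file_match.group("file")
            result[current_file] = []
        func_match = re.match(touched_functions_regexp, line)
        if func_match and current_file:
            result[current_file].append(func_match.group("function"))

    # result == {'Scripts/webkitpy/common/checkout/changelog.py':
    #            ['ChangeLogEntry.touched_functions']}

is_touched_files_text_clean() builds on the same two patterns: the files section counts as clean when every line is either a bare file line (optionally carrying the Added/Removed/Copied from/Renamed from annotations prepare-ChangeLog emits) or a bare function line; anything else is treated as a manual comment.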
diff --git a/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py b/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py
index fa2339f45..05b21e0d3 100644
--- a/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/changelog_unittest.py
@@ -26,18 +26,18 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import codecs
-import os
-import tempfile
-import unittest
+import unittest2 as unittest
from StringIO import StringIO
+from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.checkout.changelog import *
class ChangeLogTest(unittest.TestCase):
+ _changelog_path = 'Tools/ChangeLog'
+
_example_entry = u'''2009-08-17 Peter Kasting <pkasting@google.com>
Reviewed by Tor Arne Vestb\xf8.
@@ -236,17 +236,31 @@ class ChangeLogTest(unittest.TestCase):
changelog_file = StringIO(self._example_changelog)
parsed_entries = list(ChangeLog.parse_entries_from_file(changelog_file))
self.assertEqual(len(parsed_entries), 9)
+ self.assertEqual(parsed_entries[0].date_line(), u"2009-08-17 Tor Arne Vestb\xf8 <vestbo@webkit.org>")
+ self.assertEqual(parsed_entries[0].date(), "2009-08-17")
self.assertEqual(parsed_entries[0].reviewer_text(), "David Levin")
+ self.assertEqual(parsed_entries[0].is_touched_files_text_clean(), False)
+ self.assertEqual(parsed_entries[1].date_line(), "2009-08-16 David Kilzer <ddkilzer@apple.com>")
+ self.assertEqual(parsed_entries[1].date(), "2009-08-16")
self.assertEqual(parsed_entries[1].author_email(), "ddkilzer@apple.com")
+ self.assertEqual(parsed_entries[1].touched_files_text(), " * Scripts/bugzilla-tool:\n * Scripts/modules/scm.py:\n")
+ self.assertEqual(parsed_entries[1].is_touched_files_text_clean(), True)
self.assertEqual(parsed_entries[2].reviewer_text(), "Mark Rowe")
self.assertEqual(parsed_entries[2].touched_files(), ["DumpRenderTree/mac/DumpRenderTreeWindow.mm"])
+ self.assertEqual(parsed_entries[2].touched_functions(), {"DumpRenderTree/mac/DumpRenderTreeWindow.mm": ["-[DumpRenderTreeWindow close]"]})
+ self.assertEqual(parsed_entries[2].is_touched_files_text_clean(), False)
self.assertEqual(parsed_entries[3].author_name(), "Benjamin Poulain")
self.assertEqual(parsed_entries[3].touched_files(), ["platform/cf/KURLCFNet.cpp", "platform/mac/KURLMac.mm",
"WebCoreSupport/ChromeClientEfl.cpp", "ewk/ewk_private.h", "ewk/ewk_view.cpp"])
+ self.assertEqual(parsed_entries[3].touched_functions(), {"platform/cf/KURLCFNet.cpp": ["WebCore::createCFURLFromBuffer", "WebCore::KURL::createCFURL"],
+ "platform/mac/KURLMac.mm": ["WebCore::KURL::operator NSURL *", "WebCore::KURL::createCFURL"],
+ "WebCoreSupport/ChromeClientEfl.cpp": ["WebCore::ChromeClientEfl::closeWindowSoon"], "ewk/ewk_private.h": [], "ewk/ewk_view.cpp": []})
+ self.assertEqual(parsed_entries[3].bug_description(), "[Mac] ResourceRequest's nsURLRequest() does not differentiate null and empty URLs with CFNetwork")
self.assertEqual(parsed_entries[4].reviewer_text(), "David Hyatt")
+ self.assertIsNone(parsed_entries[4].bug_description())
self.assertEqual(parsed_entries[5].reviewer_text(), "Adam Roben")
self.assertEqual(parsed_entries[6].reviewer_text(), "Tony Chang")
- self.assertEqual(parsed_entries[7].reviewer_text(), None)
+ self.assertIsNone(parsed_entries[7].reviewer_text())
self.assertEqual(parsed_entries[8].reviewer_text(), 'Darin Adler')
def test_parse_log_entries_from_annotated_file(self):
@@ -439,6 +453,10 @@ class ChangeLogTest(unittest.TestCase):
[('New Contributor', 'new@webkit.org'), ('Noob', 'noob@webkit.org')])
self._assert_parse_authors('Adam Barth <abarth@webkit.org> && Benjamin Poulain <bpoulain@apple.com>',
[('Adam Barth', 'abarth@webkit.org'), ('Benjamin Poulain', 'bpoulain@apple.com')])
+ self._assert_parse_authors(u'Pawe\u0142 Hajdan, Jr. <phajdan.jr@chromium.org>',
+ [(u'Pawe\u0142 Hajdan, Jr.', u'phajdan.jr@chromium.org')])
+ self._assert_parse_authors(u'Pawe\u0142 Hajdan, Jr. <phajdan.jr@chromium.org>, Adam Barth <abarth@webkit.org>',
+ [(u'Pawe\u0142 Hajdan, Jr.', u'phajdan.jr@chromium.org'), (u'Adam Barth', u'abarth@webkit.org')])
def _assert_has_valid_reviewer(self, reviewer_line, expected):
self.assertEqual(self._entry_with_reviewer(reviewer_line).has_valid_reviewer(), expected)
@@ -454,6 +472,48 @@ class ChangeLogTest(unittest.TestCase):
self._assert_has_valid_reviewer("Rubber stamped by Eric Seidel.", True)
self._assert_has_valid_reviewer("Unreviewed build fix.", True)
+ def test_is_touched_files_text_clean(self):
+ tests = [
+ ('''2013-01-30 Timothy Loh <timloh@chromium.com>
+
+ Make ChangeLogEntry detect annotations by prepare-ChangeLog (Added/Removed/Copied from/Renamed from) as clean.
+ https://bugs.webkit.org/show_bug.cgi?id=108433
+
+ * Scripts/webkitpy/common/checkout/changelog.py:
+ (ChangeLogEntry.is_touched_files_text_clean):
+ * Scripts/webkitpy/common/checkout/changelog_unittest.py:
+ (test_is_touched_files_text_clean):
+''', True),
+ ('''2013-01-10 Alan Cutter <alancutter@chromium.org>
+
+ Perform some file operations (automatically added comments).
+
+ * QueueStatusServer/config/charts.py: Copied from Tools/QueueStatusServer/model/queuelog.py.
+ (get_time_unit):
+ * QueueStatusServer/handlers/queuecharts.py: Added.
+ (QueueCharts):
+ * Scripts/webkitpy/tool/bot/testdata/webkit_sheriff_0.js: Removed.
+ * EWSTools/build-vm.sh: Renamed from Tools/EWSTools/cold-boot.sh.
+''', True),
+ ('''2013-01-30 Timothy Loh <timloh@chromium.com>
+
+ Add unit test (manually added comment).
+
+ * Scripts/webkitpy/common/checkout/changelog_unittest.py:
+ (test_is_touched_files_text_clean): Added.
+''', False),
+ ('''2013-01-30 Timothy Loh <timloh@chromium.com>
+
+ Add file (manually added comment).
+
+ * Scripts/webkitpy/common/checkout/super_changelog.py: Copied from the internet.
+''', False),
+ ]
+
+ for contents, expected_result in tests:
+ entry = ChangeLogEntry(contents)
+ self.assertEqual(entry.is_touched_files_text_clean(), expected_result)
+
def test_latest_entry_parse(self):
changelog_contents = u"%s\n%s" % (self._example_entry, self._example_changelog)
changelog_file = StringIO(changelog_contents)
@@ -462,7 +522,9 @@ class ChangeLogTest(unittest.TestCase):
self.assertEqual(latest_entry.author_name(), "Peter Kasting")
self.assertEqual(latest_entry.author_email(), "pkasting@google.com")
self.assertEqual(latest_entry.reviewer_text(), u"Tor Arne Vestb\xf8")
- self.assertEqual(latest_entry.touched_files(), ["DumpRenderTree/win/DumpRenderTree.vcproj", "DumpRenderTree/win/ImageDiff.vcproj", "DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj"])
+ touched_files = ["DumpRenderTree/win/DumpRenderTree.vcproj", "DumpRenderTree/win/ImageDiff.vcproj", "DumpRenderTree/win/TestNetscapePlugin/TestNetscapePlugin.vcproj"]
+ self.assertEqual(latest_entry.touched_files(), touched_files)
+ self.assertEqual(latest_entry.touched_functions(), dict((f, []) for f in touched_files))
self.assertTrue(latest_entry.reviewer()) # Make sure that our UTF8-based lookup of Tor works.
@@ -473,19 +535,6 @@ class ChangeLogTest(unittest.TestCase):
self.assertEqual(latest_entry.contents(), self._example_entry)
self.assertEqual(latest_entry.author_name(), "Peter Kasting")
- @staticmethod
- def _write_tmp_file_with_contents(byte_array):
- assert(isinstance(byte_array, str))
- (file_descriptor, file_path) = tempfile.mkstemp() # NamedTemporaryFile always deletes the file on close in python < 2.6
- with os.fdopen(file_descriptor, "w") as file:
- file.write(byte_array)
- return file_path
-
- @staticmethod
- def _read_file_contents(file_path, encoding):
- with codecs.open(file_path, "r", encoding) as file:
- return file.read()
-
# FIXME: We really should be getting this from prepare-ChangeLog itself.
_new_entry_boilerplate = '''2009-08-19 Eric Seidel <eric@webkit.org>
@@ -536,50 +585,83 @@ class ChangeLogTest(unittest.TestCase):
'''
def test_set_reviewer(self):
+ fs = MockFileSystem()
+
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate_with_bugurl, self._example_changelog)
- changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
reviewer_name = 'Test Reviewer'
- ChangeLog(changelog_path).set_reviewer(reviewer_name)
- actual_contents = self._read_file_contents(changelog_path, "utf-8")
+ fs.write_text_file(self._changelog_path, changelog_contents)
+ ChangeLog(self._changelog_path, fs).set_reviewer(reviewer_name)
+ actual_contents = fs.read_text_file(self._changelog_path)
expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
- os.remove(changelog_path)
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
changelog_contents_without_reviewer_line = u"%s\n%s" % (self._new_entry_boilerplate_without_reviewer_line, self._example_changelog)
- changelog_path = self._write_tmp_file_with_contents(changelog_contents_without_reviewer_line.encode("utf-8"))
- ChangeLog(changelog_path).set_reviewer(reviewer_name)
- actual_contents = self._read_file_contents(changelog_path, "utf-8")
- os.remove(changelog_path)
+ fs.write_text_file(self._changelog_path, changelog_contents_without_reviewer_line)
+ ChangeLog(self._changelog_path, fs).set_reviewer(reviewer_name)
+ actual_contents = fs.read_text_file(self._changelog_path)
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
changelog_contents_without_reviewer_line = u"%s\n%s" % (self._new_entry_boilerplate_without_reviewer_multiple_bugurl, self._example_changelog)
- changelog_path = self._write_tmp_file_with_contents(changelog_contents_without_reviewer_line.encode("utf-8"))
- ChangeLog(changelog_path).set_reviewer(reviewer_name)
- actual_contents = self._read_file_contents(changelog_path, "utf-8")
+ fs.write_text_file(self._changelog_path, changelog_contents_without_reviewer_line)
+ ChangeLog(self._changelog_path, fs).set_reviewer(reviewer_name)
+ actual_contents = fs.read_text_file(self._changelog_path)
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate_with_multiple_bugurl, self._example_changelog)
expected_contents = changelog_contents.replace('NOBODY (OOPS!)', reviewer_name)
- os.remove(changelog_path)
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
def test_set_short_description_and_bug_url(self):
+ fs = MockFileSystem()
+
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate_with_bugurl, self._example_changelog)
- changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
+ fs.write_text_file(self._changelog_path, changelog_contents)
short_description = "A short description"
bug_url = "http://example.com/b/2344"
- ChangeLog(changelog_path).set_short_description_and_bug_url(short_description, bug_url)
- actual_contents = self._read_file_contents(changelog_path, "utf-8")
+ ChangeLog(self._changelog_path, fs).set_short_description_and_bug_url(short_description, bug_url)
+ actual_contents = fs.read_text_file(self._changelog_path)
expected_message = "%s\n %s" % (short_description, bug_url)
expected_contents = changelog_contents.replace("Need a short description (OOPS!).", expected_message)
- os.remove(changelog_path)
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
- changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
+ fs.write_text_file(self._changelog_path, changelog_contents)
short_description = "A short description 2"
bug_url = "http://example.com/b/2345"
- ChangeLog(changelog_path).set_short_description_and_bug_url(short_description, bug_url)
- actual_contents = self._read_file_contents(changelog_path, "utf-8")
+ ChangeLog(self._changelog_path, fs).set_short_description_and_bug_url(short_description, bug_url)
+ actual_contents = fs.read_text_file(self._changelog_path)
expected_message = "%s\n %s" % (short_description, bug_url)
expected_contents = changelog_contents.replace("Need a short description (OOPS!).\n Need the bug URL (OOPS!).", expected_message)
- os.remove(changelog_path)
+ self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
+
+ def test_delete_entries(self):
+ fs = MockFileSystem()
+ fs.write_text_file(self._changelog_path, self._example_changelog)
+ ChangeLog(self._changelog_path, fs).delete_entries(8)
+ actual_contents = fs.read_text_file(self._changelog_path)
+ expected_contents = """2011-10-11 Antti Koivisto <antti@apple.com>
+
+ Resolve regular and visited link style in a single pass
+ https://bugs.webkit.org/show_bug.cgi?id=69838
+
+ Reviewed by Darin Adler
+
+ We can simplify and speed up selector matching by removing the recursive matching done
+ to generate the style for the :visited pseudo selector. Both regular and visited link style
+ can be generated in a single pass through the style selector.
+
+== Rolled over to ChangeLog-2009-06-16 ==
+"""
+ self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
+
+ ChangeLog(self._changelog_path, fs).delete_entries(2)
+ actual_contents = fs.read_text_file(self._changelog_path)
+ expected_contents = "== Rolled over to ChangeLog-2009-06-16 ==\n"
+ self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
+
+
+ def test_prepend_text(self):
+ fs = MockFileSystem()
+ fs.write_text_file(self._changelog_path, self._example_changelog)
+ ChangeLog(self._changelog_path, fs).prepend_text(self._example_entry + "\n")
+ actual_contents = fs.read_text_file(self._changelog_path)
+ expected_contents = self._example_entry + "\n" + self._example_changelog
self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
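
The rewritten tests above swap real temporary files (tempfile/codecs/os) for a MockFileSystem, so each case reduces to: write the fixture, run the ChangeLog operation, read the result back. A minimal sketch of that pattern, assuming the webkitpy tree is importable (the path and fixture strings are placeholders):

    from webkitpy.common.checkout.changelog import ChangeLog
    from webkitpy.common.system.filesystem_mock import MockFileSystem

    fs = MockFileSystem()
    fs.write_text_file('Tools/ChangeLog', u'...existing ChangeLog contents...')
    # Operate on the in-memory file; no real I/O and no cleanup step needed.
    ChangeLog('Tools/ChangeLog', fs).prepend_text(u'...new entry...\n')
    assert fs.read_text_file('Tools/ChangeLog').startswith(u'...new entry...')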
diff --git a/Tools/Scripts/webkitpy/common/checkout/checkout.py b/Tools/Scripts/webkitpy/common/checkout/checkout.py
index fb686f4d6..60e15b29c 100644
--- a/Tools/Scripts/webkitpy/common/checkout/checkout.py
+++ b/Tools/Scripts/webkitpy/common/checkout/checkout.py
@@ -32,7 +32,6 @@ from webkitpy.common.config import urls
from webkitpy.common.checkout.changelog import ChangeLog, parse_bug_id_from_changelog
from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.checkout.scm import CommitMessage
-from webkitpy.common.checkout.deps import DEPS
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import ScriptError
@@ -135,10 +134,10 @@ class Checkout(object):
def suggested_reviewers(self, git_commit, changed_files=None):
changed_files = self.modified_non_changelogs(git_commit, changed_files)
- commit_infos = self.recent_commit_infos_for_files(changed_files)
- reviewers = [commit_info.reviewer() for commit_info in commit_infos if commit_info.reviewer()]
- reviewers.extend([commit_info.author() for commit_info in commit_infos if commit_info.author() and commit_info.author().can_review])
- return sorted(set(reviewers))
+ commit_infos = sorted(self.recent_commit_infos_for_files(changed_files), key=lambda info: info.revision(), reverse=True)
+ reviewers = filter(lambda person: person and person.can_review, sum(map(lambda info: [info.reviewer(), info.author()], commit_infos), []))
+ unique_reviewers = reduce(lambda suggestions, reviewer: suggestions + [reviewer if reviewer not in suggestions else None], reviewers, [])
+ return filter(lambda reviewer: reviewer, unique_reviewers)
def bug_id_for_this_commit(self, git_commit, changed_files=None):
try:
@@ -146,9 +145,6 @@ class Checkout(object):
except ScriptError, e:
pass # We might not have ChangeLogs.
- def chromium_deps(self):
- return DEPS(self._scm.absolute_path(self._filesystem.join("Source", "WebKit", "chromium", "DEPS")))
-
def apply_patch(self, patch):
# It's possible that the patch was not made from the root directory.
# We should detect and handle that case.
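
The reworked suggested_reviewers() above sorts the recent CommitInfo objects newest-first, interleaves each commit's reviewer and author, drops anyone who cannot review, and finally de-duplicates while preserving that recency order (the reduce()/filter() pair). A plain sketch of just the de-duplication step, with an invented helper name:

    def unique_in_order(people):
        # Keep the first occurrence of each entry and drop Nones --
        # the effect of the reduce() + filter() combination above.
        seen = []
        for person in people:
            if person is not None and person not in seen:
                seen.append(person)
        return seen

    # unique_in_order(['a', None, 'b', 'a']) == ['a', 'b']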
diff --git a/Tools/Scripts/webkitpy/common/checkout/checkout_mock.py b/Tools/Scripts/webkitpy/common/checkout/checkout_mock.py
index 3c050aeb9..8a17145ca 100644
--- a/Tools/Scripts/webkitpy/common/checkout/checkout_mock.py
+++ b/Tools/Scripts/webkitpy/common/checkout/checkout_mock.py
@@ -26,7 +26,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from .deps_mock import MockDEPS
from .commitinfo import CommitInfo
# FIXME: These imports are wrong, we should use a shared MockCommittersList.
@@ -103,9 +102,6 @@ class MockCheckout(object):
def commit_message_for_this_commit(self, git_commit, changed_files=None):
return MockCommitMessage()
- def chromium_deps(self):
- return MockDEPS()
-
def apply_patch(self, patch):
pass
diff --git a/Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py b/Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py
index a3b47c95e..587798e77 100644
--- a/Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/checkout_unittest.py
@@ -30,12 +30,13 @@ import codecs
import os
import shutil
import tempfile
-import unittest
+import unittest2 as unittest
from .checkout import Checkout
from .changelog import ChangeLogEntry
from .scm import CommitMessage, SCMDetector
from .scm.scm_mock import MockSCM
+from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem import FileSystem # FIXME: This should not be needed.
from webkitpy.common.system.filesystem_mock import MockFileSystem
@@ -102,6 +103,7 @@ Second part of this complicated change by me, Tor Arne Vestb\u00f8!
self.temp_dir = str(self.filesystem.mkdtemp(suffix="changelogs"))
self.old_cwd = self.filesystem.getcwd()
self.filesystem.chdir(self.temp_dir)
+ self.webkit_base = WebKitFinder(self.filesystem).webkit_base()
# Trick commit-log-editor into thinking we're in a Subversion working copy so it won't
# complain about not being able to figure out what SCM is in use.
@@ -130,7 +132,7 @@ Second part of this complicated change by me, Tor Arne Vestb\u00f8!
return executive.run_command(*args, **kwargs)
detector = SCMDetector(self.filesystem, executive)
- real_scm = detector.detect_scm_system(self.old_cwd)
+ real_scm = detector.detect_scm_system(self.webkit_base)
mock_scm = MockSCM()
mock_scm.run = mock_run
@@ -141,7 +143,7 @@ Second part of this complicated change by me, Tor Arne Vestb\u00f8!
commit_message = checkout.commit_message_for_this_commit(git_commit=None, return_stderr=True)
# Throw away the first line - a warning about unknown VCS root.
commit_message.message_lines = commit_message.message_lines[1:]
- self.assertEqual(commit_message.message(), self.expected_commit_message)
+ self.assertMultiLineEqual(commit_message.message(), self.expected_commit_message)
class CheckoutTest(unittest.TestCase):
@@ -160,7 +162,7 @@ class CheckoutTest(unittest.TestCase):
checkout = self._make_checkout()
checkout._scm.contents_at_revision = mock_contents_at_revision
entry = checkout._latest_entry_for_changelog_at_revision("foo", "bar")
- self.assertEqual(entry.contents(), _changelog1entry1)
+ self.assertMultiLineEqual(entry.contents(), _changelog1entry1) # Pylint is confused about this line, pylint: disable=E1101
# FIXME: This tests a hack around our current changed_files handling.
# Right now changelog_entries_for_revision tries to fetch deleted files
@@ -191,10 +193,10 @@ class CheckoutTest(unittest.TestCase):
self.assertEqual(commitinfo.bug_id(), 36629)
self.assertEqual(commitinfo.author_name(), u"Tor Arne Vestb\u00f8")
self.assertEqual(commitinfo.author_email(), "vestbo@webkit.org")
- self.assertEqual(commitinfo.reviewer_text(), None)
- self.assertEqual(commitinfo.reviewer(), None)
+ self.assertIsNone(commitinfo.reviewer_text())
+ self.assertIsNone(commitinfo.reviewer())
self.assertEqual(commitinfo.committer_email(), "committer@example.com")
- self.assertEqual(commitinfo.committer(), None)
+ self.assertIsNone(commitinfo.committer())
self.assertEqual(commitinfo.to_json(), {
'bug_id': 36629,
'author_email': 'vestbo@webkit.org',
@@ -207,7 +209,7 @@ class CheckoutTest(unittest.TestCase):
})
checkout.changelog_entries_for_revision = lambda revision, changed_files=None: []
- self.assertEqual(checkout.commit_info_for_revision(1), None)
+ self.assertIsNone(checkout.commit_info_for_revision(1))
def test_bug_id_for_revision(self):
checkout = self._make_checkout()
@@ -247,11 +249,6 @@ class CheckoutTest(unittest.TestCase):
reviewer_names = [reviewer.full_name for reviewer in reviewers]
self.assertEqual(reviewer_names, [u'Tor Arne Vestb\xf8'])
- def test_chromium_deps(self):
- checkout = self._make_checkout()
- checkout._scm.checkout_root = "/foo/bar"
- self.assertEqual(checkout.chromium_deps()._path, '/foo/bar/Source/WebKit/chromium/DEPS')
-
def test_apply_patch(self):
checkout = self._make_checkout()
checkout._executive = MockExecutive(should_log=True)
diff --git a/Tools/Scripts/webkitpy/common/checkout/commitinfo.py b/Tools/Scripts/webkitpy/common/checkout/commitinfo.py
index cba3fdd64..79cb79f7c 100644
--- a/Tools/Scripts/webkitpy/common/checkout/commitinfo.py
+++ b/Tools/Scripts/webkitpy/common/checkout/commitinfo.py
@@ -45,7 +45,7 @@ class CommitInfo(object):
return self._revision
def committer(self):
- return self._committer # None if committer isn't in committers.py
+ return self._committer # None if committer isn't in contributors.json
def committer_email(self):
return self._committer_email
diff --git a/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py b/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py
index f58e6f1ea..826673de6 100644
--- a/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/commitinfo_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.config.committers import CommitterList, Committer, Reviewer
diff --git a/Tools/Scripts/webkitpy/common/checkout/deps.py b/Tools/Scripts/webkitpy/common/checkout/deps.py
deleted file mode 100644
index 2f3a8731e..000000000
--- a/Tools/Scripts/webkitpy/common/checkout/deps.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# Copyright (C) 2011, Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# WebKit's Python module for parsing and modifying ChangeLog files
-
-import codecs
-import fileinput
-import re
-import textwrap
-
-
-class DEPS(object):
-
- _variable_regexp = r"\s+'%s':\s+'(?P<value>\d+)'"
-
- def __init__(self, path):
- # FIXME: This should take a FileSystem object.
- self._path = path
-
- def read_variable(self, name):
- pattern = re.compile(self._variable_regexp % name)
- for line in fileinput.FileInput(self._path):
- match = pattern.match(line)
- if match:
- return int(match.group("value"))
-
- def write_variable(self, name, value):
- pattern = re.compile(self._variable_regexp % name)
- replacement_line = " '%s': '%s'" % (name, value)
- # inplace=1 creates a backup file and re-directs stdout to the file
- for line in fileinput.FileInput(self._path, inplace=1):
- if pattern.match(line):
- print replacement_line
- continue
- # Trailing comma suppresses printing newline
- print line,
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
index 2ed552c45..3a9ea9224 100644
--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
@@ -58,7 +58,7 @@ def git_diff_to_svn_diff(line):
# These regexp patterns should be compiled once instead of every time.
conversion_patterns = (("^diff --git \w/(.+) \w/(?P<FilePath>.+)", lambda matched: "Index: " + matched.group('FilePath') + "\n"),
("^new file.*", lambda matched: "\n"),
- ("^index [0-9a-f]{7}\.\.[0-9a-f]{7} [0-9]{6}", lambda matched: "===================================================================\n"),
+ ("^index (([0-9a-f]{7}\.\.[0-9a-f]{7})|([0-9a-f]{40}\.\.[0-9a-f]{40})) [0-9]{6}", lambda matched: "===================================================================\n"),
("^--- \w/(?P<FilePath>.+)", lambda matched: "--- " + matched.group('FilePath') + "\n"),
("^\+\+\+ \w/(?P<FilePath>.+)", lambda matched: "+++ " + matched.group('FilePath') + "\n"))
@@ -69,19 +69,27 @@ def git_diff_to_svn_diff(line):
return line
+# This function exists so we can unit-test the get_diff_converter function
+def svn_diff_to_svn_diff(line):
+ return line
+
+
# FIXME: This method belongs on DiffParser
-def get_diff_converter(first_diff_line):
+def get_diff_converter(lines):
"""Gets a converter function of diff lines.
Args:
- first_diff_line: The first filename line of a diff file.
- If this line is git formatted, we'll return a
- converter from git to SVN.
+ lines: The lines of a diff file.
+               If these lines are git formatted, we'll return a
+ converter from git to SVN.
"""
- if match(r"^diff --git \w/", first_diff_line):
- return git_diff_to_svn_diff
- return lambda input: input
-
+ for i, line in enumerate(lines[:-1]):
+ # Stop when we find the first patch
+ if line[:3] == "+++" and lines[i + 1] == "---":
+ break
+ if match(r"^diff --git \w/", line):
+ return git_diff_to_svn_diff
+ return svn_diff_to_svn_diff
_INITIAL_STATE = 1
_DECLARED_FILE_PATH = 2
@@ -142,10 +150,9 @@ class DiffParser(object):
current_file = None
old_diff_line = None
new_diff_line = None
+ transform_line = get_diff_converter(diff_input)
for line in diff_input:
line = line.rstrip("\n")
- if state == _INITIAL_STATE:
- transform_line = get_diff_converter(line)
line = transform_line(line)
file_declaration = match(r"^Index: (?P<FilePath>.+)", line)
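
get_diff_converter() now scans the leading lines of the patch instead of only the first line, so cover-letter text or a "Subversion Revision" line before a git-format diff no longer defeats detection. A rough self-contained sketch of that detection, simplified to stop at the first hunk header rather than reproducing webkitpy's exact boundary check:

    import re

    def looks_like_git_diff(lines):
        # Only the header lines matter; stop once the first hunk starts.
        for line in lines:
            if line.startswith("@@"):
                break
            if re.match(r"^diff --git \w/", line):
                return True
        return False

    # looks_like_git_diff(["Hey guys,\n", "diff --git a/foo.py b/foo.py\n"]) -> True
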
diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
index cba0f249e..78dab26bc 100644
--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser_unittest.py
@@ -26,13 +26,16 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import cStringIO as StringIO
+import unittest2 as unittest
import diff_parser
import re
from webkitpy.common.checkout.diff_test_data import DIFF_TEST_DATA
class DiffParserTest(unittest.TestCase):
+ maxDiff = None
+
def test_diff_parser(self, parser = None):
if not parser:
parser = diff_parser.DiffParser(DIFF_TEST_DATA.splitlines())
@@ -75,6 +78,42 @@ class DiffParserTest(unittest.TestCase):
self.assertEqual(1, len(diff.lines))
self.assertEqual((0, 1), diff.lines[0][0:2])
+ def test_diff_converter(self):
+ comment_lines = [
+ "Hey guys,\n",
+ "\n",
+ "See my awesome patch below!\n",
+ "\n",
+ " - Cool Hacker\n",
+ "\n",
+ ]
+
+ revision_lines = [
+ "Subversion Revision 289799\n",
+ ]
+
+ svn_diff_lines = [
+ "Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "===================================================================\n",
+ "--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "+++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
+ ]
+ self.assertEqual(diff_parser.get_diff_converter(svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
+ self.assertEqual(diff_parser.get_diff_converter(comment_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
+ self.assertEqual(diff_parser.get_diff_converter(revision_lines + svn_diff_lines), diff_parser.svn_diff_to_svn_diff)
+
+ git_diff_lines = [
+ "diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "index 3c5b45b..0197ead 100644\n",
+ "--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "+++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py\n",
+ "@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):\n",
+ ]
+ self.assertEqual(diff_parser.get_diff_converter(git_diff_lines), diff_parser.git_diff_to_svn_diff)
+ self.assertEqual(diff_parser.get_diff_converter(comment_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)
+ self.assertEqual(diff_parser.get_diff_converter(revision_lines + git_diff_lines), diff_parser.git_diff_to_svn_diff)
+
def test_git_mnemonicprefix(self):
p = re.compile(r' ([a|b])/')
@@ -90,5 +129,47 @@ class DiffParserTest(unittest.TestCase):
patch = p.sub(lambda x: " %s/" % prefix[x.group(1)], DIFF_TEST_DATA)
self.test_diff_parser(diff_parser.DiffParser(patch.splitlines()))
-if __name__ == '__main__':
- unittest.main()
+ def test_git_diff_to_svn_diff(self):
+ output = """\
+Index: Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+===================================================================
+--- Tools/Scripts/webkitpy/common/checkout/diff_parser.py
++++ Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
+ A
+ B
+ C
++D
+ E
+ F
+"""
+
+ inputfmt = StringIO.StringIO("""\
+diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+index 2ed552c4555db72df16b212547f2c125ae301a04..72870482000c0dba64ce4300ed782c03ee79b74f 100644
+--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
++++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
+ A
+ B
+ C
++D
+ E
+ F
+""")
+ shortfmt = StringIO.StringIO("""\
+diff --git a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+index b48b162..f300960 100644
+--- a/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
++++ b/Tools/Scripts/webkitpy/common/checkout/diff_parser.py
+@@ -59,6 +59,7 @@ def git_diff_to_svn_diff(line):
+ A
+ B
+ C
++D
+ E
+ F
+""")
+
+ self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in shortfmt.readlines()))
+ self.assertMultiLineEqual(output, ''.join(diff_parser.git_diff_to_svn_diff(x) for x in inputfmt.readlines()))
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py b/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py
index 1d7484826..593f093c4 100644
--- a/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/detection_unittest.py
@@ -28,7 +28,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from .detection import SCMDetector
from webkitpy.common.system.filesystem_mock import MockFileSystem
@@ -42,7 +42,10 @@ class SCMDetectorTest(unittest.TestCase):
executive = MockExecutive(should_log=True)
detector = SCMDetector(filesystem, executive)
- expected_logs = "MOCK run_command: ['svn', 'info'], cwd=/\nMOCK run_command: ['git', 'rev-parse', '--is-inside-work-tree'], cwd=/\n"
+ expected_logs = """\
+MOCK run_command: ['svn', 'info'], cwd=/
+MOCK run_command: ['git', 'rev-parse', '--is-inside-work-tree'], cwd=/
+"""
scm = OutputCapture().assert_outputs(self, detector.detect_scm_system, ["/"], expected_logs=expected_logs)
- self.assertEqual(scm, None)
+ self.assertIsNone(scm)
# FIXME: This should make a synthetic tree and test SVN and Git detection in that tree.
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/git.py b/Tools/Scripts/webkitpy/common/checkout/scm/git.py
index 6313256d8..58eda7032 100644
--- a/Tools/Scripts/webkitpy/common/checkout/scm/git.py
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/git.py
@@ -27,6 +27,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import datetime
import logging
import os
import re
@@ -41,16 +42,12 @@ from .svn import SVN, SVNRepository
_log = logging.getLogger(__name__)
-def run_command(*args, **kwargs):
- # FIXME: This should not be a global static.
- # New code should use Executive.run_command directly instead
- return Executive().run_command(*args, **kwargs)
-
-
class AmbiguousCommitError(Exception):
- def __init__(self, num_local_commits, working_directory_is_clean):
+ def __init__(self, num_local_commits, has_working_directory_changes):
+ Exception.__init__(self, "Found %s local commits and the working directory is %s" % (
+ num_local_commits, ["clean", "not clean"][has_working_directory_changes]))
self.num_local_commits = num_local_commits
- self.working_directory_is_clean = working_directory_is_clean
+ self.has_working_directory_changes = has_working_directory_changes
class Git(SCM, SVNRepository):
@@ -126,12 +123,13 @@ class Git(SCM, SVNRepository):
return filepath.replace(root_end_with_slash, '')
@classmethod
- def read_git_config(cls, key, cwd=None):
+ def read_git_config(cls, key, cwd=None, executive=None):
# FIXME: This should probably use cwd=self.checkout_root.
# Pass --get-all for cases where the config has multiple values
# Pass the cwd if provided so that we can handle the case of running webkit-patch outside of the working directory.
# FIXME: This should use an Executive.
- return run_command([cls.executable_name, "config", "--get-all", key], error_handler=Executive.ignore_error, cwd=cwd).rstrip('\n')
+ executive = executive or Executive()
+ return executive.run_command([cls.executable_name, "config", "--get-all", key], error_handler=Executive.ignore_error, cwd=cwd).rstrip('\n')
@staticmethod
def commit_success_regexp():
@@ -146,13 +144,13 @@ class Git(SCM, SVNRepository):
def rebase_in_progress(self):
return self._filesystem.exists(self.absolute_path(self._filesystem.join('.git', 'rebase-apply')))
- def working_directory_is_clean(self):
- return self._run_git(['diff', 'HEAD', '--no-renames', '--name-only']) == ""
+ def has_working_directory_changes(self):
+ return self._run_git(['diff', 'HEAD', '--no-renames', '--name-only']) != ""
- def clean_working_directory(self):
- # Could run git clean here too, but that wouldn't match working_directory_is_clean
- self._run_git(['reset', '--hard', 'HEAD'])
- # Aborting rebase even though this does not match working_directory_is_clean
+ def discard_working_directory_changes(self):
+ # Could run git clean here too, but that wouldn't match subversion
+ self._run_git(['reset', 'HEAD', '--hard'])
+ # Aborting rebase even though this does not match subversion
if self.rebase_in_progress():
self._run_git(['rebase', '--abort'])
@@ -164,8 +162,8 @@ class Git(SCM, SVNRepository):
def _status_regexp(self, expected_types):
return '^(?P<status>[%s])\t(?P<filename>.+)$' % expected_types
- def add_list(self, paths, return_exit_code=False):
- return self._run_git(["add"] + paths, return_exit_code=return_exit_code)
+ def add_list(self, paths):
+ self._run_git(["add"] + paths)
def delete_list(self, paths):
return self._run_git(["rm", "-f"] + paths)
@@ -182,7 +180,7 @@ class Git(SCM, SVNRepository):
def _upstream_branch(self):
current_branch = self._current_branch()
- return self._branch_from_ref(self.read_git_config('branch.%s.merge' % current_branch, cwd=self.checkout_root).strip())
+ return self._branch_from_ref(self.read_git_config('branch.%s.merge' % current_branch, cwd=self.checkout_root, executive=self._executive).strip())
def merge_base(self, git_commit):
if git_commit:
@@ -248,14 +246,33 @@ class Git(SCM, SVNRepository):
def display_name(self):
return "git"
+ def _most_recent_log_matching(self, grep_str, path):
+ # We use '--grep=' + foo rather than '--grep', foo because
+ # git 1.7.0.4 (and earlier) didn't support the separate arg.
+ return self._run_git(['log', '-1', '--grep=' + grep_str, '--date=iso', self.find_checkout_root(path)])
+
def svn_revision(self, path):
- _log.debug('Running git.head_svn_revision... (Temporary logging message)')
- git_log = self._run_git(['log', '-25', path])
+ git_log = self._most_recent_log_matching('git-svn-id:', path)
match = re.search("^\s*git-svn-id:.*@(?P<svn_revision>\d+)\ ", git_log, re.MULTILINE)
if not match:
return ""
return str(match.group('svn_revision'))
+ def timestamp_of_revision(self, path, revision):
+ git_log = self._most_recent_log_matching('git-svn-id:.*@%s' % revision, path)
+ match = re.search("^Date:\s*(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2}) ([+-])(\d{2})(\d{2})$", git_log, re.MULTILINE)
+ if not match:
+ return ""
+
+ # Manually modify the timezone since Git doesn't have an option to show it in UTC.
+ # Git also truncates milliseconds but we're going to ignore that for now.
+ time_with_timezone = datetime.datetime(int(match.group(1)), int(match.group(2)), int(match.group(3)),
+ int(match.group(4)), int(match.group(5)), int(match.group(6)), 0)
+
+ sign = 1 if match.group(7) == '+' else -1
+ time_without_timezone = time_with_timezone - datetime.timedelta(hours=sign * int(match.group(8)), minutes=int(match.group(9)))
+ return time_without_timezone.strftime('%Y-%m-%dT%H:%M:%SZ')
+
def prepend_svn_revision(self, diff):
revision = self.head_svn_revision()
if not revision:
@@ -293,6 +310,9 @@ class Git(SCM, SVNRepository):
@memoized
def git_commit_from_svn_revision(self, svn_revision):
+ # FIXME: https://bugs.webkit.org/show_bug.cgi?id=111668
+ # We should change this to run git log --grep 'git-svn-id' instead
+ # so that we don't require git+svn to be set up.
git_commit = self._run_git_svn_find_rev('r%s' % svn_revision)
if not git_commit:
# FIXME: Alternatively we could offer to update the checkout? Or return None?
@@ -334,30 +354,30 @@ class Git(SCM, SVNRepository):
def revert_files(self, file_paths):
self._run_git(['checkout', 'HEAD'] + file_paths)
- def _assert_can_squash(self, working_directory_is_clean):
- squash = Git.read_git_config('webkit-patch.commit-should-always-squash', cwd=self.checkout_root)
+ def _assert_can_squash(self, has_working_directory_changes):
+ squash = self.read_git_config('webkit-patch.commit-should-always-squash', cwd=self.checkout_root, executive=self._executive)
should_squash = squash and squash.lower() == "true"
if not should_squash:
# Only warn if there are actually multiple commits to squash.
num_local_commits = len(self.local_commits())
- if num_local_commits > 1 or (num_local_commits > 0 and not working_directory_is_clean):
- raise AmbiguousCommitError(num_local_commits, working_directory_is_clean)
+ if num_local_commits > 1 or (num_local_commits > 0 and has_working_directory_changes):
+ raise AmbiguousCommitError(num_local_commits, has_working_directory_changes)
def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
# Username is ignored during Git commits.
- working_directory_is_clean = self.working_directory_is_clean()
+ has_working_directory_changes = self.has_working_directory_changes()
if git_commit:
# Special-case HEAD.. to mean working-copy changes only.
if git_commit.upper() == 'HEAD..':
- if working_directory_is_clean:
+ if not has_working_directory_changes:
raise ScriptError(message="The working copy is not modified. --git-commit=HEAD.. only commits working copy changes.")
self.commit_locally_with_message(message)
return self._commit_on_branch(message, 'HEAD', username=username, password=password)
# Need working directory changes to be committed so we can checkout the merge branch.
- if not working_directory_is_clean:
+ if has_working_directory_changes:
# FIXME: webkit-patch land will modify the ChangeLogs to correct the reviewer.
# That will modify the working-copy and cause us to hit this error.
# The ChangeLog modification could be made to modify the existing local commit.
@@ -365,7 +385,7 @@ class Git(SCM, SVNRepository):
return self._commit_on_branch(message, git_commit, username=username, password=password)
if not force_squash:
- self._assert_can_squash(working_directory_is_clean)
+ self._assert_can_squash(has_working_directory_changes)
self._run_git(['reset', '--soft', self.remote_merge_base()])
self.commit_locally_with_message(message)
return self.push_local_commits_to_server(username=username, password=password)
@@ -406,7 +426,7 @@ class Git(SCM, SVNRepository):
commit_succeeded = False
finally:
# And then swap back to the original branch and clean up.
- self.clean_working_directory()
+ self.discard_working_directory_changes()
self._run_git(['checkout', '-q', branch_name])
self.delete_branch(MERGE_BRANCH_NAME)
@@ -435,7 +455,7 @@ class Git(SCM, SVNRepository):
def remote_branch_ref(self):
# Use references so that we can avoid collisions, e.g. we don't want to operate on refs/heads/trunk if it exists.
- remote_branch_refs = Git.read_git_config('svn-remote.svn.fetch', cwd=self.checkout_root)
+ remote_branch_refs = self.read_git_config('svn-remote.svn.fetch', cwd=self.checkout_root, executive=self._executive)
if not remote_branch_refs:
remote_master_ref = 'refs/remotes/origin/master'
if not self._branch_ref_exists(remote_master_ref):
@@ -452,8 +472,8 @@ class Git(SCM, SVNRepository):
def push_local_commits_to_server(self, username=None, password=None):
dcommit_command = ['svn', 'dcommit']
- if (not username or not password) and not self.has_authorization_for_realm(SVN.svn_server_realm):
- raise AuthenticationError(SVN.svn_server_host, prompt_for_password=True)
+ if (not username or not password) and not self.has_authorization_for_realm(self.svn_server_realm):
+ raise AuthenticationError(self.svn_server_host, prompt_for_password=True)
if username:
dcommit_command.extend(["--username", username])
output = self._run_git(dcommit_command, error_handler=commit_error_handler, input=password)
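
timestamp_of_revision() above normalizes the git-svn log date to UTC by subtracting the signed timezone offset by hand, because this version of git has no option to print the date in UTC directly. A small sketch of the same conversion, assuming a date of the form used by MockSCM ("2013-02-01 08:48:05 +0000"):

    import datetime
    import re

    def to_utc_iso(date_line):
        m = re.match(r"(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2}) ([+-])(\d{2})(\d{2})$", date_line)
        local = datetime.datetime(*map(int, m.groups()[:6]))
        sign = 1 if m.group(7) == '+' else -1
        # Subtract the offset to move the local wall-clock time back to UTC.
        utc = local - datetime.timedelta(hours=sign * int(m.group(8)), minutes=int(m.group(9)))
        return utc.strftime('%Y-%m-%dT%H:%M:%SZ')

    print to_utc_iso("2013-02-01 08:48:05 +0000")  # 2013-02-01T08:48:05Z
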
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/scm.py b/Tools/Scripts/webkitpy/common/checkout/scm/scm.py
index 7d6e1804d..b005ea239 100644
--- a/Tools/Scripts/webkitpy/common/checkout/scm/scm.py
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/scm.py
@@ -90,26 +90,6 @@ class SCM:
def script_path(self, script_name):
return self._filesystem.join(self.scripts_directory(), script_name)
- def ensure_clean_working_directory(self, force_clean):
- if self.working_directory_is_clean():
- return
- if not force_clean:
- print self.run(self.status_command(), error_handler=Executive.ignore_error, cwd=self.checkout_root)
- raise ScriptError(message="Working directory has modifications, pass --force-clean or --no-clean to continue.")
- _log.info("Cleaning working directory")
- self.clean_working_directory()
-
- def ensure_no_local_commits(self, force):
- if not self.supports_local_commits():
- return
- commits = self.local_commits()
- if not len(commits):
- return
- if not force:
- _log.error("Working directory has local commits, pass --force-clean to continue.")
- sys.exit(1)
- self.discard_local_commits()
-
def run_status_and_extract_filenames(self, status_command, status_regexp):
filenames = []
# We run with cwd=self.checkout_root so that returned-paths are root-relative.
@@ -147,19 +127,13 @@ class SCM:
def commit_success_regexp():
SCM._subclass_must_implement()
- def working_directory_is_clean(self):
- self._subclass_must_implement()
-
- def clean_working_directory(self):
- self._subclass_must_implement()
-
def status_command(self):
self._subclass_must_implement()
- def add(self, path, return_exit_code=False):
- self.add_list([path], return_exit_code)
+ def add(self, path):
+ self.add_list([path])
- def add_list(self, paths, return_exit_code=False):
+ def add_list(self, paths):
self._subclass_must_implement()
def delete(self, path):
@@ -193,6 +167,10 @@ class SCM:
return self.svn_revision(self.checkout_root)
def svn_revision(self, path):
+ """Returns the latest svn revision found in the checkout."""
+ self._subclass_must_implement()
+
+ def timestamp_of_revision(self, path, revision):
self._subclass_must_implement()
def create_patch(self, git_commit=None, changed_files=None):
@@ -231,12 +209,28 @@ class SCM:
def svn_blame(self, path):
self._subclass_must_implement()
+ def has_working_directory_changes(self):
+ self._subclass_must_implement()
+
+ def discard_working_directory_changes(self):
+ self._subclass_must_implement()
+
+ #--------------------------------------------------------------------------
# Subclasses must indicate if they support local commits,
# but the SCM baseclass will only call local_commits methods when this is true.
@staticmethod
def supports_local_commits():
SCM._subclass_must_implement()
+ def local_commits(self):
+ return []
+
+ def has_local_commits(self):
+ return len(self.local_commits()) > 0
+
+ def discard_local_commits(self):
+ return
+
def remote_merge_base(self):
SCM._subclass_must_implement()
@@ -244,8 +238,12 @@ class SCM:
_log.error("Your source control manager does not support local commits.")
sys.exit(1)
- def discard_local_commits(self):
- pass
+ def local_changes_exist(self):
+ return (self.supports_local_commits() and self.has_local_commits()) or self.has_working_directory_changes()
- def local_commits(self):
- return []
+ def discard_local_changes(self):
+ if self.has_working_directory_changes():
+ self.discard_working_directory_changes()
+
+ if self.has_local_commits():
+ self.discard_local_commits()
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py b/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
index 9c5d3af0d..c5d10fcb1 100644
--- a/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
@@ -38,21 +38,28 @@ class MockSCM(object):
self._filesystem = filesystem or MockFileSystem()
self._executive = executive or MockExecutive()
- def add(self, destination_path, return_exit_code=False):
- self.add_list([destination_path], return_exit_code)
+ def add(self, destination_path):
+ self.add_list([destination_path])
- def add_list(self, destination_paths, return_exit_code=False):
+ def add_list(self, destination_paths):
self.added_paths.update(set(destination_paths))
- if return_exit_code:
- return 0
- def ensure_clean_working_directory(self, force_clean):
+ def has_working_directory_changes(self):
+ return False
+
+ def discard_working_directory_changes(self):
pass
def supports_local_commits(self):
return True
- def ensure_no_local_commits(self, force_clean):
+ def has_local_commits(self):
+ return False
+
+ def discard_local_commits(self):
+ pass
+
+ def discard_local_changes(self):
pass
def exists(self, path):
@@ -75,6 +82,9 @@ class MockSCM(object):
def svn_revision(self, path):
return '5678'
+ def timestamp_of_revision(self, path, revision):
+ return '2013-02-01 08:48:05 +0000'
+
def create_patch(self, git_commit, changed_files=None):
return "Patch1"
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py b/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py
index cff254e63..15432f0e8 100644
--- a/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/scm_unittest.py
@@ -40,7 +40,7 @@ import sys
import subprocess
import tempfile
import time
-import unittest
+import unittest2 as unittest
import urllib
import shutil
@@ -49,14 +49,15 @@ from webkitpy.common.checkout.checkout import Checkout
from webkitpy.common.config.committers import Committer # FIXME: This should not be needed
from webkitpy.common.net.bugzilla import Attachment # FIXME: This should not be needed
from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.executive_mock import MockExecutive
-
from .git import Git, AmbiguousCommitError
from .detection import detect_scm_system
from .scm import SCM, CheckoutNeedsUpdate, commit_error_handler, AuthenticationError
from .svn import SVN
+
# We cache the mock SVN repo so that we don't create it again for each call to an SVNTest or GitTest test_ method.
# We store it in a global variable so that we can delete this cached repo on exit(3).
# FIXME: Remove this once we migrate to Python 2.7. Unittest in Python 2.7 supports module-specific setup and teardown functions.
@@ -125,7 +126,7 @@ def _git_diff(*args):
# Exists to share svn repository creation code between the git and svn tests
-class SVNTestRepository:
+class SVNTestRepository(object):
@classmethod
def _svn_add(cls, path):
run_command(["svn", "add", path])
@@ -248,7 +249,7 @@ class SCMClassTests(unittest.TestCase):
command_returns_non_zero = ['/bin/sh', '--invalid-option']
# Test when the input pipe process fails.
input_process = subprocess.Popen(command_returns_non_zero, stdout=subprocess.PIPE, stderr=self.dev_null)
- self.assertTrue(input_process.poll() != 0)
+ self.assertNotEqual(input_process.poll(), 0)
self.assertRaises(ScriptError, run_command, ['grep', 'bar'], input=input_process.stdout)
# Test when the run_command process fails.
@@ -299,18 +300,18 @@ class SCMTest(unittest.TestCase):
def _shared_test_changed_files(self):
write_into_file_at_path("test_file", "changed content")
- self.assertEqual(self.scm.changed_files(), ["test_file"])
+ self.assertItemsEqual(self.scm.changed_files(), ["test_file"])
write_into_file_at_path("test_dir/test_file3", "new stuff")
- self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
+ self.assertItemsEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
old_cwd = os.getcwd()
os.chdir("test_dir")
# Validate that changed_files does not change with our cwd, see bug 37015.
- self.assertEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
+ self.assertItemsEqual(self.scm.changed_files(), ["test_dir/test_file3", "test_file"])
os.chdir(old_cwd)
def _shared_test_added_files(self):
write_into_file_at_path("test_file", "changed content")
- self.assertEqual(self.scm.added_files(), [])
+ self.assertItemsEqual(self.scm.added_files(), [])
write_into_file_at_path("added_file", "new stuff")
self.scm.add("added_file")
@@ -327,11 +328,11 @@ class SCMTest(unittest.TestCase):
added_files = self.scm.added_files()
if "added_dir" in added_files:
added_files.remove("added_dir")
- self.assertEqual(added_files, ["added_dir/added_file2", "added_file", "added_file3", "added_file4"])
+ self.assertItemsEqual(added_files, ["added_dir/added_file2", "added_file", "added_file3", "added_file4"])
- # Test also to make sure clean_working_directory removes added files
- self.scm.clean_working_directory()
- self.assertEqual(self.scm.added_files(), [])
+ # Test also to make sure discard_working_directory_changes removes added files
+ self.scm.discard_working_directory_changes()
+ self.assertItemsEqual(self.scm.added_files(), [])
self.assertFalse(os.path.exists("added_file"))
self.assertFalse(os.path.exists("added_file3"))
self.assertFalse(os.path.exists("added_file4"))
@@ -342,9 +343,9 @@ class SCMTest(unittest.TestCase):
changed_files = self.scm.changed_files_for_revision(3)
if "test_dir" in changed_files:
changed_files.remove("test_dir")
- self.assertEqual(changed_files, ["test_dir/test_file3", "test_file"])
- self.assertEqual(sorted(self.scm.changed_files_for_revision(4)), sorted(["test_file", "test_file2"])) # Git and SVN return different orders.
- self.assertEqual(self.scm.changed_files_for_revision(2), ["test_file"])
+ self.assertItemsEqual(changed_files, ["test_dir/test_file3", "test_file"])
+ self.assertItemsEqual(self.scm.changed_files_for_revision(4), ["test_file", "test_file2"]) # Git and SVN return different orders.
+ self.assertItemsEqual(self.scm.changed_files_for_revision(2), ["test_file"])
def _shared_test_contents_at_revision(self):
self.assertEqual(self.scm.contents_at_revision("test_file", 3), "test1test2")
@@ -362,7 +363,7 @@ class SCMTest(unittest.TestCase):
self.assertRaises(ScriptError, self.scm.contents_at_revision, "does_not_exist", 2)
def _shared_test_revisions_changing_file(self):
- self.assertEqual(self.scm.revisions_changing_file("test_file"), [5, 4, 3, 2])
+ self.assertItemsEqual(self.scm.revisions_changing_file("test_file"), [5, 4, 3, 2])
self.assertRaises(ScriptError, self.scm.revisions_changing_file, "non_existent_file")
def _shared_test_committer_email_for_revision(self):
@@ -377,10 +378,10 @@ class SCMTest(unittest.TestCase):
def _shared_test_diff_for_revision(self):
# Patch formats are slightly different between svn and git, so just regexp for things we know should be there.
r3_patch = self.scm.diff_for_revision(4)
- self.assertTrue(re.search('test3', r3_patch))
- self.assertFalse(re.search('test4', r3_patch))
- self.assertTrue(re.search('test2', r3_patch))
- self.assertTrue(re.search('test2', self.scm.diff_for_revision(3)))
+ self.assertRegexpMatches(r3_patch, 'test3')
+ self.assertNotRegexpMatches(r3_patch, 'test4')
+ self.assertRegexpMatches(r3_patch, 'test2')
+ self.assertRegexpMatches(self.scm.diff_for_revision(3), 'test2')
def _shared_test_svn_apply_git_patch(self):
self._setup_webkittools_scripts_symlink(self.scm)
@@ -409,7 +410,7 @@ HcmV?d00001
added = read_from_path('fizzbuzz7.gif', encoding=None)
self.assertEqual(512, len(added))
self.assertTrue(added.startswith('GIF89a'))
- self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
+ self.assertIn('fizzbuzz7.gif', self.scm.changed_files())
# The file already exists.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_addition))
@@ -436,7 +437,7 @@ ptUl-ZG<%a~#LwkIWv&q!KSCH7tQ8cJDiw+|GV?MN)RjY50RTb-xvT&H
self.checkout.apply_patch(self._create_patch(git_binary_modification))
modified = read_from_path('fizzbuzz7.gif', encoding=None)
self.assertEqual('foobar\n', modified)
- self.assertTrue('fizzbuzz7.gif' in self.scm.changed_files())
+ self.assertIn('fizzbuzz7.gif', self.scm.changed_files())
# Applying the same modification should fail.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_modification))
@@ -454,7 +455,7 @@ OcmYex&reD$;sO8*F9L)B
"""
self.checkout.apply_patch(self._create_patch(git_binary_deletion))
self.assertFalse(os.path.exists('fizzbuzz7.gif'))
- self.assertFalse('fizzbuzz7.gif' in self.scm.changed_files())
+ self.assertNotIn('fizzbuzz7.gif', self.scm.changed_files())
# Cannot delete again.
self.assertRaises(ScriptError, self.checkout.apply_patch, self._create_patch(git_binary_deletion))
@@ -463,15 +464,15 @@ OcmYex&reD$;sO8*F9L)B
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
- self.assertTrue("added_dir/added_file" in self.scm.added_files())
+ self.assertIn("added_dir/added_file", self.scm.added_files())
def _shared_test_delete_recursively(self):
os.mkdir("added_dir")
write_into_file_at_path("added_dir/added_file", "new stuff")
self.scm.add("added_dir/added_file")
- self.assertTrue("added_dir/added_file" in self.scm.added_files())
+ self.assertIn("added_dir/added_file", self.scm.added_files())
self.scm.delete("added_dir/added_file")
- self.assertFalse("added_dir" in self.scm.added_files())
+ self.assertNotIn("added_dir", self.scm.added_files())
def _shared_test_delete_recursively_or_not(self):
os.mkdir("added_dir")
@@ -479,10 +480,10 @@ OcmYex&reD$;sO8*F9L)B
write_into_file_at_path("added_dir/another_added_file", "more new stuff")
self.scm.add("added_dir/added_file")
self.scm.add("added_dir/another_added_file")
- self.assertTrue("added_dir/added_file" in self.scm.added_files())
- self.assertTrue("added_dir/another_added_file" in self.scm.added_files())
+ self.assertIn("added_dir/added_file", self.scm.added_files())
+ self.assertIn("added_dir/another_added_file", self.scm.added_files())
self.scm.delete("added_dir/added_file")
- self.assertTrue("added_dir/another_added_file" in self.scm.added_files())
+ self.assertIn("added_dir/another_added_file", self.scm.added_files())
def _shared_test_exists(self, scm, commit_function):
os.chdir(scm.checkout_root)
@@ -626,6 +627,7 @@ class SVNTest(SCMTest):
SVNTestRepository.setup(self)
os.chdir(self.svn_checkout_path)
self.scm = detect_scm_system(self.svn_checkout_path)
+ self.scm.svn_server_realm = None
# For historical reasons, we test some checkout code here too.
self.checkout = Checkout(self.scm)
@@ -661,9 +663,8 @@ class SVNTest(SCMTest):
self.assertEqual("%s\n" % os.path.realpath(scm.checkout_root), patch_contents) # Add a \n because echo adds a \n.
def test_detection(self):
- scm = detect_scm_system(self.svn_checkout_path)
- self.assertEqual(scm.display_name(), "svn")
- self.assertEqual(scm.supports_local_commits(), False)
+ self.assertEqual(self.scm.display_name(), "svn")
+ self.assertEqual(self.scm.supports_local_commits(), False)
def test_apply_small_binary_patch(self):
patch_contents = """Index: test_file.swf
@@ -687,15 +688,14 @@ Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
self.assertEqual(actual_contents, expected_contents)
def test_apply_svn_patch(self):
- scm = detect_scm_system(self.svn_checkout_path)
patch = self._create_patch(_svn_diff("-r5:4"))
- self._setup_webkittools_scripts_symlink(scm)
- Checkout(scm).apply_patch(patch)
+ self._setup_webkittools_scripts_symlink(self.scm)
+ Checkout(self.scm).apply_patch(patch)
def test_commit_logs(self):
# Commits have dates and usernames in them, so we can't just direct compare.
- self.assertTrue(re.search('fourth commit', self.scm.last_svn_commit_log()))
- self.assertTrue(re.search('second commit', self.scm.svn_commit_log(3)))
+ self.assertRegexpMatches(self.scm.last_svn_commit_log(), 'fourth commit')
+ self.assertRegexpMatches(self.scm.svn_commit_log(3), 'second commit')
def _shared_test_commit_with_message(self, username=None):
write_into_file_at_path('test_file', 'more test content')
@@ -716,7 +716,12 @@ Q1dTBx0AAAB42itg4GlgYJjGwMDDyODMxMDw34GBgQEAJPQDJA==
self._shared_test_commit_with_message("dbates@webkit.org")
def test_commit_without_authorization(self):
- self.scm.has_authorization_for_realm = lambda realm: False
+ # FIXME: https://bugs.webkit.org/show_bug.cgi?id=111669
+        # This test ends up looking in the actual $HOME/.subversion for authorization,
+ # which makes it fragile. For now, set it to use a realm that won't be authorized,
+ # but we should really plumb through a fake_home_dir here like we do in
+ # test_has_authorization_for_realm.
+ self.scm.svn_server_realm = '<http://svn.example.com:80> Example'
self.assertRaises(AuthenticationError, self._shared_test_commit_with_message)
def test_has_authorization_for_realm_using_credentials_with_passtype(self):
@@ -756,13 +761,12 @@ END
self.assertTrue(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def _test_has_authorization_for_realm_using_credentials(self, realm, credentials):
- scm = detect_scm_system(self.svn_checkout_path)
fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
os.mkdir(svn_config_dir_path)
fake_webkit_auth_file = os.path.join(svn_config_dir_path, "fake_webkit_auth_file")
write_into_file_at_path(fake_webkit_auth_file, credentials)
- result = scm.has_authorization_for_realm(realm, home_directory=fake_home_dir)
+ result = self.scm.has_authorization_for_realm(realm, home_directory=fake_home_dir)
os.remove(fake_webkit_auth_file)
os.rmdir(svn_config_dir_path)
os.rmdir(fake_home_dir)
@@ -783,11 +787,10 @@ END
self.assertFalse(self._test_has_authorization_for_realm_using_credentials(SVN.svn_server_realm, credentials))
def test_not_have_authorization_for_realm_when_missing_credentials_file(self):
- scm = detect_scm_system(self.svn_checkout_path)
fake_home_dir = tempfile.mkdtemp(suffix="fake_home_dir")
svn_config_dir_path = os.path.join(fake_home_dir, ".subversion")
os.mkdir(svn_config_dir_path)
- self.assertFalse(scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))
+ self.assertFalse(self.scm.has_authorization_for_realm(SVN.svn_server_realm, home_directory=fake_home_dir))
os.rmdir(svn_config_dir_path)
os.rmdir(fake_home_dir)
@@ -824,13 +827,13 @@ END
def test_delete(self):
os.chdir(self.svn_checkout_path)
self.scm.delete("test_file")
- self.assertTrue("test_file" in self.scm.deleted_files())
+ self.assertIn("test_file", self.scm.deleted_files())
def test_delete_list(self):
os.chdir(self.svn_checkout_path)
self.scm.delete_list(["test_file", "test_file2"])
- self.assertTrue("test_file" in self.scm.deleted_files())
- self.assertTrue("test_file2" in self.scm.deleted_files())
+ self.assertIn("test_file", self.scm.deleted_files())
+ self.assertIn("test_file2", self.scm.deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
@@ -867,8 +870,8 @@ END
write_into_file_at_path("test_file", "changed content")
diff = self.scm.diff_for_file('test_file')
- self.assertTrue("-some content" in diff)
- self.assertTrue("+changed content" in diff)
+ self.assertIn("-some content", diff)
+ self.assertIn("+changed content", diff)
def clean_bogus_dir(self):
self.bogus_dir = self.scm._bogus_dir_name()
@@ -888,13 +891,17 @@ END
self.assertFalse(os.path.exists(self.bogus_dir))
def test_svn_lock(self):
- svn_root_lock_path = ".svn/lock"
- write_into_file_at_path(svn_root_lock_path, "", "utf-8")
- # webkit-patch uses a Checkout object and runs update-webkit, just use svn update here.
- self.assertRaises(ScriptError, run_command, ['svn', 'update'])
- self.scm.clean_working_directory()
- self.assertFalse(os.path.exists(svn_root_lock_path))
- run_command(['svn', 'update']) # Should succeed and not raise.
+ if self.scm.svn_version() >= "1.7":
+ # the following technique with .svn/lock then svn update doesn't work with subversion client 1.7 or later
+ pass
+ else:
+ svn_root_lock_path = ".svn/lock"
+ write_into_file_at_path(svn_root_lock_path, "", "utf-8")
+ # webkit-patch uses a Checkout object and runs update-webkit, just use svn update here.
+ self.assertRaises(ScriptError, run_command, ['svn', 'update'])
+ self.scm.discard_working_directory_changes()
+ self.assertFalse(os.path.exists(svn_root_lock_path))
+ run_command(['svn', 'update']) # Should succeed and not raise.
def test_exists(self):
self._shared_test_exists(self.scm, self.scm.commit_with_message)
@@ -946,7 +953,7 @@ class GitTest(SCMTest):
scm.commit_locally_with_message('message')
patch = scm.create_patch()
- self.assertFalse(re.search(r'Subversion Revision:', patch))
+ self.assertNotRegexpMatches(patch, r'Subversion Revision:')
def test_orderfile(self):
os.mkdir("Tools")
@@ -1008,7 +1015,7 @@ class GitTest(SCMTest):
def test_head_svn_revision(self):
scm = detect_scm_system(self.untracking_checkout_path)
- # If we cloned a git repo tracking an SVG repo, this would give the same result as
+ # If we cloned a git repo tracking an SVN repo, this would give the same result as
# self._shared_test_head_svn_revision().
self.assertEqual(scm.head_svn_revision(), '')
@@ -1019,8 +1026,8 @@ class GitTest(SCMTest):
scm.commit_locally_with_message('message')
patch = scm.create_patch()
- self.assertFalse(re.search(r'rename from ', patch))
- self.assertFalse(re.search(r'rename to ', patch))
+ self.assertNotRegexpMatches(patch, r'rename from ')
+ self.assertNotRegexpMatches(patch, r'rename to ')
class GitSVNTest(SCMTest):
@@ -1042,6 +1049,7 @@ class GitSVNTest(SCMTest):
SVNTestRepository.setup(self)
self._setup_git_checkout()
self.scm = detect_scm_system(self.git_checkout_path)
+ self.scm.svn_server_realm = None
# For historical reasons, we test some checkout code here too.
self.checkout = Checkout(self.scm)
@@ -1050,9 +1058,8 @@ class GitSVNTest(SCMTest):
self._tear_down_git_checkout()
def test_detection(self):
- scm = detect_scm_system(self.git_checkout_path)
- self.assertEqual(scm.display_name(), "git")
- self.assertEqual(scm.supports_local_commits(), True)
+ self.assertEqual(self.scm.display_name(), "git")
+ self.assertEqual(self.scm.supports_local_commits(), True)
def test_read_git_config(self):
key = 'test.git-config'
@@ -1085,7 +1092,7 @@ class GitSVNTest(SCMTest):
run_command(['git', 'checkout', '-b', 'bar'])
self.scm.delete_branch(new_branch)
- self.assertFalse(re.search(r'foo', run_command(['git', 'branch'])))
+ self.assertNotRegexpMatches(run_command(['git', 'branch']), r'foo')
def test_remote_merge_base(self):
# Diff to merge-base should include working-copy changes,
@@ -1096,8 +1103,8 @@ class GitSVNTest(SCMTest):
diff_to_common_base = _git_diff(self.scm.remote_branch_ref() + '..')
diff_to_merge_base = _git_diff(self.scm.remote_merge_base())
- self.assertFalse(re.search(r'foo', diff_to_common_base))
- self.assertTrue(re.search(r'foo', diff_to_merge_base))
+ self.assertNotRegexpMatches(diff_to_common_base, r'foo')
+ self.assertRegexpMatches(diff_to_merge_base, r'foo')
def test_rebase_in_progress(self):
svn_test_file = os.path.join(self.svn_checkout_path, 'test_file')
@@ -1111,45 +1118,39 @@ class GitSVNTest(SCMTest):
# --quiet doesn't make git svn silent, so use run_silent to redirect output
self.assertRaises(ScriptError, run_silent, ['git', 'svn', '--quiet', 'rebase']) # Will fail due to a conflict leaving us mid-rebase.
- scm = detect_scm_system(self.git_checkout_path)
- self.assertTrue(scm.rebase_in_progress())
+ self.assertTrue(self.scm.rebase_in_progress())
# Make sure our cleanup works.
- scm.clean_working_directory()
- self.assertFalse(scm.rebase_in_progress())
+ self.scm.discard_working_directory_changes()
+ self.assertFalse(self.scm.rebase_in_progress())
# Make sure cleanup doesn't throw when no rebase is in progress.
- scm.clean_working_directory()
+ self.scm.discard_working_directory_changes()
def test_commitish_parsing(self):
- scm = detect_scm_system(self.git_checkout_path)
-
# Multiple revisions are cherry-picked.
- self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
- self.assertEqual(len(scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
+ self.assertEqual(len(self.scm.commit_ids_from_commitish_arguments(['HEAD~2'])), 1)
+ self.assertEqual(len(self.scm.commit_ids_from_commitish_arguments(['HEAD', 'HEAD~2'])), 2)
# ... is an invalid range specifier
- self.assertRaises(ScriptError, scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])
+ self.assertRaises(ScriptError, self.scm.commit_ids_from_commitish_arguments, ['trunk...HEAD'])
def test_commitish_order(self):
- scm = detect_scm_system(self.git_checkout_path)
-
commit_range = 'HEAD~3..HEAD'
- actual_commits = scm.commit_ids_from_commitish_arguments([commit_range])
+ actual_commits = self.scm.commit_ids_from_commitish_arguments([commit_range])
expected_commits = []
expected_commits += reversed(run_command(['git', 'rev-list', commit_range]).splitlines())
self.assertEqual(actual_commits, expected_commits)
def test_apply_git_patch(self):
- scm = detect_scm_system(self.git_checkout_path)
        # We carefully pick a diff which does not have a directory addition
# as currently svn-apply will error out when trying to remove directories
# in Git: https://bugs.webkit.org/show_bug.cgi?id=34871
patch = self._create_patch(_git_diff('HEAD..HEAD^'))
- self._setup_webkittools_scripts_symlink(scm)
- Checkout(scm).apply_patch(patch)
+ self._setup_webkittools_scripts_symlink(self.scm)
+ Checkout(self.scm).apply_patch(patch)
def test_commit_text_parsing(self):
write_into_file_at_path('test_file', 'more test content')
@@ -1159,12 +1160,11 @@ class GitSVNTest(SCMTest):
def test_commit_with_message_working_copy_only(self):
write_into_file_at_path('test_file_commit1', 'more test content')
run_command(['git', 'add', 'test_file_commit1'])
- scm = detect_scm_system(self.git_checkout_path)
- commit_text = scm.commit_with_message("yet another test commit")
+ commit_text = self.scm.commit_with_message("yet another test commit")
- self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
- self.assertTrue(re.search(r'test_file_commit1', svn_log))
+ self.assertRegexpMatches(svn_log, r'test_file_commit1')
def _local_commit(self, filename, contents, message):
write_into_file_at_path(filename, contents)
@@ -1192,112 +1192,102 @@ class GitSVNTest(SCMTest):
def test_revisions_changing_files_with_local_commit(self):
self._one_local_commit()
- self.assertEqual(self.scm.revisions_changing_file('test_file_commit1'), [])
+ self.assertItemsEqual(self.scm.revisions_changing_file('test_file_commit1'), [])
def test_commit_with_message(self):
self._one_local_commit_plus_working_copy_changes()
- scm = detect_scm_system(self.git_checkout_path)
- self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit")
- commit_text = scm.commit_with_message("yet another test commit", force_squash=True)
+ self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "yet another test commit")
+ commit_text = self.scm.commit_with_message("yet another test commit", force_squash=True)
- self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
- self.assertTrue(re.search(r'test_file_commit2', svn_log))
- self.assertTrue(re.search(r'test_file_commit1', svn_log))
+ self.assertRegexpMatches(svn_log, r'test_file_commit2')
+ self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_git_commit(self):
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- commit_text = scm.commit_with_message("another test commit", git_commit="HEAD^")
- self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+ commit_text = self.scm.commit_with_message("another test commit", git_commit="HEAD^")
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
- self.assertTrue(re.search(r'test_file_commit1', svn_log))
- self.assertFalse(re.search(r'test_file_commit2', svn_log))
+ self.assertRegexpMatches(svn_log, r'test_file_commit1')
+ self.assertNotRegexpMatches(svn_log, r'test_file_commit2')
def test_commit_with_message_git_commit_range(self):
self._three_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- commit_text = scm.commit_with_message("another test commit", git_commit="HEAD~2..HEAD")
- self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+ commit_text = self.scm.commit_with_message("another test commit", git_commit="HEAD~2..HEAD")
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
- self.assertFalse(re.search(r'test_file_commit0', svn_log))
- self.assertTrue(re.search(r'test_file_commit1', svn_log))
- self.assertTrue(re.search(r'test_file_commit2', svn_log))
+ self.assertNotRegexpMatches(svn_log, r'test_file_commit0')
+ self.assertRegexpMatches(svn_log, r'test_file_commit1')
+ self.assertRegexpMatches(svn_log, r'test_file_commit2')
def test_commit_with_message_only_local_commit(self):
self._one_local_commit()
- scm = detect_scm_system(self.git_checkout_path)
- commit_text = scm.commit_with_message("another test commit")
+ commit_text = self.scm.commit_with_message("another test commit")
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
- self.assertTrue(re.search(r'test_file_commit1', svn_log))
+ self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_multiple_local_commits_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
- scm = detect_scm_system(self.git_checkout_path)
- self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit")
- commit_text = scm.commit_with_message("another test commit", force_squash=True)
+ self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "another test commit")
+ commit_text = self.scm.commit_with_message("another test commit", force_squash=True)
- self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
- self.assertTrue(re.search(r'test_file_commit2', svn_log))
- self.assertTrue(re.search(r'test_file_commit1', svn_log))
+ self.assertRegexpMatches(svn_log, r'test_file_commit2')
+ self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_git_commit_and_working_copy(self):
self._two_local_commits()
write_into_file_at_path('test_file_commit1', 'working copy change')
- scm = detect_scm_system(self.git_checkout_path)
- self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", git_commit="HEAD^")
+ self.assertRaises(ScriptError, self.scm.commit_with_message, "another test commit", git_commit="HEAD^")
def test_commit_with_message_multiple_local_commits_always_squash(self):
+ run_command(['git', 'config', 'webkit-patch.commit-should-always-squash', 'true'])
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- scm._assert_can_squash = lambda working_directory_is_clean: True
- commit_text = scm.commit_with_message("yet another test commit")
- self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+ commit_text = self.scm.commit_with_message("yet another test commit")
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
- self.assertTrue(re.search(r'test_file_commit2', svn_log))
- self.assertTrue(re.search(r'test_file_commit1', svn_log))
+ self.assertRegexpMatches(svn_log, r'test_file_commit2')
+ self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_multiple_local_commits(self):
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "yet another test commit")
- commit_text = scm.commit_with_message("yet another test commit", force_squash=True)
+ self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "yet another test commit")
+ commit_text = self.scm.commit_with_message("yet another test commit", force_squash=True)
- self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
- self.assertTrue(re.search(r'test_file_commit2', svn_log))
- self.assertTrue(re.search(r'test_file_commit1', svn_log))
+ self.assertRegexpMatches(svn_log, r'test_file_commit2')
+ self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- self.assertRaises(AmbiguousCommitError, scm.commit_with_message, "another test commit")
- commit_text = scm.commit_with_message("another test commit", force_squash=True)
+ self.assertRaises(AmbiguousCommitError, self.scm.commit_with_message, "another test commit")
+ commit_text = self.scm.commit_with_message("another test commit", force_squash=True)
- self.assertEqual(scm.svn_revision_from_commit_text(commit_text), '6')
+ self.assertEqual(self.scm.svn_revision_from_commit_text(commit_text), '6')
svn_log = run_command(['git', 'svn', 'log', '--limit=1', '--verbose'])
- self.assertFalse(re.search(r'test_file2', svn_log))
- self.assertTrue(re.search(r'test_file_commit2', svn_log))
- self.assertTrue(re.search(r'test_file_commit1', svn_log))
+ self.assertNotRegexpMatches(svn_log, r'test_file2')
+ self.assertRegexpMatches(svn_log, r'test_file_commit2')
+ self.assertRegexpMatches(svn_log, r'test_file_commit1')
def test_commit_with_message_not_synced_with_conflict(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._local_commit('test_file2', 'asdf', 'asdf commit')
- scm = detect_scm_system(self.git_checkout_path)
# There's a conflict between trunk and the test_file2 modification.
- self.assertRaises(ScriptError, scm.commit_with_message, "another test commit", force_squash=True)
+ self.assertRaises(ScriptError, self.scm.commit_with_message, "another test commit", force_squash=True)
def test_upstream_branch(self):
run_command(['git', 'checkout', '-t', '-b', 'my-branch'])
@@ -1318,96 +1308,85 @@ class GitSVNTest(SCMTest):
def test_create_patch_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
- scm = detect_scm_system(self.git_checkout_path)
- patch = scm.create_patch()
- self.assertTrue(re.search(r'test_file_commit1', patch))
- self.assertTrue(re.search(r'test_file_commit2', patch))
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+ self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch(self):
self._one_local_commit_plus_working_copy_changes()
- scm = detect_scm_system(self.git_checkout_path)
- patch = scm.create_patch()
- self.assertTrue(re.search(r'test_file_commit2', patch))
- self.assertTrue(re.search(r'test_file_commit1', patch))
- self.assertTrue(re.search(r'Subversion Revision: 5', patch))
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+ self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_after_merge(self):
run_command(['git', 'checkout', '-b', 'dummy-branch', 'trunk~3'])
self._one_local_commit()
run_command(['git', 'merge', 'trunk'])
- scm = detect_scm_system(self.git_checkout_path)
- patch = scm.create_patch()
- self.assertTrue(re.search(r'test_file_commit1', patch))
- self.assertTrue(re.search(r'Subversion Revision: 5', patch))
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+ self.assertRegexpMatches(patch, r'Subversion Revision: 5')
def test_create_patch_with_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
- scm = detect_scm_system(self.git_checkout_path)
- patch = scm.create_patch(changed_files=['test_file_commit2'])
- self.assertTrue(re.search(r'test_file_commit2', patch))
+ patch = self.scm.create_patch(changed_files=['test_file_commit2'])
+ self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_with_rm_and_changed_files(self):
self._one_local_commit_plus_working_copy_changes()
- scm = detect_scm_system(self.git_checkout_path)
os.remove('test_file_commit1')
- patch = scm.create_patch()
- patch_with_changed_files = scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
+ patch = self.scm.create_patch()
+ patch_with_changed_files = self.scm.create_patch(changed_files=['test_file_commit1', 'test_file_commit2'])
self.assertEqual(patch, patch_with_changed_files)
def test_create_patch_git_commit(self):
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- patch = scm.create_patch(git_commit="HEAD^")
- self.assertTrue(re.search(r'test_file_commit1', patch))
- self.assertFalse(re.search(r'test_file_commit2', patch))
+ patch = self.scm.create_patch(git_commit="HEAD^")
+ self.assertRegexpMatches(patch, r'test_file_commit1')
+ self.assertNotRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_git_commit_range(self):
self._three_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- patch = scm.create_patch(git_commit="HEAD~2..HEAD")
- self.assertFalse(re.search(r'test_file_commit0', patch))
- self.assertTrue(re.search(r'test_file_commit2', patch))
- self.assertTrue(re.search(r'test_file_commit1', patch))
+ patch = self.scm.create_patch(git_commit="HEAD~2..HEAD")
+ self.assertNotRegexpMatches(patch, r'test_file_commit0')
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+ self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
- scm = detect_scm_system(self.git_checkout_path)
- patch = scm.create_patch(git_commit="HEAD....")
- self.assertFalse(re.search(r'test_file_commit1', patch))
- self.assertTrue(re.search(r'test_file_commit2', patch))
+ patch = self.scm.create_patch(git_commit="HEAD....")
+ self.assertNotRegexpMatches(patch, r'test_file_commit1')
+ self.assertRegexpMatches(patch, r'test_file_commit2')
def test_create_patch_multiple_local_commits(self):
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- patch = scm.create_patch()
- self.assertTrue(re.search(r'test_file_commit2', patch))
- self.assertTrue(re.search(r'test_file_commit1', patch))
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+ self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_patch_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- patch = scm.create_patch()
- self.assertFalse(re.search(r'test_file2', patch))
- self.assertTrue(re.search(r'test_file_commit2', patch))
- self.assertTrue(re.search(r'test_file_commit1', patch))
+ patch = self.scm.create_patch()
+ self.assertNotRegexpMatches(patch, r'test_file2')
+ self.assertRegexpMatches(patch, r'test_file_commit2')
+ self.assertRegexpMatches(patch, r'test_file_commit1')
def test_create_binary_patch(self):
# Create a git binary patch and check the contents.
- scm = detect_scm_system(self.git_checkout_path)
test_file_name = 'binary_file'
test_file_path = os.path.join(self.git_checkout_path, test_file_name)
file_contents = ''.join(map(chr, range(256)))
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
- patch = scm.create_patch()
- self.assertTrue(re.search(r'\nliteral 0\n', patch))
- self.assertTrue(re.search(r'\nliteral 256\n', patch))
+ patch = self.scm.create_patch()
+ self.assertRegexpMatches(patch, r'\nliteral 0\n')
+ self.assertRegexpMatches(patch, r'\nliteral 256\n')
# Check if we can apply the created patch.
run_command(['git', 'rm', '-f', test_file_name])
- self._setup_webkittools_scripts_symlink(scm)
+ self._setup_webkittools_scripts_symlink(self.scm)
self.checkout.apply_patch(self._create_patch(patch))
self.assertEqual(file_contents, read_from_path(test_file_path, encoding=None))
@@ -1415,73 +1394,67 @@ class GitSVNTest(SCMTest):
write_into_file_at_path(test_file_path, file_contents, encoding=None)
run_command(['git', 'add', test_file_name])
run_command(['git', 'commit', '-m', 'binary diff'])
- patch_from_local_commit = scm.create_patch('HEAD')
- self.assertTrue(re.search(r'\nliteral 0\n', patch_from_local_commit))
- self.assertTrue(re.search(r'\nliteral 256\n', patch_from_local_commit))
+
+ patch_from_local_commit = self.scm.create_patch('HEAD')
+ self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 0\n')
+ self.assertRegexpMatches(patch_from_local_commit, r'\nliteral 256\n')
def test_changed_files_local_plus_working_copy(self):
self._one_local_commit_plus_working_copy_changes()
- scm = detect_scm_system(self.git_checkout_path)
- files = scm.changed_files()
- self.assertTrue('test_file_commit1' in files)
- self.assertTrue('test_file_commit2' in files)
+ files = self.scm.changed_files()
+ self.assertIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
# working copy should *not* be in the list.
- files = scm.changed_files('trunk..')
- self.assertTrue('test_file_commit1' in files)
- self.assertFalse('test_file_commit2' in files)
+ files = self.scm.changed_files('trunk..')
+ self.assertIn('test_file_commit1', files)
+ self.assertNotIn('test_file_commit2', files)
# working copy *should* be in the list.
- files = scm.changed_files('trunk....')
- self.assertTrue('test_file_commit1' in files)
- self.assertTrue('test_file_commit2' in files)
+ files = self.scm.changed_files('trunk....')
+ self.assertIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
def test_changed_files_git_commit(self):
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- files = scm.changed_files(git_commit="HEAD^")
- self.assertTrue('test_file_commit1' in files)
- self.assertFalse('test_file_commit2' in files)
+ files = self.scm.changed_files(git_commit="HEAD^")
+ self.assertIn('test_file_commit1', files)
+ self.assertNotIn('test_file_commit2', files)
def test_changed_files_git_commit_range(self):
self._three_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- files = scm.changed_files(git_commit="HEAD~2..HEAD")
- self.assertTrue('test_file_commit0' not in files)
- self.assertTrue('test_file_commit1' in files)
- self.assertTrue('test_file_commit2' in files)
+ files = self.scm.changed_files(git_commit="HEAD~2..HEAD")
+ self.assertNotIn('test_file_commit0', files)
+ self.assertIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
def test_changed_files_working_copy_only(self):
self._one_local_commit_plus_working_copy_changes()
- scm = detect_scm_system(self.git_checkout_path)
- files = scm.changed_files(git_commit="HEAD....")
- self.assertFalse('test_file_commit1' in files)
- self.assertTrue('test_file_commit2' in files)
+ files = self.scm.changed_files(git_commit="HEAD....")
+ self.assertNotIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
def test_changed_files_multiple_local_commits(self):
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- files = scm.changed_files()
- self.assertTrue('test_file_commit2' in files)
- self.assertTrue('test_file_commit1' in files)
+ files = self.scm.changed_files()
+ self.assertIn('test_file_commit2', files)
+ self.assertIn('test_file_commit1', files)
def test_changed_files_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- files = scm.changed_files()
- self.assertFalse('test_file2' in files)
- self.assertTrue('test_file_commit2' in files)
- self.assertTrue('test_file_commit1' in files)
+ files = self.scm.changed_files()
+ self.assertNotIn('test_file2', files)
+ self.assertIn('test_file_commit2', files)
+ self.assertIn('test_file_commit1', files)
def test_changed_files_not_synced(self):
run_command(['git', 'checkout', '-b', 'my-branch', 'trunk~3'])
self._two_local_commits()
- scm = detect_scm_system(self.git_checkout_path)
- files = scm.changed_files()
- self.assertFalse('test_file2' in files)
- self.assertTrue('test_file_commit2' in files)
- self.assertTrue('test_file_commit1' in files)
+ files = self.scm.changed_files()
+ self.assertNotIn('test_file2', files)
+ self.assertIn('test_file_commit2', files)
+ self.assertIn('test_file_commit1', files)
def test_changed_files(self):
self._shared_test_changed_files()
@@ -1499,15 +1472,15 @@ class GitSVNTest(SCMTest):
# equivalent to 'git diff my-branch..HEAD', should not include working changes
files = self.scm.changed_files(git_commit='UPSTREAM..')
- self.assertFalse('test_file_commit1' in files)
- self.assertTrue('test_file_commit2' in files)
- self.assertFalse('test_file_commit0' in files)
+ self.assertNotIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
+ self.assertNotIn('test_file_commit0', files)
# equivalent to 'git diff my-branch', *should* include working changes
files = self.scm.changed_files(git_commit='UPSTREAM....')
- self.assertFalse('test_file_commit1' in files)
- self.assertTrue('test_file_commit2' in files)
- self.assertTrue('test_file_commit0' in files)
+ self.assertNotIn('test_file_commit1', files)
+ self.assertIn('test_file_commit2', files)
+ self.assertIn('test_file_commit0', files)
def test_contents_at_revision(self):
self._shared_test_contents_at_revision()
@@ -1527,13 +1500,13 @@ class GitSVNTest(SCMTest):
def test_delete(self):
self._two_local_commits()
self.scm.delete('test_file_commit1')
- self.assertTrue("test_file_commit1" in self.scm.deleted_files())
+ self.assertIn("test_file_commit1", self.scm.deleted_files())
def test_delete_list(self):
self._two_local_commits()
self.scm.delete_list(["test_file_commit1", "test_file_commit2"])
- self.assertTrue("test_file_commit1" in self.scm.deleted_files())
- self.assertTrue("test_file_commit2" in self.scm.deleted_files())
+ self.assertIn("test_file_commit1", self.scm.deleted_files())
+ self.assertIn("test_file_commit2", self.scm.deleted_files())
def test_delete_recursively(self):
self._shared_test_delete_recursively()
@@ -1546,8 +1519,7 @@ class GitSVNTest(SCMTest):
def test_to_object_name(self):
relpath = 'test_file_commit1'
- fullpath = os.path.join(self.git_checkout_path, relpath)
- self._two_local_commits()
+ fullpath = os.path.realpath(os.path.join(self.git_checkout_path, relpath))
self.assertEqual(relpath, self.scm.to_object_name(fullpath))
def test_show_head(self):
@@ -1568,33 +1540,40 @@ class GitSVNTest(SCMTest):
diff = self.scm.diff_for_file('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
- self.assertTrue("+Updated" in diff)
- self.assertTrue("-more test content" in diff)
+ self.assertIn("+Updated", diff)
+ self.assertIn("-more test content", diff)
self.scm.add('test_file_commit1')
cached_diff = self.scm.diff_for_file('test_file_commit1')
- self.assertTrue("+Updated" in cached_diff)
- self.assertTrue("-more test content" in cached_diff)
+ self.assertIn("+Updated", cached_diff)
+ self.assertIn("-more test content", cached_diff)
def test_exists(self):
- scm = detect_scm_system(self.git_checkout_path)
- self._shared_test_exists(scm, scm.commit_locally_with_message)
+ self._shared_test_exists(self.scm, self.scm.commit_locally_with_message)
# We need to split off more of these SCM tests to use mocks instead of the filesystem.
# This class is the first part of that.
class GitTestWithMock(unittest.TestCase):
+ maxDiff = None
+
def make_scm(self, logging_executive=False):
# We do this should_log dance to avoid logging when Git.__init__ runs sysctl on mac to check for 64-bit support.
- scm = Git(cwd=None, executive=MockExecutive())
+ scm = Git(cwd=".", executive=MockExecutive(), filesystem=MockFileSystem())
+ scm.read_git_config = lambda *args, **kw: "MOCKKEY:MOCKVALUE"
scm._executive._should_log = logging_executive
return scm
def test_create_patch(self):
scm = self.make_scm(logging_executive=True)
- expected_stderr = "MOCK run_command: ['git', 'merge-base', u'refs/remotes/origin/master', 'HEAD'], cwd=%(checkout)s\nMOCK run_command: ['git', 'diff', '--binary', '--no-ext-diff', '--full-index', '-M', 'MOCK output of child process', '--'], cwd=%(checkout)s\nMOCK run_command: ['git', 'log', '-25'], cwd=None\n" % {'checkout': scm.checkout_root}
- OutputCapture().assert_outputs(self, scm.create_patch, expected_stderr=expected_stderr)
+ expected_stderr = """\
+MOCK run_command: ['git', 'merge-base', 'MOCKVALUE', 'HEAD'], cwd=%(checkout)s
+MOCK run_command: ['git', 'diff', '--binary', '--no-color', '--no-ext-diff', '--full-index', '--no-renames', '', 'MOCK output of child process', '--'], cwd=%(checkout)s
+MOCK run_command: ['git', 'rev-parse', '--show-toplevel'], cwd=%(checkout)s
+MOCK run_command: ['git', 'log', '-1', '--grep=git-svn-id:', '--date=iso', './MOCK output of child process/MOCK output of child process'], cwd=%(checkout)s
+""" % {'checkout': scm.checkout_root}
+ OutputCapture().assert_outputs(self, scm.create_patch, expected_logs=expected_stderr)
def test_push_local_commits_to_server_with_username_and_password(self):
self.assertEqual(self.make_scm().push_local_commits_to_server(username='dbates@webkit.org', password='blah'), "MOCK output of child process")
@@ -1608,5 +1587,14 @@ class GitTestWithMock(unittest.TestCase):
def test_push_local_commits_to_server_without_username_and_with_password(self):
self.assertRaises(AuthenticationError, self.make_scm().push_local_commits_to_server, {'password': 'blah'})
-if __name__ == '__main__':
- unittest.main()
+ def test_timestamp_of_revision(self):
+ scm = self.make_scm()
+ scm.find_checkout_root = lambda path: ''
+ scm._run_git = lambda args: 'Date: 2013-02-08 08:05:49 +0000'
+ self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T08:05:49Z')
+
+ scm._run_git = lambda args: 'Date: 2013-02-08 01:02:03 +0130'
+ self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-07T23:32:03Z')
+
+ scm._run_git = lambda args: 'Date: 2013-02-08 01:55:21 -0800'
+ self.assertEqual(scm.timestamp_of_revision('some-path', '12345'), '2013-02-08T09:55:21Z')
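The test_timestamp_of_revision cases added above expect Git "Date:" lines (local time plus a UTC offset) to be normalized into ISO-8601 UTC timestamps ending in "Z". A minimal standalone sketch of that conversion, assuming only the Python standard library; the helper name is illustrative and is not the method the patch itself adds:

from datetime import datetime, timedelta

def to_utc_timestamp(date_line):
    # date_line looks like "Date: 2013-02-08 01:02:03 +0130" (git log --date=iso).
    _, day, clock, offset = date_line.split()
    local = datetime.strptime(day + " " + clock, "%Y-%m-%d %H:%M:%S")
    sign = 1 if offset[0] == "+" else -1
    # Subtracting the signed offset from local time yields UTC.
    utc = local - sign * timedelta(hours=int(offset[1:3]), minutes=int(offset[3:5]))
    return utc.strftime("%Y-%m-%dT%H:%M:%SZ")

# Matches the expectations in test_timestamp_of_revision above.
assert to_utc_timestamp("Date: 2013-02-08 08:05:49 +0000") == "2013-02-08T08:05:49Z"
assert to_utc_timestamp("Date: 2013-02-08 01:02:03 +0130") == "2013-02-07T23:32:03Z"
assert to_utc_timestamp("Date: 2013-02-08 01:55:21 -0800") == "2013-02-08T09:55:21Z"

Subtracting a positive offset such as +0130 moves the time back across midnight, which is why the second case lands on 2013-02-07.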
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/svn.py b/Tools/Scripts/webkitpy/common/checkout/scm/svn.py
index 1323b702c..c146f5d32 100644
--- a/Tools/Scripts/webkitpy/common/checkout/scm/svn.py
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/svn.py
@@ -29,8 +29,10 @@
import logging
import os
+import random
import re
import shutil
+import string
import sys
import tempfile
@@ -43,9 +45,16 @@ _log = logging.getLogger(__name__)
# A mixin class that represents common functionality for SVN and Git-SVN.
-class SVNRepository:
+class SVNRepository(object):
+ # FIXME: These belong in common.config.urls
+ svn_server_host = "svn.webkit.org"
+ svn_server_realm = "<http://svn.webkit.org:80> Mac OS Forge"
+
def has_authorization_for_realm(self, realm, home_directory=os.getenv("HOME")):
- # ignore false positives for methods implemented in the mixee class. pylint: disable-msg=E1101
+ # If we are working on a file:// repository, realm will be None
+ if realm is None:
+ return True
+ # ignore false positives for methods implemented in the mixee class. pylint: disable=E1101
# Assumes find and grep are installed.
if not os.path.isdir(os.path.join(home_directory, ".subversion")):
return False
@@ -63,9 +72,6 @@ class SVNRepository:
class SVN(SCM, SVNRepository):
- # FIXME: These belong in common.config.urls
- svn_server_host = "svn.webkit.org"
- svn_server_realm = "<http://svn.webkit.org:80> Mac OS Forge"
executable_name = "svn"
@@ -106,7 +112,7 @@ class SVN(SCM, SVNRepository):
match = re.search("^%s: (?P<value>.+)$" % field_name, info_output, re.MULTILINE)
if not match:
raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name)
- return match.group('value')
+ return match.group('value').rstrip('\r')
def find_checkout_root(self, path):
uuid = self.find_uuid(path)
@@ -134,10 +140,11 @@ class SVN(SCM, SVNRepository):
def svn_version(self):
return self._run_svn(['--version', '--quiet'])
- def working_directory_is_clean(self):
- return self._run_svn(["diff"], cwd=self.checkout_root, decode_output=False) == ""
+ def has_working_directory_changes(self):
+ # FIXME: What about files which are not committed yet?
+ return self._run_svn(["diff"], cwd=self.checkout_root, decode_output=False) != ""
- def clean_working_directory(self):
+ def discard_working_directory_changes(self):
# Make sure there are no locks lying around from a previously aborted svn invocation.
# This is slightly dangerous, as it's possible the user is running another svn process
# on this checkout at the same time. However, it's much more likely that we're running
@@ -174,10 +181,20 @@ class SVN(SCM, SVNRepository):
return
self.add(path)
- def add_list(self, paths, return_exit_code=False):
+ def add_list(self, paths):
for path in paths:
self._add_parent_directories(os.path.dirname(os.path.abspath(path)))
- return self._run_svn(["add"] + paths, return_exit_code=return_exit_code)
+ if self.svn_version() >= "1.7":
+ # For subversion client 1.7 and later, need to add '--parents' option to ensure intermediate directories
+ # are added; in addition, 1.7 returns an exit code of 1 from svn add if one or more of the requested
+ # adds are already under version control, including intermediate directories subject to addition
+ # due to --parents
+ svn_add_args = ['svn', 'add', '--parents'] + paths
+ exit_code = self.run(svn_add_args, return_exit_code=True)
+ if exit_code and exit_code != 1:
+ raise ScriptError(script_args=svn_add_args, exit_code=exit_code)
+ else:
+ self._run_svn(["add"] + paths)
def _delete_parent_directories(self, path):
if not self.in_working_directory(path):
@@ -239,6 +256,13 @@ class SVN(SCM, SVNRepository):
def svn_revision(self, path):
return self.value_from_svn_info(path, 'Revision')
+ def timestamp_of_revision(self, path, revision):
+ # We use --xml to get timestamps like 2013-02-08T08:18:04.964409Z
+ repository_root = self.value_from_svn_info(self.checkout_root, 'Repository Root')
+ info_output = Executive().run_command([self.executable_name, 'log', '-r', revision, '--xml', repository_root], cwd=path).rstrip()
+ match = re.search(r"^<date>(?P<value>.+)</date>\r?$", info_output, re.MULTILINE)
+ return match.group('value')
+
# FIXME: This method should be on Checkout.
def create_patch(self, git_commit=None, changed_files=None):
"""Returns a byte array (str()) representing the patch file.
@@ -266,11 +290,12 @@ class SVN(SCM, SVNRepository):
return self._run_svn(['diff', '-c', revision])
def _bogus_dir_name(self):
+ rnd = ''.join(random.sample(string.ascii_letters, 5))
if sys.platform.startswith("win"):
parent_dir = tempfile.gettempdir()
else:
parent_dir = sys.path[0] # tempdir is not secure.
- return os.path.join(parent_dir, "temp_svn_config")
+ return os.path.join(parent_dir, "temp_svn_config_" + rnd)
def _setup_bogus_dir(self, log):
self._bogus_dir = self._bogus_dir_name()
diff --git a/Tools/Scripts/webkitpy/common/config/build.py b/Tools/Scripts/webkitpy/common/config/build.py
deleted file mode 100644
index 2ecacc7ad..000000000
--- a/Tools/Scripts/webkitpy/common/config/build.py
+++ /dev/null
@@ -1,135 +0,0 @@
-# Copyright (C) 2010 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
-# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Functions relating to building WebKit"""
-
-import re
-
-
-def _should_file_trigger_build(target_platform, file):
- # The directories and patterns lists below map directory names or
- # regexp patterns to the bot platforms for which they should trigger a
- # build. Mapping to the empty list means that no builds should be
- # triggered on any platforms. Earlier directories/patterns take
- # precendence over later ones.
-
- # FIXME: The patterns below have only been verified to be correct on
- # the platforms listed below. We should implement this for other platforms
- # and start using it for their bots. Someone familiar with each platform
- # will have to figure out what the right set of directories/patterns is for
- # that platform.
- assert(target_platform in ("mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"))
-
- directories = [
- # Directories that shouldn't trigger builds on any bots.
- ("Examples", []),
- ("PerformanceTests", []),
- ("ManualTests", []),
- ("Tools/BuildSlaveSupport/build.webkit.org-config/public_html", []),
- ("Websites", []),
- ("efl", []),
- ("iphone", []),
- ("opengl", []),
- ("opentype", []),
- ("openvg", []),
- ("wince", []),
- ("wx", []),
-
- # Directories that should trigger builds on only some bots.
- ("Source/WebCore/image-decoders", ["chromium"]),
- ("LayoutTests/platform/mac", ["mac", "win"]),
- ("cairo", ["gtk", "wincairo"]),
- ("cf", ["chromium-mac", "mac", "qt", "win"]),
- ("chromium", ["chromium"]),
- ("cocoa", ["chromium-mac", "mac"]),
- ("curl", ["gtk", "wincairo"]),
- ("gobject", ["gtk"]),
- ("gpu", ["chromium", "mac"]),
- ("gstreamer", ["gtk"]),
- ("gtk", ["gtk"]),
- ("mac", ["chromium-mac", "mac"]),
- ("mac-leopard", ["mac-leopard"]),
- ("mac-lion", ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
- ("mac-snowleopard", ["mac-leopard", "mac-snowleopard"]),
- ("mac-wk2", ["mac-lion", "mac-snowleopard", "mac-mountainlion", "win"]),
- ("objc", ["mac"]),
- ("qt", ["qt"]),
- ("skia", ["chromium"]),
- ("soup", ["gtk"]),
- ("v8", ["chromium"]),
- ("win", ["chromium-win", "win"]),
- ]
- patterns = [
- # Patterns that shouldn't trigger builds on any bots.
- (r"(?:^|/)ChangeLog.*$", []),
- (r"(?:^|/)Makefile$", []),
- (r"/ARM", []),
- (r"/CMake.*", []),
- (r"/LICENSE[^/]+$", []),
- (r"ARM(?:v7)?\.(?:cpp|h)$", []),
- (r"MIPS\.(?:cpp|h)$", []),
- (r"WinCE\.(?:cpp|h|mm)$", []),
- (r"\.(?:bkl|mk)$", []),
-
- # Patterns that should trigger builds on only some bots.
- (r"(?:^|/)GNUmakefile\.am$", ["gtk"]),
- (r"/\w+Chromium\w*\.(?:cpp|h|mm)$", ["chromium"]),
- (r"Mac\.(?:cpp|h|mm)$", ["mac"]),
- (r"\.(?:vcproj|vsprops|sln)$", ["win"]),
- (r"\.exp(?:\.in)?$", ["mac"]),
- (r"\.gypi?", ["chromium"]),
- (r"\.order$", ["mac"]),
- (r"\.pr[io]$", ["qt"]),
- (r"\.vcproj/", ["win"]),
- (r"\.xcconfig$", ["mac"]),
- (r"\.xcodeproj/", ["mac"]),
- ]
-
- base_platform = target_platform.split("-")[0]
-
- # See if the file is in one of the known directories.
- for directory, platforms in directories:
- if re.search(r"(?:^|/)%s/" % directory, file):
- return target_platform in platforms or base_platform in platforms
-
- # See if the file matches a known pattern.
- for pattern, platforms in patterns:
- if re.search(pattern, file):
- return target_platform in platforms or base_platform in platforms
-
- # See if the file is a platform-specific test result.
- match = re.match("LayoutTests/platform/(?P<platform>[^/]+)/", file)
- if match:
- # See if the file is a test result for this platform, our base
- # platform, or one of our sub-platforms.
- return match.group("platform") in (target_platform, base_platform) or match.group("platform").startswith("%s-" % target_platform)
-
- # The file isn't one we know about specifically, so we should assume we
- # have to build.
- return True
-
-
-def should_build(target_platform, changed_files):
- """Returns true if the changed files affect the given platform, and
- thus a build should be performed. target_platform should be one of the
- platforms used in the build.webkit.org master's config.json file."""
- return any(_should_file_trigger_build(target_platform, file) for file in changed_files)
diff --git a/Tools/Scripts/webkitpy/common/config/build_unittest.py b/Tools/Scripts/webkitpy/common/config/build_unittest.py
deleted file mode 100644
index c496179e4..000000000
--- a/Tools/Scripts/webkitpy/common/config/build_unittest.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) 2010 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
-# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest
-
-from webkitpy.common.config import build
-
-
-class ShouldBuildTest(unittest.TestCase):
- _should_build_tests = [
- (["ChangeLog", "Source/WebCore/ChangeLog", "Source/WebKit2/ChangeLog-2011-02-11"], []),
- (["GNUmakefile.am", "Source/WebCore/GNUmakefile.am"], ["gtk"]),
- (["Websites/bugs.webkit.org/foo", "Source/WebCore/bar"], ["*"]),
- (["Websites/bugs.webkit.org/foo"], []),
- (["Source/JavaScriptCore/JavaScriptCore.xcodeproj/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
- (["Source/JavaScriptCore/JavaScriptCore.vcproj/foo", "Source/WebKit2/win/WebKit2.vcproj", "Source/WebKit/win/WebKit.sln", "Tools/WebKitTestRunner/Configurations/WebKitTestRunnerCommon.vsprops"], ["win"]),
- (["LayoutTests/platform/mac/foo", "Source/WebCore/bar"], ["*"]),
- (["LayoutTests/foo"], ["*"]),
- (["LayoutTests/canvas/philip/tests/size.attributes.parse.exp-expected.txt", "LayoutTests/canvas/philip/tests/size.attributes.parse.exp.html"], ["*"]),
- (["LayoutTests/platform/chromium-linux/foo"], ["chromium-linux"]),
- (["LayoutTests/platform/chromium-win/fast/compact/001-expected.txt"], ["chromium-win"]),
- (["LayoutTests/platform/mac-leopard/foo"], ["mac-leopard"]),
- (["LayoutTests/platform/mac-lion/foo"], ["mac-leopard", "mac-lion", "mac-snowleopard", "win"]),
- (["LayoutTests/platform/mac-snowleopard/foo"], ["mac-leopard", "mac-snowleopard"]),
- (["LayoutTests/platform/mac-wk2/Skipped"], ["mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
- (["LayoutTests/platform/mac/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]),
- (["LayoutTests/platform/win-xp/foo"], ["win"]),
- (["LayoutTests/platform/win-wk2/foo"], ["win"]),
- (["LayoutTests/platform/win/foo"], ["win"]),
- (["Source/WebCore.exp.in", "Source/WebKit/mac/WebKit.exp"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
- (["Source/WebCore/mac/foo"], ["chromium-mac", "mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
- (["Source/WebCore/win/foo"], ["chromium-win", "win"]),
- (["Source/WebCore/platform/graphics/gpu/foo"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
- (["Source/WebCore/platform/wx/wxcode/win/foo"], []),
- (["Source/WebCore/rendering/RenderThemeMac.mm", "Source/WebCore/rendering/RenderThemeMac.h"], ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard"]),
- (["Source/WebCore/rendering/RenderThemeChromiumLinux.h"], ["chromium-linux"]),
- (["Source/WebCore/rendering/RenderThemeWinCE.h"], []),
- (["Tools/BuildSlaveSupport/build.webkit.org-config/public_html/LeaksViewer/LeaksViewer.js"], []),
- ]
-
- def test_should_build(self):
- for files, platforms in self._should_build_tests:
- # FIXME: We should test more platforms here once
- # build._should_file_trigger_build is implemented for them.
- for platform in ["mac-leopard", "mac-lion", "mac-mountainlion", "mac-snowleopard", "win"]:
- should_build = platform in platforms or "*" in platforms
- self.assertEqual(build.should_build(platform, files), should_build, "%s should%s have built but did%s (files: %s)" % (platform, "" if should_build else "n't", "n't" if should_build else "", str(files)))
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/config/committers.py b/Tools/Scripts/webkitpy/common/config/committers.py
index e49eebf52..62a09329e 100644
--- a/Tools/Scripts/webkitpy/common/config/committers.py
+++ b/Tools/Scripts/webkitpy/common/config/committers.py
@@ -29,9 +29,18 @@
#
# WebKit's Python module for committer and reviewer validation.
+import fnmatch
+import json
+
from webkitpy.common.editdistance import edit_distance
+from webkitpy.common.memoized import memoized
+from webkitpy.common.system.filesystem import FileSystem
+
+
+# The list of contributors has been moved to contributors.json
-class Account(object):
+
+class Contributor(object):
def __init__(self, name, email_or_emails, irc_nickname_or_nicknames=None):
assert(name)
assert(email_or_emails)
@@ -54,6 +63,9 @@ class Account(object):
return self.emails[0]
def __str__(self):
+ return unicode(self).encode('utf-8')
+
+ def __unicode__(self):
return '"%s" <%s>' % (self.full_name, self.emails[0])
def contains_string(self, search_string):
@@ -69,11 +81,17 @@ class Account(object):
return True
return False
-
-class Contributor(Account):
- def __init__(self, name, email_or_emails, irc_nickname=None):
- Account.__init__(self, name, email_or_emails, irc_nickname)
- self.is_contributor = True
+ def matches_glob(self, glob_string):
+ if fnmatch.fnmatch(self.full_name, glob_string):
+ return True
+ if self.irc_nicknames:
+ for nickname in self.irc_nicknames:
+ if fnmatch.fnmatch(nickname, glob_string):
+ return True
+ for email in self.emails:
+ if fnmatch.fnmatch(email, glob_string):
+ return True
+ return False
class Committer(Contributor):
@@ -88,490 +106,20 @@ class Reviewer(Committer):
self.can_review = True
-# This is a list of email addresses that have bugzilla accounts but are not
-# used for contributing (such as mailing lists).
-
-
-watchers_who_are_not_contributors = [
- Account("Chromium Compositor Bugs", ["cc-bugs@chromium.org"], ""),
- Account("Chromium Media Reviews", ["feature-media-reviews@chromium.org"], ""),
- Account("David Levin", ["levin+threading@chromium.org"], ""),
- Account("David Levin", ["levin+watchlist@chromium.org"], ""),
- Account("Kent Tamura", ["tkent+wkapi@chromium.org"], ""),
-]
-
-
-# This is a list of people (or bots) who are neither committers nor reviewers, but get
-# frequently CC'ed by others on Bugzilla bugs, so their names should be
-# supported by autocomplete. No review needed to add to the list.
-
-
-contributors_who_are_not_committers = [
- Contributor("Adobe Bug Tracker", "WebkitBugTracker@adobe.com"),
- Contributor("Aharon Lanin", "aharon@google.com"),
- Contributor("Alan Stearns", "stearns@adobe.com", "astearns"),
- Contributor("Alejandro Pineiro", "apinheiro@igalia.com"),
- Contributor("Alexey Marinichev", ["amarinichev@chromium.org", "amarinichev@google.com"], "amarinichev"),
- Contributor("Andras Piroska", "pandras@inf.u-szeged.hu", "andris88"),
- Contributor("Andrei Bucur", "abucur@adobe.com", "abucur"),
- Contributor("Anne van Kesteren", "annevankesteren+webkit@gmail.com", "annevk"),
- Contributor("Annie Sullivan", "sullivan@chromium.org", "annie"),
- Contributor("Aryeh Gregor", "ayg@aryeh.name", "AryehGregor"),
- Contributor("Balazs Ankes", "bank@inf.u-szeged.hu", "abalazs"),
- Contributor("Brian Salomon", "bsalomon@google.com"),
- Contributor("Commit Queue", "commit-queue@webkit.org"),
- Contributor("Daniel Sievers", "sievers@chromium.org"),
- Contributor("David Dorwin", "ddorwin@chromium.org", "ddorwin"),
- Contributor("David Reveman", "reveman@chromium.org", "reveman"),
- Contributor("Dongsung Huang", "luxtella@company100.net", "Huang"),
- Contributor("Douglas Davidson", "ddavidso@apple.com"),
- Contributor("Edward O'Connor", "eoconnor@apple.com", "hober"),
- Contributor("Elliott Sprehn", "esprehn@chromium.org", "esprehn"),
- Contributor("Eric Penner", "epenner@chromium.org", "epenner"),
- Contributor("Felician Marton", ["felician@inf.u-szeged.hu", "marton.felician.zoltan@stud.u-szeged.hu"], "Felician"),
- Contributor("Finnur Thorarinsson", ["finnur@chromium.org", "finnur.webkit@gmail.com"], "finnur"),
- Contributor("Forms Bugs", "forms-bugs@chromium.org"),
- Contributor("Glenn Adams", "glenn@skynav.com", "gasubic"),
- Contributor("Gabor Ballabas", "gaborb@inf.u-szeged.hu", "bgabor"),
- Contributor("Grace Kloba", "klobag@chromium.org", "klobag"),
- Contributor("Greg Simon", "gregsimon@chromium.org", "gregsimon"),
- Contributor("Gregg Tavares", ["gman@google.com", "gman@chromium.org"], "gman"),
- Contributor("Hao Zheng", "zhenghao@chromium.org"),
- Contributor("Harald Alvestrand", "hta@google.com", "hta"),
- Contributor("Ian Hickson", "ian@hixie.ch", "hixie"),
- Contributor("Janos Badics", "jbadics@inf.u-szeged.hu", "dicska"),
- Contributor("Jonathan Backer", "backer@chromium.org", "backer"),
- Contributor("Jeff Timanus", ["twiz@chromium.org", "twiz@google.com"], "twiz"),
- Contributor("Jing Zhao", "jingzhao@chromium.org"),
- Contributor("Joanmarie Diggs", "jdiggs@igalia.com"),
- Contributor("John Bates", ["jbates@google.com", "jbates@chromium.org"], "jbates"),
- Contributor("John Bauman", ["jbauman@chromium.org", "jbauman@google.com"], "jbauman"),
- Contributor("John Mellor", "johnme@chromium.org", "johnme"),
- Contributor("Kulanthaivel Palanichamy", "kulanthaivel@codeaurora.org", "kvel"),
- Contributor("Kiran Muppala", "cmuppala@apple.com", "kiranm"),
- Contributor("Mihai Balan", "mibalan@adobe.com", "miChou"),
- Contributor("Min Qin", "qinmin@chromium.org"),
- Contributor("Nandor Huszka", "hnandor@inf.u-szeged.hu", "hnandor"),
- Contributor("Oliver Varga", ["voliver@inf.u-szeged.hu", "Varga.Oliver@stud.u-szeged.hu"], "TwistO"),
- Contributor("Peter Gal", "galpeter@inf.u-szeged.hu", "elecro"),
- Contributor("Peter Linss", "peter.linss@hp.com", "plinss"),
- Contributor("Pravin D", "pravind.2k4@gmail.com", "pravind"),
- Contributor("Radar WebKit Bug Importer", "webkit-bug-importer@group.apple.com"),
- Contributor("Raul Hudea", "rhudea@adobe.com", "rhudea"),
- Contributor("Roland Takacs", "rtakacs@inf.u-szeged.hu", "rtakacs"),
- Contributor(u"Sami Ky\u00f6stil\u00e4", "skyostil@chromium.org", "skyostil"),
- Contributor("Szilard Ledan-Muntean", "szledan@inf.u-szeged.hu", "szledan"),
- Contributor("Tab Atkins", ["tabatkins@google.com", "jackalmage@gmail.com"], "tabatkins"),
- Contributor("Tamas Czene", ["tczene@inf.u-szeged.hu", "Czene.Tamas@stud.u-szeged.hu"], "tczene"),
- Contributor("Tien-Ren Chen", "trchen@chromium.org", "trchen"),
- Contributor("WebKit Review Bot", "webkit.review.bot@gmail.com", "sheriff-bot"),
- Contributor("Web Components Team", "webcomponents-bugzilla@chromium.org"),
- Contributor("Wyatt Carss", ["wcarss@chromium.org", "wcarss@google.com"], "wcarss"),
- Contributor("Zeev Lieber", "zlieber@chromium.org"),
- Contributor("Zoltan Arvai", "zarvai@inf.u-szeged.hu", "azbest_hu"),
- Contributor("Zsolt Feher", "feherzs@inf.u-szeged.hu", "Smith"),
-]
-
-
-# This is intended as a canonical, machine-readable list of all non-reviewer
-# committers for WebKit. If your name is missing here and you are a committer,
-# please add it. No review needed. All reviewers are committers, so this list
-# is only of committers who are not reviewers.
-
-
-committers_unable_to_review = [
- Committer("Aaron Boodman", "aa@chromium.org", "aboodman"),
- Committer("Adam Bergkvist", "adam.bergkvist@ericsson.com", "adambe"),
- Committer("Adam Kallai", "kadam@inf.u-szeged.hu", "kadam"),
- Committer("Adam Klein", "adamk@chromium.org", "aklein"),
- Committer("Adam Langley", "agl@chromium.org", "agl"),
- Committer("Ademar de Souza Reis Jr", ["ademar.reis@gmail.com", "ademar@webkit.org"], "ademar"),
- Committer("Albert J. Wong", "ajwong@chromium.org"),
- Committer("Alec Flett", ["alecflett@chromium.org", "alecflett@google.com"], "alecf"),
- Committer(u"Alexander F\u00e6r\u00f8y", ["ahf@0x90.dk", "alexander.faeroy@nokia.com"], "ahf"),
- Committer("Alexander Kellett", ["lypanov@mac.com", "a-lists001@lypanov.net", "lypanov@kde.org"], "lypanov"),
- Committer("Alexandre Elias", ["aelias@chromium.org", "aelias@google.com"], "aelias"),
- Committer("Alexandru Chiculita", "achicu@adobe.com", "achicu"),
- Committer("Alice Boxhall", "aboxhall@chromium.org", "aboxhall"),
- Committer("Allan Sandfeld Jensen", ["allan.jensen@digia.com", "kde@carewolf.com", "sandfeld@kde.org", "allan.jensen@nokia.com"], "carewolf"),
- Committer("Alok Priyadarshi", "alokp@chromium.org", "alokp"),
- Committer("Ami Fischman", ["fischman@chromium.org", "fischman@google.com"], "fischman"),
- Committer("Amruth Raj", "amruthraj@motorola.com", "amruthraj"),
- Committer("Andre Boule", "aboule@apple.com"),
- Committer("Andrei Popescu", "andreip@google.com", "andreip"),
- Committer("Andrew Wellington", ["andrew@webkit.org", "proton@wiretapped.net"], "proton"),
- Committer("Andrew Scherkus", "scherkus@chromium.org", "scherkus"),
- Committer("Andrey Adaykin", "aandrey@chromium.org", "aandrey"),
- Committer("Andrey Kosyakov", "caseq@chromium.org", "caseq"),
- Committer("Andras Becsi", ["abecsi@webkit.org", "andras.becsi@digia.com"], "bbandix"),
- Committer("Andy Wingo", "wingo@igalia.com", "wingo"),
- Committer("Anna Cavender", "annacc@chromium.org", "annacc"),
- Committer("Anthony Ricaud", "rik@webkit.org", "rik"),
- Committer("Antoine Labour", "piman@chromium.org", "piman"),
- Committer("Anton D'Auria", "adauria@apple.com", "antonlefou"),
- Committer("Anton Muhin", "antonm@chromium.org", "antonm"),
- Committer("Arko Saha", "arko@motorola.com", "arkos"),
- Committer("Arvid Nilsson", "anilsson@rim.com", "anilsson"),
- Committer("Balazs Kelemen", "kbalazs@webkit.org", "kbalazs"),
- Committer("Ben Murdoch", "benm@google.com", "benm"),
- Committer("Ben Wells", "benwells@chromium.org", "benwells"),
- Committer("Benjamin C Meyer", ["ben@meyerhome.net", "ben@webkit.org", "bmeyer@rim.com"], "icefox"),
- Committer("Benjamin Kalman", ["kalman@chromium.org", "kalman@google.com"], "kalman"),
- Committer("Benjamin Otte", ["otte@gnome.org", "otte@webkit.org"], "otte"),
- Committer("Bill Budge", ["bbudge@chromium.org", "bbudge@gmail.com"], "bbudge"),
- Committer("Brett Wilson", "brettw@chromium.org", "brettx"),
- Committer("Bruno de Oliveira Abinader", ["bruno.abinader@basyskom.com", "brunoabinader@gmail.com"], "abinader"),
- Committer("Cameron McCormack", ["cam@mcc.id.au", "cam@webkit.org"], "heycam"),
- Committer("Carol Szabo", ["carol@webkit.org", "carol.szabo@nokia.com"], "cszabo1"),
- Committer("Cary Clark", ["caryclark@google.com", "caryclark@chromium.org"], "caryclark"),
- Committer("Charles Reis", "creis@chromium.org", "creis"),
- Committer("Charles Wei", ["charles.wei@torchmobile.com.cn"], "cswei"),
- Committer("Chris Evans", ["cevans@google.com", "cevans@chromium.org"]),
- Committer("Chris Guillory", ["ctguil@chromium.org", "chris.guillory@google.com"], "ctguil"),
- Committer("Chris Petersen", "cpetersen@apple.com", "cpetersen"),
- Committer("Christian Dywan", ["christian@twotoasts.de", "christian@webkit.org", "christian@lanedo.com"]),
- Committer("Christophe Dumez", ["christophe.dumez@intel.com", "dchris@gmail.com"], "chris-qBT"),
- Committer("Collin Jackson", "collinj@webkit.org", "collinjackson"),
- Committer("Cris Neckar", "cdn@chromium.org", "cneckar"),
- Committer("Dan Winship", "danw@gnome.org", "danw"),
- Committer("Dana Jansens", "danakj@chromium.org", "danakj"),
- Committer("Daniel Cheng", "dcheng@chromium.org", "dcheng"),
- Committer("Dave Barton", "dbarton@mathscribe.com", "davebarton"),
- Committer("Dave Tharp", "dtharp@codeaurora.org", "dtharp"),
- Committer("David Michael Barr", ["davidbarr@chromium.org", "davidbarr@google.com", "b@rr-dav.id.au"], "barrbrain"),
- Committer("David Grogan", ["dgrogan@chromium.org", "dgrogan@google.com"], "dgrogan"),
- Committer("David Smith", ["catfish.man@gmail.com", "dsmith@webkit.org"], "catfishman"),
- Committer("Diego Gonzalez", ["diegohcg@webkit.org", "diego.gonzalez@openbossa.org"], "diegohcg"),
- Committer("Dinu Jacob", "dinu.s.jacob@intel.com", "dsjacob"),
- Committer("Dmitry Lomov", ["dslomov@google.com", "dslomov@chromium.org"], "dslomov"),
- Committer("Dominic Cooney", ["dominicc@chromium.org", "dominicc@google.com"], "dominicc"),
- Committer("Dominic Mazzoni", ["dmazzoni@google.com", "dmazzoni@chromium.org"], "dmazzoni"),
- Committer(u"Dominik R\u00f6ttsches", ["dominik.rottsches@intel.com", "d-r@roettsches.de"], "drott"),
- Committer("Drew Wilson", "atwilson@chromium.org", "atwilson"),
- Committer("Eli Fidler", ["eli@staikos.net", "efidler@rim.com"], "efidler"),
- Committer("Elliot Poger", "epoger@chromium.org", "epoger"),
- Committer("Erik Arvidsson", "arv@chromium.org", "arv"),
- Committer("Eric Roman", "eroman@chromium.org", "eroman"),
- Committer("Eric Uhrhane", "ericu@chromium.org", "ericu"),
- Committer("Evan Martin", "evan@chromium.org", "evmar"),
- Committer("Evan Stade", "estade@chromium.org", "estade"),
- Committer("Fady Samuel", "fsamuel@chromium.org", "fsamuel"),
- Committer("Feng Qian", "feng@chromium.org"),
- Committer("Florin Malita", ["fmalita@chromium.org", "fmalita@google.com"], "fmalita"),
- Committer("Fumitoshi Ukai", "ukai@chromium.org", "ukai"),
- Committer("Gabor Loki", "loki@webkit.org", "loki04"),
- Committer("Gabor Rapcsanyi", ["rgabor@webkit.org", "rgabor@inf.u-szeged.hu"], "rgabor"),
- Committer("Gavin Peters", ["gavinp@chromium.org", "gavinp@webkit.org", "gavinp@google.com"], "gavinp"),
- Committer("Girish Ramakrishnan", ["girish@forwardbias.in", "ramakrishnan.girish@gmail.com"], "girishr"),
- Committer("Graham Dennis", ["Graham.Dennis@gmail.com", "gdennis@webkit.org"]),
- Committer("Greg Bolsinga", "bolsinga@apple.com"),
- Committer("Grzegorz Czajkowski", "g.czajkowski@samsung.com", "grzegorz"),
- Committer("Hans Wennborg", "hans@chromium.org", "hwennborg"),
- Committer("Hayato Ito", "hayato@chromium.org", "hayato"),
- Committer("Hironori Bono", "hbono@chromium.org", "hbono"),
- Committer("Helder Correia", "helder.correia@nokia.com", "helder"),
- Committer("Hin-Chung Lam", ["hclam@google.com", "hclam@chromium.org"]),
- Committer("Hugo Parente Lima", "hugo.lima@openbossa.org", "hugopl"),
- Committer("Ian Vollick", "vollick@chromium.org", "vollick"),
- Committer("Igor Trindade Oliveira", ["igor.oliveira@webkit.org", "igor.o@sisa.samsung.com"], "igoroliveira"),
- Committer("Ilya Sherman", "isherman@chromium.org", "isherman"),
- Committer("Ilya Tikhonovsky", "loislo@chromium.org", "loislo"),
- Committer("Ivan Krsti\u0107", "ike@apple.com"),
- Committer("Jacky Jiang", ["jkjiang@webkit.org", "zkjiang008@gmail.com", "zhajiang@rim.com"], "jkjiang"),
- Committer("Jakob Petsovits", ["jpetsovits@rim.com", "jpetso@gmx.at"], "jpetso"),
- Committer("Jakub Wieczorek", "jwieczorek@webkit.org", "fawek"),
- Committer("James Hawkins", ["jhawkins@chromium.org", "jhawkins@google.com"], "jhawkins"),
- Committer("James Kozianski", ["koz@chromium.org", "koz@google.com"], "koz"),
- Committer("James Simonsen", "simonjam@chromium.org", "simonjam"),
- Committer("Jarred Nicholls", ["jarred@webkit.org", "jarred@sencha.com"], "jarrednicholls"),
- Committer("Jason Liu", ["jason.liu@torchmobile.com.cn", "jasonliuwebkit@gmail.com"], "jasonliu"),
- Committer("Jay Civelli", "jcivelli@chromium.org", "jcivelli"),
- Committer("Jeff Miller", "jeffm@apple.com", "jeffm7"),
- Committer("Jeffrey Pfau", ["jeffrey@endrift.com", "jpfau@apple.com"], "jpfau"),
- Committer("Jenn Braithwaite", "jennb@chromium.org", "jennb"),
- Committer("Jens Alfke", ["snej@chromium.org", "jens@apple.com"]),
- Committer("Jer Noble", "jer.noble@apple.com", "jernoble"),
- Committer("Jeremy Moskovich", ["playmobil@google.com", "jeremy@chromium.org"], "jeremymos"),
- Committer("Jesus Sanchez-Palencia", ["jesus@webkit.org", "jesus.palencia@openbossa.org"], "jeez_"),
- Committer("Jia Pu", "jpu@apple.com"),
- Committer("Joe Thomas", "joethomas@motorola.com", "joethomas"),
- Committer("John Abd-El-Malek", "jam@chromium.org", "jam"),
- Committer("John Gregg", ["johnnyg@google.com", "johnnyg@chromium.org"], "johnnyg"),
- Committer("John Knottenbelt", "jknotten@chromium.org", "jknotten"),
- Committer("Johnny Ding", ["jnd@chromium.org", "johnnyding.webkit@gmail.com"], "johnnyding"),
- Committer("Jon Lee", "jonlee@apple.com", "jonlee"),
- Committer("Jonathan Dong", ["jonathan.dong@torchmobile.com.cn"], "jondong"),
- Committer("Joone Hur", ["joone@webkit.org", "joone.hur@intel.com"], "joone"),
- Committer("Joost de Valk", ["joost@webkit.org", "webkit-dev@joostdevalk.nl"], "Altha"),
- Committer("Joshua Bell", ["jsbell@chromium.org", "jsbell@google.com"], "jsbell"),
- Committer("Julie Parent", ["jparent@google.com", "jparent@chromium.org"], "jparent"),
- Committer("Jungshik Shin", "jshin@chromium.org"),
- Committer("Justin Novosad", ["junov@google.com", "junov@chromium.org"], "junov"),
- Committer("Justin Schuh", "jschuh@chromium.org", "jschuh"),
- Committer("Kaustubh Atrawalkar", ["kaustubh@motorola.com"], "silverroots"),
- Committer("Keishi Hattori", "keishi@webkit.org", "keishi"),
- Committer("Kelly Norton", "knorton@alum.mit.edu"),
- Committer("Ken Buchanan", "kenrb@chromium.org", "kenrb"),
- Committer("Kenichi Ishibashi", "bashi@chromium.org", "bashi"),
- Committer("Kenji Imasaki", "imasaki@chromium.org", "imasaki"),
- Committer("Kent Hansen", "kent.hansen@nokia.com", "khansen"),
- Committer("Kihong Kwon", "kihong.kwon@samsung.com", "kihong"),
- Committer(u"Kim Gr\u00f6nholm", "kim.1.gronholm@nokia.com"),
- Committer("Kimmo Kinnunen", ["kimmo.t.kinnunen@nokia.com", "kimmok@iki.fi", "ktkinnun@webkit.org"], "kimmok"),
- Committer("Kinuko Yasuda", "kinuko@chromium.org", "kinuko"),
- Committer("Konrad Piascik", "kpiascik@rim.com", "kpiascik"),
- Committer("Kristof Kosztyo", "kkristof@inf.u-szeged.hu", "kkristof"),
- Committer("Krzysztof Kowalczyk", "kkowalczyk@gmail.com"),
- Committer("Kwang Yul Seo", ["skyul@company100.net", "kseo@webkit.org"], "kseo"),
- Committer("Lauro Neto", "lauro.neto@openbossa.org", "lmoura"),
- Committer("Leandro Gracia Gil", "leandrogracia@chromium.org", "leandrogracia"),
- Committer("Leandro Pereira", ["leandro@profusion.mobi", "leandro@webkit.org"], "acidx"),
- Committer("Leo Yang", ["leoyang@rim.com", "leoyang@webkit.org", "leoyang.webkit@gmail.com"], "leoyang"),
- Committer("Li Yin", ["li.yin@intel.com"], "liyin"),
- Committer("Lucas De Marchi", ["demarchi@webkit.org", "lucas.demarchi@profusion.mobi"], "demarchi"),
- Committer("Lucas Forschler", ["lforschler@apple.com"], "lforschler"),
- Committer("Luciano Wolf", "luciano.wolf@openbossa.org", "luck"),
- Committer("Luke Macpherson", ["macpherson@chromium.org", "macpherson@google.com"], "macpherson"),
- Committer("Mads Ager", "ager@chromium.org"),
- Committer("Mahesh Kulkarni", ["mahesh.kulkarni@nokia.com", "maheshk@webkit.org"], "maheshk"),
- Committer("Marcus Voltis Bulach", "bulach@chromium.org"),
- Committer("Mario Sanchez Prada", ["mario@webkit.org"], "msanchez"),
- Committer("Mark Lam", "mark.lam@apple.com", "mlam"),
- Committer("Mary Wu", ["mary.wu@torchmobile.com.cn", "wwendy2007@gmail.com"], "marywu"),
- Committer("Matt Delaney", "mdelaney@apple.com"),
- Committer("Matt Falkenhagen", "falken@chromium.org", "falken"),
- Committer("Matt Lilek", ["mlilek@apple.com", "webkit@mattlilek.com", "pewtermoose@webkit.org"], "pewtermoose"),
- Committer("Matt Perry", "mpcomplete@chromium.org"),
- Committer("Maxime Britto", ["maxime.britto@gmail.com", "britto@apple.com"]),
- Committer("Maxime Simon", ["simon.maxime@gmail.com", "maxime.simon@webkit.org"], "maxime.simon"),
- Committer(u"Michael Br\u00fcning", ["michaelbruening@gmail.com", "michael.bruning@digia.com", "michael.bruning@nokia.com"], "mibrunin"),
- Committer("Michael Nordman", "michaeln@google.com", "michaeln"),
- Committer("Michelangelo De Simone", "michelangelo@webkit.org", "michelangelo"),
- Committer("Mihnea Ovidenie", "mihnea@adobe.com", "mihnea"),
- Committer("Mike Belshe", ["mbelshe@chromium.org", "mike@belshe.com"]),
- Committer("Mike Fenton", ["mifenton@rim.com", "mike.fenton@torchmobile.com"], "mfenton"),
- Committer("Mike Lawther", "mikelawther@chromium.org", "mikelawther"),
- Committer("Mike Reed", "reed@google.com", "reed"),
- Committer("Mike Thole", ["mthole@mikethole.com", "mthole@apple.com"]),
- Committer("Mike West", ["mkwst@chromium.org", "mike@mikewest.org"], "mkwst"),
- Committer("Mikhail Naganov", "mnaganov@chromium.org"),
- Committer("Naoki Takano", ["honten@chromium.org", "takano.naoki@gmail.com"], "honten"),
- Committer("Nat Duca", ["nduca@chromium.org", "nduca@google.com"], "nduca"),
- Committer("Nayan Kumar K", ["nayankk@motorola.com", "nayankk@gmail.com"], "xc0ffee"),
- Committer("Nico Weber", ["thakis@chromium.org", "thakis@google.com"], "thakis"),
- Committer("Nima Ghanavatian", ["nghanavatian@rim.com", "nima.ghanavatian@gmail.com"], "nghanavatian"),
- Committer("Noel Gordon", ["noel.gordon@gmail.com", "noel@chromium.org", "noel@google.com"], "noel"),
- Committer("Pam Greene", "pam@chromium.org", "pamg"),
- Committer("Patrick Gansterer", ["paroga@paroga.com", "paroga@webkit.org"], "paroga"),
- Committer("Pavel Podivilov", "podivilov@chromium.org", "podivilov"),
- Committer("Peter Beverloo", ["peter@chromium.org", "peter@webkit.org", "beverloo@google.com"], "beverloo"),
- Committer("Peter Kasting", ["pkasting@google.com", "pkasting@chromium.org"], "pkasting"),
- Committer("Peter Varga", ["pvarga@webkit.org", "pvarga@inf.u-szeged.hu"], "stampho"),
- Committer("Philip Rogers", ["pdr@google.com", "pdr@chromium.org"], "pdr"),
- Committer("Pierre d'Herbemont", ["pdherbemont@free.fr", "pdherbemont@apple.com"], "pdherbemont"),
- Committer("Pierre-Olivier Latour", "pol@apple.com", "pol"),
- Committer("Pierre Rossi", "pierre.rossi@gmail.com", "elproxy"),
- Committer("Pratik Solanki", "psolanki@apple.com", "psolanki"),
- Committer("Qi Zhang", "qi.zhang02180@gmail.com", "qi"),
- Committer("Rafael Antognolli", "antognolli@profusion.mobi", "antognolli"),
- Committer("Rafael Brandao", "rafael.lobo@openbossa.org", "rafaelbrandao"),
- Committer("Rafael Weinstein", "rafaelw@chromium.org", "rafaelw"),
- Committer("Raphael Kubo da Costa", ["rakuco@webkit.org", "rakuco@FreeBSD.org", "raphael.kubo.da.costa@intel.com"], "rakuco"),
- Committer("Ravi Kasibhatla", "ravi.kasibhatla@motorola.com", "kphanee"),
- Committer("Renata Hodovan", "reni@webkit.org", "reni"),
- Committer("Robert Hogan", ["robert@webkit.org", "robert@roberthogan.net", "lists@roberthogan.net"], "mwenge"),
- Committer("Robert Kroeger", "rjkroege@chromium.org", "rjkroege"),
- Committer("Roger Fong", "roger_fong@apple.com", "rfong"),
- Committer("Roland Steiner", "rolandsteiner@chromium.org"),
- Committer("Ryuan Choi", "ryuan.choi@samsung.com", "ryuan"),
- Committer("Satish Sampath", "satish@chromium.org"),
- Committer("Scott Violet", "sky@chromium.org", "sky"),
- Committer("Sergio Villar Senin", ["svillar@igalia.com", "sergio@webkit.org"], "svillar"),
- Committer("Shawn Singh", "shawnsingh@chromium.org", "shawnsingh"),
- Committer("Shinya Kawanaka", "shinyak@chromium.org", "shinyak"),
- Committer("Siddharth Mathur", "siddharth.mathur@nokia.com", "simathur"),
- Committer("Simon Pena", "spena@igalia.com", "spenap"),
- Committer("Stephen Chenney", "schenney@chromium.org", "schenney"),
- Committer("Steve Lacey", "sjl@chromium.org", "stevela"),
- Committer("Taiju Tsuiki", "tzik@chromium.org", "tzik"),
- Committer("Takashi Sakamoto", "tasak@google.com", "tasak"),
- Committer("Takashi Toyoshima", "toyoshim@chromium.org", "toyoshim"),
- Committer("Terry Anderson", "tdanderson@chromium.org", "tdanderson"),
- Committer("Thiago Marcos P. Santos", ["tmpsantos@gmail.com", "thiago.santos@intel.com"], "tmpsantos"),
- Committer("Thomas Sepez", "tsepez@chromium.org", "tsepez"),
- Committer("Tom Hudson", ["tomhudson@google.com", "tomhudson@chromium.org"], "tomhudson"),
- Committer("Tom Zakrajsek", "tomz@codeaurora.org", "tomz"),
- Committer("Tommy Widenflycht", "tommyw@google.com", "tommyw"),
- Committer("Trey Matteson", "trey@usa.net", "trey"),
- Committer("Tristan O'Tierney", ["tristan@otierney.net", "tristan@apple.com"]),
- Committer("Vangelis Kokkevis", "vangelis@chromium.org", "vangelis"),
- Committer("Viatcheslav Ostapenko", ["ostap73@gmail.com", "v.ostapenko@samsung.com", "v.ostapenko@sisa.samsung.com"], "ostap"),
- Committer("Victor Carbune", "victor@rosedu.org", "vcarbune"),
- Committer("Victor Wang", "victorw@chromium.org", "victorw"),
- Committer("Victoria Kirst", ["vrk@chromium.org", "vrk@google.com"], "vrk"),
- Committer("Vincent Scheib", "scheib@chromium.org", "scheib"),
- Committer("Vineet Chaudhary", "rgf748@motorola.com", "vineetc"),
- Committer("Vitaly Repeshko", "vitalyr@chromium.org"),
- Committer("Vivek Galatage", ["vivekg@webkit.org", "vivek.vg@samsung.com"], "vivekg"),
- Committer("William Siegrist", "wsiegrist@apple.com", "wms"),
- Committer("W. James MacLean", "wjmaclean@chromium.org", "seumas"),
- Committer("Xianzhu Wang", ["wangxianzhu@chromium.org", "phnixwxz@gmail.com", "wangxianzhu@google.com"], "wangxianzhu"),
- Committer("Xiaomei Ji", "xji@chromium.org", "xji"),
- Committer("Yael Aharon", "yael@webkit.org", "yael"),
- Committer("Yaar Schnitman", ["yaar@chromium.org", "yaar@google.com"]),
- Committer("Yi Shen", ["yi.4.shen@nokia.com", "shenyi2006@gmail.com"]),
- Committer("Yongjun Zhang", ["yongjun.zhang@nokia.com", "yongjun_zhang@apple.com"]),
- Committer("Yoshifumi Inoue", "yosin@chromium.org", "yosin"),
- Committer("Yuqiang Xian", "yuqiang.xian@intel.com"),
- Committer("Yuzo Fujishima", "yuzo@google.com", "yuzo"),
- Committer("Zalan Bujtas", ["zbujtas@gmail.com", "zalan.bujtas@nokia.com"], "zalan"),
- Committer("Zeno Albisser", ["zeno@webkit.org", "zeno.albisser@nokia.com"], "zalbisser"),
- Committer("Zhenyao Mo", "zmo@google.com", "zhenyao"),
- Committer("Zoltan Horvath", ["zoltan@webkit.org", "hzoltan@inf.u-szeged.hu", "horvath.zoltan.6@stud.u-szeged.hu"], "zoltan"),
- Committer(u"\u017dan Dober\u0161ek", "zandobersek@gmail.com", "zdobersek"),
-]
-
-
-# This is intended as a canonical, machine-readable list of all reviewers for
-# WebKit. If your name is missing here and you are a reviewer, please add it.
-# No review needed.
-
-
-reviewers_list = [
- Reviewer("Abhishek Arya", "inferno@chromium.org", "inferno-sec"),
- Reviewer("Ada Chan", "adachan@apple.com", "chanada"),
- Reviewer("Adam Barth", "abarth@webkit.org", "abarth"),
- Reviewer("Adam Roben", ["aroben@webkit.org", "aroben@apple.com"], "aroben"),
- Reviewer("Adam Treat", ["treat@kde.org", "treat@webkit.org", "atreat@rim.com"], "manyoso"),
- Reviewer("Adele Peterson", "adele@apple.com", "adele"),
- Reviewer("Adrienne Walker", ["enne@google.com", "enne@chromium.org"], "enne"),
- Reviewer("Alejandro G. Castro", ["alex@igalia.com", "alex@webkit.org"], "alexg__"),
- Reviewer("Alexander Pavlov", ["apavlov@chromium.org", "pavlov81@gmail.com"], "apavlov"),
- Reviewer("Alexey Proskuryakov", ["ap@webkit.org", "ap@apple.com"], "ap"),
- Reviewer("Alexis Menard", ["alexis@webkit.org", "menard@kde.org"], "darktears"),
- Reviewer("Alice Liu", "alice.liu@apple.com", "aliu"),
- Reviewer("Alp Toker", ["alp@nuanti.com", "alp@atoker.com", "alp@webkit.org"], "alp"),
- Reviewer("Anders Carlsson", ["andersca@apple.com", "acarlsson@apple.com"], "andersca"),
- Reviewer("Andreas Kling", ["akling@apple.com", "kling@webkit.org", "awesomekling@apple.com", "andreas.kling@nokia.com"], "kling"),
- Reviewer("Andy Estes", "aestes@apple.com", "estes"),
- Reviewer("Antonio Gomes", ["tonikitoo@webkit.org", "agomes@rim.com"], "tonikitoo"),
- Reviewer("Antti Koivisto", ["koivisto@iki.fi", "antti@apple.com", "antti.j.koivisto@nokia.com"], "anttik"),
- Reviewer("Ariya Hidayat", ["ariya.hidayat@gmail.com", "ariya@sencha.com", "ariya@webkit.org"], "ariya"),
- Reviewer("Benjamin Poulain", ["benjamin@webkit.org", "benjamin.poulain@nokia.com", "ikipou@gmail.com"], "benjaminp"),
- Reviewer("Beth Dakin", "bdakin@apple.com", "dethbakin"),
- Reviewer("Brady Eidson", "beidson@apple.com", "bradee-oh"),
- Reviewer("Brent Fulgham", "bfulgham@webkit.org", "bfulgham"),
- Reviewer("Brian Weinstein", "bweinstein@apple.com", "bweinstein"),
- Reviewer("Caio Marcelo de Oliveira Filho", ["cmarcelo@webkit.org", "caio.oliveira@openbossa.org"], "cmarcelo"),
- Reviewer("Cameron Zwarich", ["zwarich@apple.com", "cwzwarich@apple.com", "cwzwarich@webkit.org"]),
- Reviewer("Carlos Garcia Campos", ["cgarcia@igalia.com", "carlosgc@gnome.org", "carlosgc@webkit.org"], "KaL"),
- Reviewer("Chang Shu", ["cshu@webkit.org", "c.shu@sisa.samsung.com"], "cshu"),
- Reviewer("Chris Blumenberg", "cblu@apple.com", "cblu"),
- Reviewer("Chris Marrin", "cmarrin@apple.com", "cmarrin"),
- Reviewer("Chris Fleizach", "cfleizach@apple.com", "cfleizach"),
- Reviewer("Chris Jerdonek", "cjerdonek@webkit.org", "cjerdonek"),
- Reviewer("Chris Rogers", "crogers@google.com", "crogers"),
- Reviewer(u"Csaba Osztrogon\u00e1c", "ossy@webkit.org", "ossy"),
- Reviewer("Dan Bernstein", ["mitz@webkit.org", "mitz@apple.com"], "mitzpettel"),
- Reviewer("Daniel Bates", ["dbates@webkit.org", "dbates@rim.com"], "dydz"),
- Reviewer("Darin Adler", "darin@apple.com", "darin"),
- Reviewer("Darin Fisher", ["fishd@chromium.org", "darin@chromium.org"], "fishd"),
- Reviewer("David Harrison", "harrison@apple.com", "harrison"),
- Reviewer("David Hyatt", "hyatt@apple.com", ["dhyatt", "hyatt"]),
- Reviewer("David Kilzer", ["ddkilzer@webkit.org", "ddkilzer@apple.com"], "ddkilzer"),
- Reviewer("David Levin", "levin@chromium.org", "dave_levin"),
- Reviewer("Dean Jackson", "dino@apple.com", "dino"),
- Reviewer("Dimitri Glazkov", "dglazkov@chromium.org", "dglazkov"),
- Reviewer("Dirk Pranke", "dpranke@chromium.org", "dpranke"),
- Reviewer("Dirk Schulze", "krit@webkit.org", "krit"),
- Reviewer("Dmitry Titov", "dimich@chromium.org", "dimich"),
- Reviewer("Don Melton", "gramps@apple.com", "gramps"),
- Reviewer("Dumitru Daniliuc", "dumi@chromium.org", "dumi"),
- Reviewer("Emil A Eklund", "eae@chromium.org", "eae"),
- Reviewer("Enrica Casucci", "enrica@apple.com", "enrica"),
- Reviewer("Eric Carlson", "eric.carlson@apple.com", "eric_carlson"),
- Reviewer("Eric Seidel", "eric@webkit.org", "eseidel"),
- Reviewer("Filip Pizlo", "fpizlo@apple.com", "pizlo"),
- Reviewer("Gavin Barraclough", "barraclough@apple.com", "gbarra"),
- Reviewer("Geoffrey Garen", "ggaren@apple.com", "ggaren"),
- Reviewer("George Staikos", ["staikos@kde.org", "staikos@webkit.org"]),
- Reviewer("Gustavo Noronha Silva", ["gns@gnome.org", "kov@webkit.org", "gustavo.noronha@collabora.co.uk", "gustavo.noronha@collabora.com"], "kov"),
- Reviewer("Gyuyoung Kim", ["gyuyoung.kim@samsung.com", "gyuyoung.kim@webkit.org"], "gyuyoung"),
- Reviewer("Hajime Morita", ["morrita@google.com", "morrita@chromium.org"], "morrita"),
- Reviewer("Holger Freyther", ["zecke@selfish.org", "zecke@webkit.org"], "zecke"),
- Reviewer("James Robinson", ["jamesr@chromium.org", "jamesr@google.com"], "jamesr"),
- Reviewer("Jan Alonzo", ["jmalonzo@gmail.com", "jmalonzo@webkit.org"], "janm"),
- Reviewer("Jeremy Orlow", ["jorlow@webkit.org", "jorlow@chromium.org"], "jorlow"),
- Reviewer("Jessie Berlin", ["jberlin@webkit.org", "jberlin@apple.com"], "jessieberlin"),
- Reviewer("Jian Li", "jianli@chromium.org", "jianli"),
- Reviewer("Jocelyn Turcotte", ["jocelyn.turcotte@digia.com", "jocelyn.turcotte@nokia.com"], "jturcotte"),
- Reviewer("Jochen Eisinger", "jochen@chromium.org", "jochen__"),
- Reviewer("John Sullivan", "sullivan@apple.com", "sullivan"),
- Reviewer("Jon Honeycutt", "jhoneycutt@apple.com", "jhoneycutt"),
- Reviewer("Joseph Pecoraro", ["joepeck@webkit.org", "pecoraro@apple.com"], "JoePeck"),
- Reviewer("Julien Chaffraix", ["jchaffraix@webkit.org", "julien.chaffraix@gmail.com", "jchaffraix@google.com", "jchaffraix@codeaurora.org"], "jchaffraix"),
- Reviewer("Justin Garcia", "justin.garcia@apple.com", "justing"),
- Reviewer("Ken Kocienda", "kocienda@apple.com"),
- Reviewer("Kenneth Rohde Christiansen", ["kenneth@webkit.org", "kenneth.r.christiansen@intel.com", "kenneth.christiansen@gmail.com"], ["kenneth_", "kenneth", "kenne"]),
- Reviewer("Kenneth Russell", ["kbr@google.com", "kbr@chromium.org"], ["kbr_google", "kbrgg"]),
- Reviewer("Kent Tamura", ["tkent@chromium.org", "tkent@google.com"], "tkent"),
- Reviewer("Kentaro Hara", ["haraken@chromium.org"], "haraken"),
- Reviewer("Kevin Decker", "kdecker@apple.com", "superkevin"),
- Reviewer("Kevin McCullough", "kmccullough@apple.com", "maculloch"),
- Reviewer("Kevin Ollivier", ["kevino@theolliviers.com", "kevino@webkit.org"], "kollivier"),
- Reviewer("Lars Knoll", ["lars@trolltech.com", "lars@kde.org", "lars.knoll@nokia.com"], "lars"),
- Reviewer("Laszlo Gombos", ["laszlo.gombos@webkit.org", "l.gombos@samsung.com", "laszlo.1.gombos@nokia.com"], "lgombos"),
- Reviewer("Levi Weintraub", ["leviw@chromium.org", "leviw@google.com", "lweintraub@apple.com"], "leviw"),
- Reviewer("Luiz Agostini", ["luiz@webkit.org", "luiz.agostini@openbossa.org"], "lca"),
- Reviewer("Maciej Stachowiak", "mjs@apple.com", "othermaciej"),
- Reviewer("Mark Hahnenberg", "mhahnenberg@apple.com", "mhahnenberg"),
- Reviewer("Mark Rowe", "mrowe@apple.com", "bdash"),
- Reviewer("Martin Robinson", ["mrobinson@webkit.org", "mrobinson@igalia.com", "martin.james.robinson@gmail.com"], "mrobinson"),
- Reviewer("Michael Saboff", "msaboff@apple.com", "msaboff"),
- Reviewer("Mihai Parparita", "mihaip@chromium.org", "mihaip"),
- Reviewer("Nate Chapin", "japhet@chromium.org", ["japhet", "natechapin"]),
- Reviewer("Nikolas Zimmermann", ["zimmermann@kde.org", "zimmermann@physik.rwth-aachen.de", "zimmermann@webkit.org", "nzimmermann@rim.com"], "wildfox"),
- Reviewer("Noam Rosenthal", ["noam@webkit.org", "noam.rosenthal@nokia.com"], "noamr"),
- Reviewer("Ojan Vafai", "ojan@chromium.org", "ojan"),
- Reviewer("Oliver Hunt", "oliver@apple.com", "olliej"),
- Reviewer("Pavel Feldman", ["pfeldman@chromium.org", "pfeldman@google.com"], "pfeldman"),
- Reviewer("Philippe Normand", ["pnormand@igalia.com", "philn@webkit.org", "philn@igalia.com"], ["philn-tp", "pnormand"]),
- Reviewer("Richard Williamson", "rjw@apple.com", "rjw"),
- Reviewer("Rob Buis", ["rwlbuis@gmail.com", "rwlbuis@webkit.org", "rbuis@rim.com"], "rwlbuis"),
- Reviewer("Ryosuke Niwa", "rniwa@webkit.org", "rniwa"),
- Reviewer("Sam Weinig", ["sam@webkit.org", "weinig@apple.com"], "weinig"),
- Reviewer("Shinichiro Hamaji", "hamaji@chromium.org", "hamaji"),
- Reviewer("Simon Fraser", "simon.fraser@apple.com", "smfr"),
- Reviewer("Simon Hausmann", ["hausmann@webkit.org", "hausmann@kde.org", "simon.hausmann@digia.com"], "tronical"),
- Reviewer("Stephanie Lewis", "slewis@apple.com", "sundiamonde"),
- Reviewer("Stephen White", "senorblanco@chromium.org", "senorblanco"),
- Reviewer("Steve Block", "steveblock@google.com", "steveblock"),
- Reviewer("Steve Falkenburg", "sfalken@apple.com", "sfalken"),
- Reviewer("Tim Omernick", "timo@apple.com"),
- Reviewer("Timothy Hatcher", ["timothy@apple.com", "timothy@hatcher.name"], "xenon"),
- Reviewer("Tim Horton", "timothy_horton@apple.com", "thorton"),
- Reviewer("Tony Chang", "tony@chromium.org", "tony^work"),
- Reviewer("Tony Gentilcore", "tonyg@chromium.org", "tonyg-cr"),
- Reviewer(u"Tor Arne Vestb\u00f8", ["vestbo@webkit.org", "tor.arne.vestbo@nokia.com"], "torarne"),
- Reviewer("Vicki Murley", "vicki@apple.com"),
- Reviewer("Vsevolod Vlasov", "vsevik@chromium.org", "vsevik"),
- Reviewer("Xan Lopez", ["xan.lopez@gmail.com", "xan@gnome.org", "xan@webkit.org", "xlopez@igalia.com"], "xan"),
- Reviewer("Yong Li", ["yoli@rim.com", "yong.li.webkit@gmail.com"], "yoli"),
- Reviewer("Yury Semikhatsky", "yurys@chromium.org", "yurys"),
- Reviewer("Yuta Kitamura", "yutak@chromium.org", "yutak"),
- Reviewer("Zack Rusin", "zack@kde.org", "zackr"),
- Reviewer("Zoltan Herczeg", ["zherczeg@webkit.org", "zherczeg@inf.u-szeged.hu"], "zherczeg"),
-]
-
class CommitterList(object):
# Committers and reviewers are passed in to allow easy testing
def __init__(self,
- committers=committers_unable_to_review,
- reviewers=reviewers_list,
- contributors=contributors_who_are_not_committers,
- watchers=watchers_who_are_not_contributors):
- self._accounts = watchers + contributors + committers + reviewers
+ committers=[],
+ reviewers=[],
+ contributors=[]):
+ # FIXME: These arguments only exist for testing. Clean it up.
+ if not (committers or reviewers or contributors):
+ loaded_data = self.load_json()
+ contributors = loaded_data['Contributors']
+ committers = loaded_data['Committers']
+ reviewers = loaded_data['Reviewers']
+
self._contributors = contributors + committers + reviewers
self._committers = committers + reviewers
self._reviewers = reviewers
@@ -579,8 +127,18 @@ class CommitterList(object):
self._accounts_by_email = {}
self._accounts_by_login = {}
- def accounts(self):
- return self._accounts
+ @staticmethod
+ @memoized
+ def load_json():
+ filesystem = FileSystem()
+ json_path = filesystem.join(filesystem.dirname(filesystem.path_to_module('webkitpy.common.config')), 'contributors.json')
+ contributors = json.loads(filesystem.read_text_file(json_path))
+
+ return {
+ 'Contributors': [Contributor(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Contributors'].iteritems()],
+ 'Committers': [Committer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Committers'].iteritems()],
+ 'Reviewers': [Reviewer(name, data.get('emails'), data.get('nicks')) for name, data in contributors['Reviewers'].iteritems()],
+ }
def contributors(self):
return self._contributors
@@ -601,7 +159,7 @@ class CommitterList(object):
def _email_to_account_map(self):
if not len(self._accounts_by_email):
- for account in self._accounts:
+ for account in self._contributors:
for email in account.emails:
assert(email not in self._accounts_by_email) # We should never have duplicate emails.
self._accounts_by_email[email] = account
@@ -609,18 +167,13 @@ class CommitterList(object):
def _login_to_account_map(self):
if not len(self._accounts_by_login):
- for account in self._accounts:
+ for account in self._contributors:
if account.emails:
login = account.bugzilla_email()
assert(login not in self._accounts_by_login) # We should never have duplicate emails.
self._accounts_by_login[login] = account
return self._accounts_by_login
- def _contributor_only(self, record):
- if record and not record.is_contributor:
- return None
- return record
-
def _committer_only(self, record):
if record and not record.can_commit:
return None
@@ -642,7 +195,8 @@ class CommitterList(object):
return None
def contributors_by_search_string(self, string):
- return filter(lambda contributor: contributor.contains_string(string), self.contributors())
+ glob_matches = filter(lambda contributor: contributor.matches_glob(string), self.contributors())
+ return glob_matches or filter(lambda contributor: contributor.contains_string(string), self.contributors())
def contributors_by_email_username(self, string):
string = string + '@'
@@ -673,7 +227,7 @@ class CommitterList(object):
string_in_lowercase = string.lower()
# 1. Exact match for fullname, email and irc_nicknames
- account = self.contributor_by_name(string_in_lowercase) or self.account_by_email(string_in_lowercase) or self.contributor_by_irc_nickname(string_in_lowercase)
+ account = self.contributor_by_name(string_in_lowercase) or self.contributor_by_email(string_in_lowercase) or self.contributor_by_irc_nickname(string_in_lowercase)
if account:
return [account], 0
@@ -706,20 +260,14 @@ class CommitterList(object):
return [], len(string)
return contributorWithMinDistance, minDistance
- def account_by_login(self, login):
- return self._login_to_account_map().get(login.lower()) if login else None
-
- def account_by_email(self, email):
+ def contributor_by_email(self, email):
return self._email_to_account_map().get(email.lower()) if email else None
def contributor_by_name(self, name):
return self._name_to_contributor_map().get(name.lower()) if name else None
- def contributor_by_email(self, email):
- return self._contributor_only(self.account_by_email(email))
-
def committer_by_email(self, email):
- return self._committer_only(self.account_by_email(email))
+ return self._committer_only(self.contributor_by_email(email))
def reviewer_by_email(self, email):
- return self._reviewer_only(self.account_by_email(email))
+ return self._reviewer_only(self.contributor_by_email(email))
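[Editor's note: for reference, a minimal standalone sketch of the data path introduced above — parsing the contributors.json file added later in this patch and resolving an account by e-mail, roughly as the new CommitterList.load_json() and contributor_by_email() do. The Contributor stand-in class and the helper names below are illustrative only; the real code goes through webkitpy's FileSystem and the @memoized decorator.]

    import json

    class Contributor(object):
        # Stand-in for webkitpy.common.config.committers.Contributor.
        def __init__(self, name, emails, nicks=None):
            self.full_name = name
            self.emails = [email.lower() for email in (emails or [])]
            self.nicks = nicks or []

    def load_contributors(path='contributors.json'):
        # Build Contributor objects from the three sections of contributors.json.
        with open(path) as f:
            data = json.load(f)
        return [Contributor(name, entry.get('emails'), entry.get('nicks'))
                for section in ('Contributors', 'Committers', 'Reviewers')
                for name, entry in data.get(section, {}).items()]

    def contributor_by_email(contributors, email):
        # Case-insensitive e-mail lookup, mirroring _email_to_account_map().
        by_email = {}
        for account in contributors:
            for address in account.emails:
                by_email[address] = account
        return by_email.get(email.lower()) if email else None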
diff --git a/Tools/Scripts/webkitpy/common/config/committers_unittest.py b/Tools/Scripts/webkitpy/common/config/committers_unittest.py
index 1c8c86a1a..f23c5fbda 100644
--- a/Tools/Scripts/webkitpy/common/config/committers_unittest.py
+++ b/Tools/Scripts/webkitpy/common/config/committers_unittest.py
@@ -25,22 +25,20 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
-from webkitpy.common.config.committers import Account, CommitterList, Contributor, Committer, Reviewer
+import unittest2 as unittest
+from webkitpy.common.config.committers import CommitterList, Contributor, Committer, Reviewer
class CommittersTest(unittest.TestCase):
def test_committer_lookup(self):
- account = Account('Test Zero', ['zero@test.com', 'zero@gmail.com'], 'zero')
committer = Committer('Test One', 'one@test.com', 'one')
reviewer = Reviewer('Test Two', ['two@test.com', 'Two@rad.com', 'so_two@gmail.com'])
contributor = Contributor('Test Three', ['Three@test.com'], 'three')
contributor_with_two_nicknames = Contributor('Other Four', ['otherfour@webkit.org', 'otherfour@webkit2.org'], ['four', 'otherfour'])
contributor_with_same_email_username = Contributor('Yet Another Four', ['otherfour@webkit.com'], ['yetanotherfour'])
- committer_list = CommitterList(watchers=[account], committers=[committer], reviewers=[reviewer],
+ committer_list = CommitterList(committers=[committer], reviewers=[reviewer],
contributors=[contributor, contributor_with_two_nicknames, contributor_with_same_email_username])
# Test valid committer, reviewer and contributor lookup
- self.assertEqual(committer_list.account_by_email('zero@test.com'), account)
self.assertEqual(committer_list.committer_by_email('one@test.com'), committer)
self.assertEqual(committer_list.reviewer_by_email('two@test.com'), reviewer)
self.assertEqual(committer_list.committer_by_email('two@test.com'), reviewer)
@@ -51,7 +49,7 @@ class CommittersTest(unittest.TestCase):
# Test valid committer, reviewer and contributor lookup
self.assertEqual(committer_list.committer_by_name("Test One"), committer)
self.assertEqual(committer_list.committer_by_name("Test Two"), reviewer)
- self.assertEqual(committer_list.committer_by_name("Test Three"), None)
+ self.assertIsNone(committer_list.committer_by_name("Test Three"))
self.assertEqual(committer_list.contributor_by_name("Test Three"), contributor)
self.assertEqual(committer_list.contributor_by_name("test one"), committer)
self.assertEqual(committer_list.contributor_by_name("test two"), reviewer)
@@ -60,23 +58,15 @@ class CommittersTest(unittest.TestCase):
# Test that the first email is assumed to be the Bugzilla email address (for now)
self.assertEqual(committer_list.committer_by_email('two@rad.com').bugzilla_email(), 'two@test.com')
- # Test lookup by login email address
- self.assertEqual(committer_list.account_by_login('zero@test.com'), account)
- self.assertEqual(committer_list.account_by_login('zero@gmail.com'), None)
- self.assertEqual(committer_list.account_by_login('one@test.com'), committer)
- self.assertEqual(committer_list.account_by_login('two@test.com'), reviewer)
- self.assertEqual(committer_list.account_by_login('Two@rad.com'), None)
- self.assertEqual(committer_list.account_by_login('so_two@gmail.com'), None)
-
# Test that a known committer is not returned during reviewer lookup
- self.assertEqual(committer_list.reviewer_by_email('one@test.com'), None)
- self.assertEqual(committer_list.reviewer_by_email('three@test.com'), None)
+ self.assertIsNone(committer_list.reviewer_by_email('one@test.com'))
+ self.assertIsNone(committer_list.reviewer_by_email('three@test.com'))
# and likewise that a known contributor is not returned for committer lookup.
- self.assertEqual(committer_list.committer_by_email('three@test.com'), None)
+ self.assertIsNone(committer_list.committer_by_email('three@test.com'))
# Test that unknown email address fail both committer and reviewer lookup
- self.assertEqual(committer_list.committer_by_email('bar@bar.com'), None)
- self.assertEqual(committer_list.reviewer_by_email('bar@bar.com'), None)
+ self.assertIsNone(committer_list.committer_by_email('bar@bar.com'))
+ self.assertIsNone(committer_list.reviewer_by_email('bar@bar.com'))
# Test that emails returns a list.
self.assertEqual(committer.emails, ['one@test.com'])
@@ -95,6 +85,8 @@ class CommittersTest(unittest.TestCase):
self.assertEqual(committer_list.contributors_by_search_string('test'), [contributor, committer, reviewer])
self.assertEqual(committer_list.contributors_by_search_string('rad'), [reviewer])
self.assertEqual(committer_list.contributors_by_search_string('Two'), [reviewer])
+ self.assertEqual(committer_list.contributors_by_search_string('otherfour'), [contributor_with_two_nicknames])
+ self.assertEqual(committer_list.contributors_by_search_string('*otherfour*'), [contributor_with_two_nicknames, contributor_with_same_email_username])
self.assertEqual(committer_list.contributors_by_email_username("one"), [committer])
self.assertEqual(committer_list.contributors_by_email_username("four"), [])
@@ -109,6 +101,11 @@ class CommittersTest(unittest.TestCase):
expected_names = [name_of_expected_contributor] if name_of_expected_contributor else []
self.assertEqual(([contributor.full_name for contributor in contributors], distance), (expected_names, expected_distance))
+ # Test that the string representation of a Contributor supports unicode
+ def test_contributor_encoding(self):
+ committer_encoding = Contributor(u'\u017dan M\u00fcller', 'zmuller@example.com', 'zmuller')
+ self.assertTrue(str(committer_encoding))
+
# Basic testing of the edit distance matching ...
def test_contributors_by_fuzzy_match(self):
self._assert_fuzz_match('Geoff Garen', 'Geoffrey Garen', 3)
diff --git a/Tools/Scripts/webkitpy/common/config/committervalidator.py b/Tools/Scripts/webkitpy/common/config/committervalidator.py
index 6cec3da8c..89a4866be 100644
--- a/Tools/Scripts/webkitpy/common/config/committervalidator.py
+++ b/Tools/Scripts/webkitpy/common/config/committervalidator.py
@@ -78,18 +78,20 @@ class CommitterValidator(object):
attachment_id,
additional_comment_text=None):
comment_text = "Rejecting attachment %s from commit-queue." % attachment_id
+ if additional_comment_text:
+ comment_text += "\n\n%s" % additional_comment_text
self.host.bugs.set_flag_on_attachment(attachment_id,
"commit-queue",
"-",
- comment_text,
- additional_comment_text)
+ comment_text)
def reject_patch_from_review_queue(self,
attachment_id,
additional_comment_text=None):
comment_text = "Rejecting attachment %s from review queue." % attachment_id
+ if additional_comment_text:
+ comment_text += "\n\n%s" % additional_comment_text
self.host.bugs.set_flag_on_attachment(attachment_id,
'review',
'-',
- comment_text,
- additional_comment_text)
+ comment_text)
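[Editor's note: the committervalidator.py hunks above fold the optional extra text into a single comment before calling set_flag_on_attachment(), rather than passing it as a separate argument. A small illustrative sketch of the resulting message; the helper name here is hypothetical.]

    def build_rejection_comment(attachment_id, queue_name, additional_comment_text=None):
        # Mirrors the concatenation now done in reject_patch_from_commit_queue()
        # and reject_patch_from_review_queue().
        comment_text = "Rejecting attachment %s from %s." % (attachment_id, queue_name)
        if additional_comment_text:
            comment_text += "\n\n%s" % additional_comment_text
        return comment_text

    print(build_rejection_comment(12345, "commit-queue", "Failed to apply patch."))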
diff --git a/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py b/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py
index 232f0771e..e8aa88721 100644
--- a/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py
+++ b/Tools/Scripts/webkitpy/common/config/committervalidator_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from .committervalidator import CommitterValidator
@@ -41,4 +41,4 @@ class CommitterValidatorTest(unittest.TestCase):
- If you do not have review rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
- If you have review rights please correct the error in Tools/Scripts/webkitpy/common/config/committers.py by adding yourself to the file (no review needed). The commit-queue restarts itself every 2 hours. After restart the commit-queue will correctly respect your review rights."""
- self.assertEqual(validator._flag_permission_rejection_message("foo@foo.com", "review"), expected_messsage)
+ self.assertMultiLineEqual(validator._flag_permission_rejection_message("foo@foo.com", "review"), expected_messsage)
diff --git a/Tools/Scripts/webkitpy/common/config/contributionareas.py b/Tools/Scripts/webkitpy/common/config/contributionareas.py
index b48df2a55..effcd22a0 100644
--- a/Tools/Scripts/webkitpy/common/config/contributionareas.py
+++ b/Tools/Scripts/webkitpy/common/config/contributionareas.py
@@ -131,7 +131,6 @@ contribution_areas = [
_Area('Scrollbars', ['scroll']),
_Area('Security'), # Probably need more tokens
# FIXME: 'Shadow DOM'
- _Area('Skia'),
_Area('Soup Network Backend', ['soup']),
# FIXME: 'Spell Checking' just need tokens
_Area('Tables', ['htmltable', 'rendertable']),
@@ -144,7 +143,6 @@ contribution_areas = [
_Area('The QtWebKit Port', ['qt']),
_Area('The WinCE Port', ['wince']),
_Area('The WinCairo Port', ['cairo']),
- _Area('The wxWebKit Port', ['wx']),
_Area('Threading', ['thread']),
_Area('Tools'),
_Area('Touch Support', ['touch']),
diff --git a/Tools/Scripts/webkitpy/common/config/contributionareas_unittest.py b/Tools/Scripts/webkitpy/common/config/contributionareas_unittest.py
index c3960d9c1..17d4fe718 100644
--- a/Tools/Scripts/webkitpy/common/config/contributionareas_unittest.py
+++ b/Tools/Scripts/webkitpy/common/config/contributionareas_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from .contributionareas import _Intersection
from .contributionareas import _Area
diff --git a/Tools/Scripts/webkitpy/common/config/contributors.json b/Tools/Scripts/webkitpy/common/config/contributors.json
new file mode 100644
index 000000000..d2fb18910
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/contributors.json
@@ -0,0 +1,4184 @@
+{
+ "Committers" : {
+ "Aaron Boodman" : {
+ "emails" : [
+ "aa@chromium.org"
+ ],
+ "nicks" : [
+ "aboodman"
+ ]
+ },
+ "Aaron Colwell" : {
+ "emails" : [
+ "acolwell@chromium.org"
+ ],
+ "nicks" : [
+ "acolwell"
+ ]
+ },
+ "Adam Bergkvist" : {
+ "emails" : [
+ "adam.bergkvist@ericsson.com"
+ ],
+ "nicks" : [
+ "adambe"
+ ]
+ },
+ "Adam Kallai" : {
+ "emails" : [
+ "kadam@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "kadam"
+ ]
+ },
+ "Adam Klein" : {
+ "emails" : [
+ "adamk@chromium.org"
+ ],
+ "nicks" : [
+ "aklein"
+ ]
+ },
+ "Adam Langley" : {
+ "emails" : [
+ "agl@chromium.org"
+ ],
+ "expertise" : "All Chromium Linux Code (yes, all of it)",
+ "nicks" : [
+ "agl"
+ ]
+ },
+ "Ademar de Souza Reis Jr" : {
+ "emails" : [
+ "ademar.reis@gmail.com",
+ "ademar@webkit.org"
+ ],
+ "nicks" : [
+ "ademar"
+ ]
+ },
+ "Adenilson Cavalcanti" : {
+ "emails" : [
+ "cavalcantii@gmail.com"
+ ],
+ "nicks" : [
+ "Savago"
+ ]
+ },
+ "Albert J. Wong" : {
+ "emails" : [
+ "ajwong@chromium.org"
+ ]
+ },
+ "Alec Flett" : {
+ "emails" : [
+ "alecflett@chromium.org",
+ "alecflett@google.com"
+ ],
+ "nicks" : [
+ "alecf"
+ ]
+ },
+ "Alex Christensen" : {
+ "emails" : [
+ "achristensen@apple.com",
+ "alex.christensen@flexsim.com"
+ ],
+ "expertise" : "Win64",
+ "nicks" : [
+ "alexchristensen"
+ ]
+ },
+ "Alexander F\u00e6r\u00f8y" : {
+ "emails" : [
+ "ahf@0x90.dk",
+ "alexander.faeroy@nokia.com"
+ ],
+ "expertise" : "The QtWebKit Port",
+ "nicks" : [
+ "ahf"
+ ]
+ },
+ "Alexander Kellett" : {
+ "emails" : [
+ "lypanov@mac.com",
+ "a-lists001@lypanov.net",
+ "lypanov@kde.org"
+ ],
+ "nicks" : [
+ "lypanov"
+ ]
+ },
+ "Alexandre Elias" : {
+ "emails" : [
+ "aelias@chromium.org",
+ "aelias@google.com"
+ ],
+ "nicks" : [
+ "aelias"
+ ]
+ },
+ "Alice Boxhall" : {
+ "emails" : [
+ "aboxhall@chromium.org"
+ ],
+ "nicks" : [
+ "aboxhall"
+ ]
+ },
+ "Alok Priyadarshi" : {
+ "emails" : [
+ "alokp@chromium.org"
+ ],
+ "nicks" : [
+ "alokp"
+ ]
+ },
+ "Ami Fischman" : {
+ "emails" : [
+ "fischman@chromium.org",
+ "fischman@google.com"
+ ],
+ "nicks" : [
+ "fischman"
+ ]
+ },
+ "Amruth Raj" : {
+ "emails" : [
+ "amruthraj@motorola.com"
+ ],
+ "nicks" : [
+ "amruthraj"
+ ]
+ },
+ "Andras Becsi" : {
+ "emails" : [
+ "abecsi@webkit.org",
+ "andras.becsi@digia.com"
+ ],
+ "expertise" : "The QtWebKit Port, Tools, Layout and Rendering",
+ "nicks" : [
+ "bbandix"
+ ]
+ },
+ "Andre Boule" : {
+ "emails" : [
+ "aboule@apple.com"
+ ]
+ },
+ "Andrei Bucur" : {
+ "emails" : [
+ "abucur@adobe.com"
+ ],
+ "nicks" : [
+ "abucur"
+ ]
+ },
+ "Andrei Popescu" : {
+ "emails" : [
+ "andreip@google.com"
+ ],
+ "nicks" : [
+ "andreip"
+ ]
+ },
+ "Andrew Lo" : {
+ "emails" : [
+ "anlo@blackberry.com",
+ "anlo@rim.com",
+ "andrewlo@gmail.com"
+ ],
+ "nicks" : [
+ "andrewlo"
+ ]
+ },
+ "Andrew Scherkus" : {
+ "emails" : [
+ "scherkus@chromium.org"
+ ],
+ "nicks" : [
+ "scherkus"
+ ]
+ },
+ "Andrew Wellington" : {
+ "emails" : [
+ "andrew@webkit.org",
+ "proton@wiretapped.net"
+ ],
+ "nicks" : [
+ "proton"
+ ]
+ },
+ "Andrey Adaykin" : {
+ "emails" : [
+ "aandrey@chromium.org"
+ ],
+ "expertise" : "Developer Tools, Web Inspector",
+ "nicks" : [
+ "aandrey"
+ ]
+ },
+ "Andrey Kosyakov" : {
+ "emails" : [
+ "caseq@chromium.org"
+ ],
+ "nicks" : [
+ "caseq"
+ ]
+ },
+ "Andy Wingo" : {
+ "emails" : [
+ "wingo@igalia.com"
+ ],
+ "expertise" : "JavaScriptCore, the WebKitGTK+ port",
+ "nicks" : [
+ "wingo"
+ ]
+ },
+ "Anna Cavender" : {
+ "emails" : [
+ "annacc@chromium.org"
+ ],
+ "nicks" : [
+ "annacc"
+ ]
+ },
+ "Anthony Ricaud" : {
+ "emails" : [
+ "rik@webkit.org"
+ ],
+ "expertise" : "Web Inspector",
+ "nicks" : [
+ "rik"
+ ]
+ },
+ "Antoine Labour" : {
+ "emails" : [
+ "piman@chromium.org"
+ ],
+ "nicks" : [
+ "piman"
+ ]
+ },
+ "Antoine Quint" : {
+ "emails" : [
+ "graouts@apple.com"
+ ],
+ "nicks" : [
+ "graouts"
+ ]
+ },
+ "Anton D'Auria" : {
+ "emails" : [
+ "adauria@apple.com"
+ ],
+ "nicks" : [
+ "antonlefou"
+ ]
+ },
+ "Anton Muhin" : {
+ "emails" : [
+ "antonm@chromium.org"
+ ],
+ "nicks" : [
+ "antonm"
+ ]
+ },
+ "Arko Saha" : {
+ "emails" : [
+ "arko@motorola.com"
+ ],
+ "nicks" : [
+ "arkos"
+ ]
+ },
+ "Arno Renevier" : {
+ "emails" : [
+ "a.renevier@samsung.com"
+ ],
+ "nicks" : [
+ "arno"
+ ]
+ },
+ "Arpita Bahuguna" : {
+ "emails" : [
+ "a.bah@samsung.com"
+ ],
+ "nicks" : [
+ "arpitab"
+ ]
+ },
+ "Arvid Nilsson" : {
+ "emails" : [
+ "anilsson@blackberry.com",
+ "anilsson@rim.com"
+ ],
+ "nicks" : [
+ "anilsson"
+ ]
+ },
+ "Balazs Kelemen" : {
+ "emails" : [
+ "kbalazs@webkit.org",
+ "b.kelemen@sisa.samsung.com"
+ ],
+ "expertise" : "The QtWebKit Port, WebKit2",
+ "nicks" : [
+ "kbalazs"
+ ]
+ },
+ "Bear Travis" : {
+ "emails" : [
+ "betravis@adobe.com"
+ ],
+ "nicks" : [
+ "betravis"
+ ]
+ },
+ "Ben Murdoch" : {
+ "emails" : [
+ "benm@google.com"
+ ],
+ "nicks" : [
+ "benm"
+ ]
+ },
+ "Ben Wells" : {
+ "emails" : [
+ "benwells@chromium.org"
+ ],
+ "nicks" : [
+ "benwells"
+ ]
+ },
+ "Benjamin C Meyer" : {
+ "emails" : [
+ "ben@meyerhome.net",
+ "ben@webkit.org"
+ ],
+ "nicks" : [
+ "icefox"
+ ]
+ },
+ "Benjamin Kalman" : {
+ "emails" : [
+ "kalman@chromium.org",
+ "kalman@google.com"
+ ],
+ "nicks" : [
+ "kalman"
+ ]
+ },
+ "Benjamin Otte" : {
+ "emails" : [
+ "otte@gnome.org",
+ "otte@webkit.org"
+ ],
+ "expertise" : "WebKitGTK+ port, GTK+ lead developer",
+ "nicks" : [
+ "otte"
+ ]
+ },
+ "Bill Budge" : {
+ "emails" : [
+ "bbudge@chromium.org",
+ "bbudge@gmail.com"
+ ],
+ "nicks" : [
+ "bbudge"
+ ]
+ },
+ "Brett Wilson" : {
+ "emails" : [
+ "brettw@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, Graphics, Skia, URL Parsing",
+ "nicks" : [
+ "brettx"
+ ]
+ },
+ "Bruno de Oliveira Abinader" : {
+ "emails" : [
+ "bruno.d@partner.samsung.com",
+ "bruno.abinader@basyskom.com",
+ "brunoabinader@gmail.com"
+ ],
+ "expertise" : "The QtWebKit Port, CSS, Layout and Rendering",
+ "nicks" : [
+ "abinader"
+ ]
+ },
+ "Byungwoo Lee" : {
+ "emails" : [
+ "bw80.lee@samsung.com",
+ "bw80.lee@gmail.com"
+ ],
+ "expertise" : "The EFLWebKit Port",
+ "nicks" : [
+ "byungwoo"
+ ]
+ },
+ "Cameron McCormack" : {
+ "emails" : [
+ "cam@mcc.id.au",
+ "cam@webkit.org"
+ ],
+ "nicks" : [
+ "heycam"
+ ]
+ },
+ "Carol Szabo" : {
+ "emails" : [
+ "carol@webkit.org",
+ "carol.szabo@nokia.com"
+ ],
+ "nicks" : [
+ "cszabo1"
+ ]
+ },
+ "Cary Clark" : {
+ "emails" : [
+ "caryclark@google.com",
+ "caryclark@chromium.org"
+ ],
+ "nicks" : [
+ "caryclark"
+ ]
+ },
+ "Charles Reis" : {
+ "emails" : [
+ "creis@chromium.org"
+ ],
+ "nicks" : [
+ "creis"
+ ]
+ },
+ "Charles Wei" : {
+ "emails" : [
+ "charles.wei@torchmobile.com.cn"
+ ],
+ "nicks" : [
+ "cswei"
+ ]
+ },
+ "Chris Evans" : {
+ "emails" : [
+ "cevans@google.com",
+ "cevans@chromium.org"
+ ],
+ "expertise" : "Security"
+ },
+ "Chris Guillory" : {
+ "emails" : [
+ "ctguil@chromium.org",
+ "chris.guillory@google.com"
+ ],
+ "nicks" : [
+ "ctguil"
+ ]
+ },
+ "Chris Petersen" : {
+ "emails" : [
+ "cpetersen@apple.com"
+ ],
+ "expertise" : "Performance testing, Qualification testing",
+ "nicks" : [
+ "cpetersen"
+ ]
+ },
+ "Christian Dywan" : {
+ "emails" : [
+ "christian@twotoasts.de",
+ "christian@webkit.org",
+ "christian@lanedo.com"
+ ]
+ },
+ "Claudio Saavedra" : {
+ "emails" : [
+ "csaavedra@igalia.com"
+ ],
+ "expertise" : "WebKitGTK+ port, Epiphany developer, HTML Editing",
+ "nicks" : [
+ "claudio___"
+ ]
+ },
+ "Collin Jackson" : {
+ "emails" : [
+ "collinj@webkit.org"
+ ],
+ "nicks" : [
+ "collinjackson"
+ ]
+ },
+ "Cris Neckar" : {
+ "emails" : [
+ "cdn@chromium.org"
+ ],
+ "nicks" : [
+ "cneckar"
+ ]
+ },
+ "Dan Winship" : {
+ "emails" : [
+ "danw@gnome.org"
+ ],
+ "nicks" : [
+ "danw"
+ ]
+ },
+ "Dana Jansens" : {
+ "emails" : [
+ "danakj@chromium.org"
+ ],
+ "nicks" : [
+ "danakj"
+ ]
+ },
+ "Daniel Cheng" : {
+ "emails" : [
+ "dcheng@chromium.org"
+ ],
+ "nicks" : [
+ "dcheng"
+ ]
+ },
+ "Dave Barton" : {
+ "emails" : [
+ "dbarton@mathscribe.com"
+ ],
+ "expertise" : "MathML",
+ "nicks" : [
+ "davebarton"
+ ]
+ },
+ "Dave Tharp" : {
+ "emails" : [
+ "dtharp@codeaurora.org"
+ ],
+ "nicks" : [
+ "dtharp"
+ ]
+ },
+ "David Farler" : {
+ "emails" : [
+ "dfarler@apple.com"
+ ],
+ "nicks" : [
+ "dfarler"
+ ]
+ },
+ "David Grogan" : {
+ "emails" : [
+ "dgrogan@chromium.org",
+ "dgrogan@google.com"
+ ],
+ "expertise" : "IndexedDB",
+ "nicks" : [
+ "dgrogan"
+ ]
+ },
+ "David Michael Barr" : {
+ "emails" : [
+ "davidbarr@chromium.org",
+ "davidbarr@google.com",
+ "b@rr-dav.id.au"
+ ],
+ "nicks" : [
+ "barrbrain"
+ ]
+ },
+ "David Smith" : {
+ "emails" : [
+ "catfish.man@gmail.com",
+ "dsmith@webkit.org"
+ ],
+ "nicks" : [
+ "catfishman"
+ ]
+ },
+ "Diego Gonzalez" : {
+ "emails" : [
+ "diegohcg@webkit.org",
+ "diego.gonzalez@openbossa.org"
+ ],
+ "expertise" : "The QtWebKit Port",
+ "nicks" : [
+ "diegohcg"
+ ]
+ },
+ "Dinu Jacob" : {
+ "emails" : [
+ "dinu.s.jacob@intel.com"
+ ],
+ "nicks" : [
+ "dsjacob"
+ ]
+ },
+ "Dmitry Gorbik" : {
+ "emails" : [
+ "dgorbik@apple.com"
+ ],
+ "nicks" : [
+ "dgorbik"
+ ]
+ },
+ "Dmitry Lomov" : {
+ "emails" : [
+ "dslomov@google.com",
+ "dslomov@chromium.org"
+ ],
+ "expertise" : "V8 bindings, Workers, gtest ",
+ "nicks" : [
+ "dslomov"
+ ]
+ },
+ "Dominic Cooney" : {
+ "emails" : [
+ "dominicc@chromium.org",
+ "dominicc@google.com"
+ ],
+ "nicks" : [
+ "dominicc"
+ ]
+ },
+ "Dominic Mazzoni" : {
+ "emails" : [
+ "dmazzoni@google.com",
+ "dmazzoni@chromium.org"
+ ],
+ "nicks" : [
+ "dmazzoni"
+ ]
+ },
+ "Dominik R\u00f6ttsches" : {
+ "emails" : [
+ "dominik.rottsches@intel.com",
+ "d-r@roettsches.de"
+ ],
+ "expertise" : "WebKit EFL, Cairo HarfBuzz Support, GraphicsContextCairo",
+ "nicks" : [
+ "drott"
+ ]
+ },
+ "Dongseong Hwang" : {
+ "emails" : [
+ "dongseong.hwang@intel.com",
+ "luxtella@gmail.com",
+ "luxtella@company100.net"
+ ],
+ "expertise" : "Accelerated Compositing, Canvas, CSS Shaders",
+ "nicks" : [
+ "dshwang"
+ ]
+ },
+ "Dongwoo Joshua Im" : {
+ "emails" : [
+ "dw.im@samsung.com",
+ "dwim79@gmail.com"
+ ],
+ "expertise" : "The EFLWebKit Port",
+ "nicks" : [
+ "dwim"
+ ]
+ },
+ "Drew Wilson" : {
+ "emails" : [
+ "atwilson@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, Workers, MessagePorts",
+ "nicks" : [
+ "atwilson"
+ ]
+ },
+ "Eli Fidler" : {
+ "emails" : [
+ "efidler@blackberry.com",
+ "efidler@rim.com"
+ ],
+ "nicks" : [
+ "efidler"
+ ]
+ },
+ "Elliot Poger" : {
+ "emails" : [
+ "epoger@chromium.org"
+ ],
+ "expertise" : "Skia",
+ "nicks" : [
+ "epoger"
+ ]
+ },
+ "Eric Roman" : {
+ "emails" : [
+ "eroman@chromium.org"
+ ],
+ "expertise" : "The Chromium Port",
+ "nicks" : [
+ "eroman"
+ ]
+ },
+ "Eric Uhrhane" : {
+ "emails" : [
+ "ericu@chromium.org"
+ ],
+ "nicks" : [
+ "ericu"
+ ]
+ },
+ "Erik Arvidsson" : {
+ "emails" : [
+ "arv@chromium.org"
+ ],
+ "nicks" : [
+ "arv"
+ ]
+ },
+ "Eugene Klyuchnikov" : {
+ "emails" : [
+ "eustas@chromium.org"
+ ],
+ "nicks" : [
+ "eustas"
+ ]
+ },
+ "Evan Martin" : {
+ "emails" : [
+ "evan@chromium.org"
+ ],
+ "nicks" : [
+ "evmar"
+ ]
+ },
+ "Evan Stade" : {
+ "emails" : [
+ "estade@chromium.org"
+ ],
+ "nicks" : [
+ "estade"
+ ]
+ },
+ "Fady Samuel" : {
+ "emails" : [
+ "fsamuel@chromium.org"
+ ],
+ "nicks" : [
+ "fsamuel"
+ ]
+ },
+ "Feng Qian" : {
+ "emails" : [
+ "feng@chromium.org"
+ ]
+ },
+ "Florin Malita" : {
+ "emails" : [
+ "fmalita@chromium.org",
+ "fmalita@google.com"
+ ],
+ "expertise" : "SVG (Scalable Vector Graphics)",
+ "nicks" : [
+ "fmalita"
+ ]
+ },
+ "Fumitoshi Ukai" : {
+ "emails" : [
+ "ukai@chromium.org"
+ ],
+ "expertise" : "WebSockets, The Chromium Port",
+ "nicks" : [
+ "ukai"
+ ]
+ },
+ "Gabor Loki" : {
+ "emails" : [
+ "loki@webkit.org"
+ ],
+ "expertise" : "The QtWebKit Port, ARM JIT, Qt BuildBot",
+ "nicks" : [
+ "loki04"
+ ]
+ },
+ "Gabor Rapcsanyi" : {
+ "emails" : [
+ "rgabor@webkit.org",
+ "rgabor@inf.u-szeged.hu"
+ ],
+ "expertise" : "The QtWebKit Port, Qt BuildBot, Tools",
+ "nicks" : [
+ "rgabor"
+ ]
+ },
+ "Gavin Peters" : {
+ "emails" : [
+ "gavinp@chromium.org",
+ "gavinp@webkit.org",
+ "gavinp@google.com"
+ ],
+ "expertise" : "The Chromium Port, Resource Loading",
+ "nicks" : [
+ "gavinp"
+ ]
+ },
+ "Girish Ramakrishnan" : {
+ "emails" : [
+ "girish@forwardbias.in",
+ "ramakrishnan.girish@gmail.com"
+ ],
+ "expertise" : "The QtWebKit Port, Plug-ins",
+ "nicks" : [
+ "girishr"
+ ]
+ },
+ "Glenn Adams" : {
+ "emails" : [
+ "glenn@skynav.com"
+ ],
+ "expertise" : "CSS, CSSOM, Complex Script Layout, Line Breaking",
+ "nicks" : [
+ "gasubic"
+ ]
+ },
+ "Graham Dennis" : {
+ "emails" : [
+ "Graham.Dennis@gmail.com",
+ "gdennis@webkit.org"
+ ]
+ },
+ "Greg Bolsinga" : {
+ "emails" : [
+ "bolsinga@apple.com"
+ ]
+ },
+ "Gregg Tavares" : {
+ "emails" : [
+ "gman@chromium.org",
+ "gman@google.com"
+ ],
+ "expertise" : "WebGL, CanvasProxy",
+ "nicks" : [
+ "gman"
+ ]
+ },
+ "Grzegorz Czajkowski" : {
+ "emails" : [
+ "g.czajkowski@samsung.com"
+ ],
+ "expertise" : "WebKit-EFL API, Layout Test support",
+ "nicks" : [
+ "grzegorz"
+ ]
+ },
+ "Hans Muller" : {
+ "emails" : [
+ "giles_joplin@yahoo.com",
+ "hmuller@adobe.com"
+ ],
+ "expertise" : "CSS Exclusions",
+ "nicks" : [
+ "hansmuller"
+ ]
+ },
+ "Hans Wennborg" : {
+ "emails" : [
+ "hans@chromium.org"
+ ],
+ "nicks" : [
+ "hwennborg"
+ ]
+ },
+ "Hayato Ito" : {
+ "emails" : [
+ "hayato@chromium.org"
+ ],
+ "expertise" : "Shadow DOM, Event Handling, Reftests",
+ "nicks" : [
+ "hayato"
+ ]
+ },
+ "Helder Correia" : {
+ "emails" : [
+ "helder.correia@nokia.com"
+ ],
+ "expertise" : "The QtWebKit Port, Canvas",
+ "nicks" : [
+ "helder"
+ ]
+ },
+ "Hin-Chung Lam" : {
+ "emails" : [
+ "hclam@google.com",
+ "hclam@chromium.org"
+ ],
+ "expertise" : "HTML5 Video, Accelerated Compositing (Chromium Port)"
+ },
+ "Hironori Bono" : {
+ "emails" : [
+ "hbono@chromium.org"
+ ],
+ "nicks" : [
+ "hbono"
+ ]
+ },
+ "Hugo Parente Lima" : {
+ "emails" : [
+ "hugo.lima@openbossa.org"
+ ],
+ "expertise" : "The QtWebKit Port",
+ "nicks" : [
+ "hugopl"
+ ]
+ },
+ "Ian Vollick" : {
+ "emails" : [
+ "vollick@chromium.org"
+ ],
+ "expertise" : "Graphics, Animations",
+ "nicks" : [
+ "vollick"
+ ]
+ },
+ "Igor Trindade Oliveira" : {
+ "emails" : [
+ "igor.oliveira@webkit.org",
+ "igor.o@sisa.samsung.com"
+ ],
+ "expertise" : "Animations, Accelerated Compositing, WebKitEFL",
+ "nicks" : [
+ "igoroliveira"
+ ]
+ },
+ "Ilya Sherman" : {
+ "emails" : [
+ "isherman@chromium.org"
+ ],
+ "nicks" : [
+ "isherman"
+ ]
+ },
+ "Ilya Tikhonovsky" : {
+ "emails" : [
+ "loislo@chromium.org"
+ ],
+ "nicks" : [
+ "loislo"
+ ]
+ },
+      "Ivan Krsti\u0107" : {
+ "emails" : [
+ "ike@apple.com"
+ ]
+ },
+ "Jacky Jiang" : {
+ "emails" : [
+ "jkjiang@webkit.org",
+ "zkjiang008@gmail.com",
+ "zhajiang@blackberry.com",
+ "zhajiang@rim.com"
+ ],
+ "expertise" : "The BlackBerry Port, Mobile Viewport Handling",
+ "nicks" : [
+ "jkjiang"
+ ]
+ },
+ "Jakob Petsovits" : {
+ "emails" : [
+ "jpetsovits@blackberry.com",
+ "jpetsovits@rim.com",
+ "jpetso@gmx.at"
+ ],
+ "expertise" : "The platform layer, OpenVG graphics backend",
+ "nicks" : [
+ "jpetso"
+ ]
+ },
+ "Jakub Wieczorek" : {
+ "emails" : [
+ "jwieczorek@webkit.org"
+ ],
+ "nicks" : [
+ "fawek"
+ ]
+ },
+ "James Hawkins" : {
+ "emails" : [
+ "jhawkins@chromium.org",
+ "jhawkins@google.com"
+ ],
+ "nicks" : [
+ "jhawkins"
+ ]
+ },
+ "James Kozianski" : {
+ "emails" : [
+ "koz@chromium.org",
+ "koz@google.com"
+ ],
+ "nicks" : [
+ "koz"
+ ]
+ },
+ "James Simonsen" : {
+ "emails" : [
+ "simonjam@chromium.org"
+ ],
+ "nicks" : [
+ "simonjam"
+ ]
+ },
+ "Janos Badics" : {
+ "emails" : [
+ "jbadics@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "dicska"
+ ]
+ },
+ "Jarred Nicholls" : {
+ "emails" : [
+ "jarred@webkit.org",
+ "jarred@sencha.com"
+ ],
+ "nicks" : [
+ "jarrednicholls"
+ ]
+ },
+ "Jason Liu" : {
+ "emails" : [
+ "jason.liu@torchmobile.com.cn",
+ "jasonliuwebkit@gmail.com"
+ ],
+ "nicks" : [
+ "jasonliu"
+ ]
+ },
+ "Jay Civelli" : {
+ "emails" : [
+ "jcivelli@chromium.org"
+ ],
+ "nicks" : [
+ "jcivelli"
+ ]
+ },
+ "Jeff Miller" : {
+ "emails" : [
+ "jeffm@apple.com"
+ ],
+ "nicks" : [
+ "jeffm7"
+ ]
+ },
+ "Jeffrey Pfau" : {
+ "emails" : [
+ "jpfau@apple.com"
+ ],
+ "nicks" : [
+ "jpfau"
+ ]
+ },
+ "Jenn Braithwaite" : {
+ "emails" : [
+ "jennb@chromium.org"
+ ],
+ "nicks" : [
+ "jennb"
+ ]
+ },
+ "Jens Alfke" : {
+ "emails" : [
+ "snej@chromium.org",
+ "jens@apple.com"
+ ]
+ },
+ "Jeremy Moskovich" : {
+ "emails" : [
+ "playmobil@google.com",
+ "jeremy@chromium.org"
+ ],
+ "expertise" : "The Chromium Port on OS X",
+ "nicks" : [
+ "jeremymos"
+ ]
+ },
+ "Jesus Sanchez-Palencia" : {
+ "emails" : [
+ "jesus@webkit.org",
+ "jesus.palencia@openbossa.org"
+ ],
+ "expertise" : "The QtWebKit port",
+ "nicks" : [
+ "jeez_"
+ ]
+ },
+ "Jia Pu" : {
+ "emails" : [
+ "jpu@apple.com"
+ ]
+ },
+ "Joanmarie Diggs" : {
+ "emails" : [
+ "jdiggs@igalia.com"
+ ],
+ "expertise" : "Accessibility, WebKitGTK+",
+ "nicks" : [
+ "joanie"
+ ]
+ },
+ "Joe Thomas" : {
+ "emails" : [
+ "joethomas@motorola.com"
+ ],
+ "nicks" : [
+ "joethomas"
+ ]
+ },
+ "John Abd-El-Malek" : {
+ "emails" : [
+ "jam@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, Plug-ins, Workers",
+ "nicks" : [
+ "jam"
+ ]
+ },
+ "John Gregg" : {
+ "emails" : [
+ "johnnyg@google.com",
+ "johnnyg@chromium.org"
+ ],
+ "nicks" : [
+ "johnnyg"
+ ]
+ },
+ "John Knottenbelt" : {
+ "emails" : [
+ "jknotten@chromium.org"
+ ],
+ "nicks" : [
+ "jknotten"
+ ]
+ },
+ "Johnny Ding" : {
+ "emails" : [
+ "jnd@chromium.org",
+ "johnnyding.webkit@gmail.com"
+ ],
+ "nicks" : [
+ "johnnyding"
+ ]
+ },
+ "Jon Lee" : {
+ "emails" : [
+ "jonlee@apple.com"
+ ],
+ "expertise" : "Forms, Notifications",
+ "nicks" : [
+ "jonlee"
+ ]
+ },
+ "Jonathan Dong" : {
+ "emails" : [
+ "jonathan.dong.webkit@gmail.com",
+ "jonathan.dong@torchmobile.com.cn"
+ ],
+ "expertise" : "The BlackBerry Port",
+ "nicks" : [
+ "jondong"
+ ]
+ },
+ "Joone Hur" : {
+ "emails" : [
+ "joone@webkit.org",
+ "joone.hur@intel.com"
+ ],
+ "expertise" : "The WebKitGtk+ port",
+ "nicks" : [
+ "joone"
+ ]
+ },
+ "Joost de Valk" : {
+ "emails" : [
+ "joost@webkit.org",
+ "webkit-dev@joostdevalk.nl"
+ ],
+ "nicks" : [
+ "Altha"
+ ]
+ },
+ "Joshua Bell" : {
+ "emails" : [
+ "jsbell@chromium.org",
+ "jsbell@google.com"
+ ],
+ "nicks" : [
+ "jsbell"
+ ]
+ },
+ "Julie Parent" : {
+ "emails" : [
+ "jparent@google.com",
+ "jparent@chromium.org"
+ ],
+ "expertise" : "HTML Editing",
+ "nicks" : [
+ "jparent"
+ ]
+ },
+ "Jungshik Shin" : {
+ "emails" : [
+ "jshin@chromium.org"
+ ]
+ },
+ "Justin Novosad" : {
+ "emails" : [
+ "junov@google.com",
+ "junov@chromium.org"
+ ],
+ "nicks" : [
+ "junov"
+ ]
+ },
+ "Justin Schuh" : {
+ "emails" : [
+ "jschuh@chromium.org"
+ ],
+ "expertise" : "Security",
+ "nicks" : [
+ "jschuh"
+ ]
+ },
+ "Kangil Han" : {
+ "emails" : [
+ "kangil.han@samsung.com",
+ "kangil.han@gmail.com"
+ ],
+ "expertise" : "The EFLWebKit Port",
+ "nicks" : [
+ "kangil"
+ ]
+ },
+ "Karen Grunberg" : {
+ "emails" : [
+ "kareng@chromium.org"
+ ],
+ "nicks" : [
+ "kareng"
+ ]
+ },
+ "Kalyan Kondapally" : {
+ "emails" : [
+ "kalyan.kondapally@intel.com",
+ "kondapallykalyan@gmail.com"
+ ],
+ "nicks" : [
+ "kalyank"
+ ]
+ },
+ "Kaustubh Atrawalkar" : {
+ "emails" : [
+ "kaustubh@motorola.com"
+ ],
+ "nicks" : [
+ "silverroots"
+ ]
+ },
+ "Keishi Hattori" : {
+ "emails" : [
+ "keishi@webkit.org"
+ ],
+ "expertise" : "Web Inspector",
+ "nicks" : [
+ "keishi"
+ ]
+ },
+ "Kelly Norton" : {
+ "emails" : [
+ "knorton@alum.mit.edu"
+ ]
+ },
+ "Ken Buchanan" : {
+ "emails" : [
+ "kenrb@chromium.org"
+ ],
+ "nicks" : [
+ "kenrb"
+ ]
+ },
+ "Kenichi Ishibashi" : {
+ "emails" : [
+ "bashi@chromium.org"
+ ],
+ "nicks" : [
+ "bashi"
+ ]
+ },
+ "Kenji Imasaki" : {
+ "emails" : [
+ "imasaki@chromium.org"
+ ],
+ "nicks" : [
+ "imasaki"
+ ]
+ },
+ "Kent Hansen" : {
+ "emails" : [
+ "kent.hansen@nokia.com"
+ ],
+ "expertise" : "The QtWebKit Port, JavaScript/ECMAScript",
+ "nicks" : [
+ "khansen"
+ ]
+ },
+ "Kihong Kwon" : {
+ "emails" : [
+ "kihong.kwon@samsung.com"
+ ],
+ "expertise" : "Device APIs(Battery Status, Vibration...), The EFLWebKit Port",
+ "nicks" : [
+ "kihong"
+ ]
+ },
+ "Kim Gr\u00f6nholm" : {
+ "emails" : [
+ "kim.1.gronholm@nokia.com"
+ ]
+ },
+ "Kimmo Kinnunen" : {
+ "emails" : [
+ "kimmo.t.kinnunen@nokia.com",
+ "kimmok@iki.fi",
+ "ktkinnun@webkit.org"
+ ],
+ "nicks" : [
+ "kimmok"
+ ]
+ },
+ "Kinuko Yasuda" : {
+ "emails" : [
+ "kinuko@chromium.org"
+ ],
+ "nicks" : [
+ "kinuko"
+ ]
+ },
+ "Kiran Muppala" : {
+ "emails" : [
+ "cmuppala@apple.com"
+ ],
+ "nicks" : [
+ "kiranm"
+ ]
+ },
+ "Konrad Piascik" : {
+ "emails" : [
+ "kpiascik@blackberry.com",
+ "kpiascik@rim.com"
+ ],
+ "expertise" : "The BlackBerry Port, Web Inspector",
+ "nicks" : [
+ "kpiascik"
+ ]
+ },
+ "Kristof Kosztyo" : {
+ "emails" : [
+ "kkristof@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "kkristof"
+ ]
+ },
+ "Krzysztof Kowalczyk" : {
+ "emails" : [
+ "kkowalczyk@gmail.com"
+ ]
+ },
+ "Kwang Yul Seo" : {
+ "emails" : [
+ "skyul@company100.com",
+ "skyul@company100.net",
+ "kseo@webkit.org"
+ ],
+ "expertise" : "HTML Parsing, Networking, WebKit2",
+ "nicks" : [
+ "kseo"
+ ]
+ },
+ "Lauro Neto" : {
+ "emails" : [
+ "lauro.neto@openbossa.org"
+ ],
+ "nicks" : [
+ "lmoura"
+ ]
+ },
+ "Leandro Gracia Gil" : {
+ "emails" : [
+ "leandrogracia@chromium.org"
+ ],
+ "nicks" : [
+ "leandrogracia"
+ ]
+ },
+ "Leandro Pereira" : {
+ "emails" : [
+ "leandro@profusion.mobi",
+ "leandro@webkit.org"
+ ],
+ "nicks" : [
+ "acidx"
+ ]
+ },
+ "Leo Yang" : {
+ "emails" : [
+ "leoyang@blackberry.com",
+ "leoyang@rim.com",
+ "leoyang@webkit.org",
+ "leoyang.webkit@gmail.com"
+ ],
+ "expertise" : "The BlackBerry Port",
+ "nicks" : [
+ "leoyang"
+ ]
+ },
+ "Li Yin" : {
+ "emails" : [
+ "li.yin@intel.com"
+ ],
+ "expertise" : "WebSocket, WebAudio",
+ "nicks" : [
+ "liyin"
+ ]
+ },
+ "Lucas De Marchi" : {
+ "emails" : [
+ "demarchi@webkit.org",
+ "lucas.demarchi@profusion.mobi"
+ ],
+ "nicks" : [
+ "demarchi"
+ ]
+ },
+ "Lucas Forschler" : {
+ "emails" : [
+ "lforschler@apple.com"
+ ],
+ "nicks" : [
+ "lforschler"
+ ]
+ },
+ "Luciano Wolf" : {
+ "emails" : [
+ "luciano.wolf@openbossa.org"
+ ],
+ "nicks" : [
+ "luck"
+ ]
+ },
+ "Luke Macpherson" : {
+ "emails" : [
+ "macpherson@chromium.org",
+ "macpherson@google.com"
+ ],
+ "nicks" : [
+ "macpherson"
+ ]
+ },
+ "Mads Ager" : {
+ "emails" : [
+ "ager@chromium.org"
+ ],
+ "expertise" : "V8"
+ },
+ "Mahesh Kulkarni" : {
+ "emails" : [
+ "mahesh.kulkarni@nokia.com",
+ "maheshk@webkit.org"
+ ],
+ "expertise" : "The Qt port, Geolocation",
+ "nicks" : [
+ "maheshk"
+ ]
+ },
+ "Marcelo Lira" : {
+ "emails" : [
+ "marcelo.lira@openbossa.org",
+ "setanta@gmail.com"
+ ],
+ "nicks" : [
+ "setanta"
+ ]
+ },
+ "Marcus Voltis Bulach" : {
+ "emails" : [
+ "bulach@chromium.org"
+ ]
+ },
+ "Mario Sanchez Prada" : {
+ "emails" : [
+ "mario@webkit.org",
+ "mario.prada@samsung.com"
+ ],
+ "expertise" : "WebKitGTK+, a11y, Epiphany/WebKit Contributor",
+ "nicks" : [
+ "msanchez"
+ ]
+ },
+ "Mark Lam" : {
+ "emails" : [
+ "mark.lam@apple.com"
+ ],
+ "nicks" : [
+ "mlam"
+ ]
+ },
+ "Mark Pilgrim" : {
+ "emails" : [
+ "pilgrim@chromium.org"
+ ],
+ "nicks" : [
+ "pilgrim_google"
+ ]
+ },
+ "Mary Wu" : {
+ "emails" : [
+ "mawu@blackberry.com",
+ "wwendy2007@gmail.com"
+ ],
+ "nicks" : [
+ "marywu"
+ ]
+ },
+ "Matt Delaney" : {
+ "emails" : [
+ "mdelaney@apple.com"
+ ]
+ },
+ "Matt Falkenhagen" : {
+ "emails" : [
+ "falken@chromium.org"
+ ],
+ "nicks" : [
+ "falken"
+ ]
+ },
+ "Matt Lilek" : {
+ "emails" : [
+ "mlilek@apple.com",
+ "webkit@mattlilek.com",
+ "pewtermoose@webkit.org"
+ ],
+ "nicks" : [
+ "pewtermoose"
+ ]
+ },
+ "Matt Perry" : {
+ "emails" : [
+ "mpcomplete@chromium.org"
+ ]
+ },
+ "Max Vujovic" : {
+ "emails" : [
+ "mvujovic@adobe.com",
+ "maxvujovic@gmail.com"
+ ],
+ "expertise" : "CSS Shaders, CSS Filters",
+ "nicks" : [
+ "mvujovic"
+ ]
+ },
+ "Maxime Britto" : {
+ "emails" : [
+ "maxime.britto@gmail.com",
+ "britto@apple.com"
+ ]
+ },
+ "Maxime Simon" : {
+ "emails" : [
+ "simon.maxime@gmail.com",
+ "maxime.simon@webkit.org"
+ ],
+ "expertise" : "The Haiku Port",
+ "nicks" : [
+ "maxime.simon"
+ ]
+ },
+ "Michael Br\u00fcning" : {
+ "emails" : [
+ "michael.bruning@digia.com",
+ "michaelbruening@gmail.com"
+ ],
+ "expertise" : "The QtWebKit Port",
+ "nicks" : [
+ "mibrunin"
+ ]
+ },
+ "Michael Nordman" : {
+ "emails" : [
+ "michaeln@google.com"
+ ],
+ "nicks" : [
+ "michaeln"
+ ]
+ },
+ "Michael Pruett" : {
+ "emails" : [
+ "michael@68k.org"
+ ],
+ "nicks" : [
+ "mpruett"
+ ]
+ },
+ "Michelangelo De Simone" : {
+ "emails" : [
+ "michelangelo@webkit.org"
+ ],
+ "expertise" : "HTML Forms, ValidityState",
+ "nicks" : [
+ "michelangelo"
+ ]
+ },
+ "Mihnea Ovidenie" : {
+ "emails" : [
+ "mihnea@adobe.com"
+ ],
+ "nicks" : [
+ "mihnea"
+ ]
+ },
+ "Mike Belshe" : {
+ "emails" : [
+ "mbelshe@chromium.org",
+ "mike@belshe.com"
+ ]
+ },
+ "Mike Fenton" : {
+ "emails" : [
+ "mifenton@blackberry.com",
+ "mifenton@rim.com",
+ "mike.fenton@torchmobile.com"
+ ],
+ "nicks" : [
+ "mfenton"
+ ]
+ },
+ "Mike Lawther" : {
+ "emails" : [
+ "mikelawther@chromium.org"
+ ],
+ "nicks" : [
+ "mikelawther"
+ ]
+ },
+ "Mike Reed" : {
+ "emails" : [
+ "reed@google.com"
+ ],
+ "nicks" : [
+ "reed"
+ ]
+ },
+ "Mike Thole" : {
+ "emails" : [
+ "mthole@mikethole.com",
+ "mthole@apple.com"
+ ],
+ "expertise" : "The Chromium Port"
+ },
+ "Mike West" : {
+ "emails" : [
+ "mkwst@chromium.org",
+ "mike@mikewest.org"
+ ],
+ "expertise" : "Content Security Policy, Chromium",
+ "nicks" : [
+ "mkwst"
+ ]
+ },
+ "Mikhail Naganov" : {
+ "emails" : [
+ "mnaganov@chromium.org"
+ ]
+ },
+ "Mikhail Pozdnyakov" : {
+ "emails" : [
+ "mikhail.pozdnyakov@intel.com"
+ ],
+ "nicks" : [
+ "MPozdnyakov"
+ ]
+ },
+ "Naoki Takano" : {
+ "emails" : [
+ "honten@chromium.org",
+ "takano.naoki@gmail.com"
+ ],
+ "expertise" : "Forms, Autofill and popup window between WebKit and Chromium port",
+ "nicks" : [
+ "honten"
+ ]
+ },
+ "Nat Duca" : {
+ "emails" : [
+ "nduca@chromium.org",
+ "nduca@google.com"
+ ],
+ "nicks" : [
+ "nduca"
+ ]
+ },
+ "Nayan Kumar K" : {
+ "emails" : [
+ "nayankk@motorola.com",
+ "nayankk@gmail.com"
+ ],
+ "nicks" : [
+ "xc0ffee"
+ ]
+ },
+ "Nima Ghanavatian" : {
+ "emails" : [
+ "nghanavatian@blackberry.com",
+ "nghanavatian@rim.com",
+ "nima.ghanavatian@gmail.com"
+ ],
+ "nicks" : [
+ "nghanavatian"
+ ]
+ },
+ "Noel Gordon" : {
+ "emails" : [
+ "noel.gordon@gmail.com",
+ "noel@chromium.org",
+ "noel@google.com"
+ ],
+ "nicks" : [
+ "noel"
+ ]
+ },
+ "Pablo Flouret" : {
+ "emails" : [
+ "pablof@motorola.com",
+ "pf@parb.es"
+ ],
+ "nicks" : [
+ "pablof"
+ ]
+ },
+ "Pam Greene" : {
+ "emails" : [
+ "pam@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, Chromium's Tools and Test Infrastructure",
+ "nicks" : [
+ "pamg"
+ ]
+ },
+ "Patrick Gansterer" : {
+ "emails" : [
+ "paroga@paroga.com",
+ "paroga@webkit.org"
+ ],
+ "expertise" : "CMake build system, The WinCE Port",
+ "nicks" : [
+ "paroga"
+ ]
+ },
+ "Pavel Podivilov" : {
+ "emails" : [
+ "podivilov@chromium.org"
+ ],
+ "nicks" : [
+ "podivilov"
+ ]
+ },
+ "Peter Beverloo" : {
+ "emails" : [
+ "peter@chromium.org",
+ "peter@webkit.org",
+ "beverloo@google.com"
+ ],
+ "nicks" : [
+ "beverloo"
+ ]
+ },
+ "Peter Kasting" : {
+ "emails" : [
+ "pkasting@google.com",
+ "pkasting@chromium.org"
+ ],
+ "expertise" : "Image Decoders, Scrollbars, The Chromium port",
+ "nicks" : [
+ "pkasting"
+ ]
+ },
+ "Peter Varga" : {
+ "emails" : [
+ "pvarga@webkit.org",
+ "pvarga@inf.u-szeged.hu"
+ ],
+ "expertise" : "JavaScriptCore Regular Expressions",
+ "nicks" : [
+ "stampho"
+ ]
+ },
+ "Pierre Rossi" : {
+ "emails" : [
+ "pierre.rossi@gmail.com"
+ ],
+ "nicks" : [
+ "elproxy"
+ ]
+ },
+ "Pierre d'Herbemont" : {
+ "emails" : [
+ "pdherbemont@free.fr",
+ "pdherbemont@apple.com"
+ ],
+ "expertise" : "Media Elements",
+ "nicks" : [
+ "pdherbemont"
+ ]
+ },
+ "Pierre-Olivier Latour" : {
+ "emails" : [
+ "pol@apple.com"
+ ],
+ "nicks" : [
+ "pol"
+ ]
+ },
+ "Pratik Solanki" : {
+ "emails" : [
+ "psolanki@apple.com"
+ ],
+ "nicks" : [
+ "psolanki"
+ ]
+ },
+ "Pravin D" : {
+ "emails" : [
+ "pravind@webkit.org",
+ "pravin.d@samsung.com"
+ ],
+ "nicks" : [
+ "pravind"
+ ]
+ },
+ "Qi Zhang" : {
+ "emails" : [
+ "qi.zhang02180@gmail.com"
+ ],
+ "nicks" : [
+ "qi"
+ ]
+ },
+ "Rafael Antognolli" : {
+ "emails" : [
+ "antognolli@profusion.mobi"
+ ],
+ "nicks" : [
+ "antognolli"
+ ]
+ },
+ "Rafael Brandao" : {
+ "emails" : [
+ "rafael.lobo@webkit.org"
+ ],
+ "nicks" : [
+ "rafaelbrandao"
+ ]
+ },
+ "Rafael Weinstein" : {
+ "emails" : [
+ "rafaelw@chromium.org"
+ ],
+ "nicks" : [
+ "rafaelw"
+ ]
+ },
+ "Raphael Kubo da Costa" : {
+ "emails" : [
+ "rakuco@webkit.org",
+ "rakuco@FreeBSD.org",
+ "raphael.kubo.da.costa@intel.com"
+ ],
+ "expertise" : "CMake build system, The EFLWebKit port",
+ "nicks" : [
+ "rakuco"
+ ]
+ },
+ "Ravi Kasibhatla" : {
+ "emails" : [
+ "ravi.kasibhatla@motorola.com"
+ ],
+ "nicks" : [
+ "kphanee"
+ ]
+ },
+ "Raymond Toy" : {
+ "emails" : [
+ "rtoy@google.com",
+ "rtoy@chromium.org"
+ ],
+ "nicks" : [
+ "rtoy"
+ ]
+ },
+ "Renata Hodovan" : {
+ "emails" : [
+ "reni@webkit.org"
+ ],
+ "nicks" : [
+ "reni"
+ ]
+ },
+ "Robert Hogan" : {
+ "emails" : [
+ "robert@webkit.org",
+ "robert@roberthogan.net",
+ "lists@roberthogan.net"
+ ],
+ "nicks" : [
+ "rhogan"
+ ]
+ },
+ "Robert Kroeger" : {
+ "emails" : [
+ "rjkroege@chromium.org"
+ ],
+ "nicks" : [
+ "rjkroege"
+ ]
+ },
+ "Roger Fong" : {
+ "emails" : [
+ "roger_fong@apple.com"
+ ],
+ "nicks" : [
+ "rfong"
+ ]
+ },
+ "Roland Steiner" : {
+ "emails" : [
+ "rolandsteiner@chromium.org"
+ ]
+ },
+ "Ryuan Choi" : {
+ "emails" : [
+ "ryuan.choi@samsung.com",
+ "ryuan.choi@gmail.com"
+ ],
+ "expertise" : "The EFLWebKit Port",
+ "nicks" : [
+ "ryuan"
+ ]
+ },
+ "Sadrul Habib Chowdhury" : {
+ "emails" : [
+ "sadrul@chromium.org"
+ ],
+ "nicks" : [
+ "sadrul",
+ "sadrulhc"
+ ]
+ },
+ "Sami Ky\u00f6stil\u00e4" : {
+ "emails" : [
+ "skyostil@chromium.org"
+ ],
+ "nicks" : [
+ "skyostil"
+ ]
+ },
+ "Satish Sampath" : {
+ "emails" : [
+ "satish@chromium.org"
+ ]
+ },
+ "Scott Violet" : {
+ "emails" : [
+ "sky@chromium.org"
+ ],
+ "expertise" : "The Chromium Port",
+ "nicks" : [
+ "sky"
+ ]
+ },
+ "Sergio Villar Senin" : {
+ "emails" : [
+ "svillar@igalia.com",
+ "sergio@webkit.org"
+ ],
+ "expertise" : "WebKitGTK+ port, WebKit2",
+ "nicks" : [
+ "svillar"
+ ]
+ },
+ "Shawn Singh" : {
+ "emails" : [
+ "shawnsingh@chromium.org"
+ ],
+ "nicks" : [
+ "shawnsingh"
+ ]
+ },
+ "Shinya Kawanaka" : {
+ "emails" : [
+ "shinyak@chromium.org"
+ ],
+ "nicks" : [
+ "shinyak"
+ ]
+ },
+ "Siddharth Mathur" : {
+ "emails" : [
+ "siddharth.mathur@nokia.com"
+ ],
+ "nicks" : [
+ "simathur"
+ ]
+ },
+ "Silvia Pfeiffer" : {
+ "emails" : [
+ "silviapf@chromium.org"
+ ],
+ "expertise" : "Media elements & controls, track element & WebVTT",
+ "nicks" : [
+ "silvia"
+ ]
+ },
+ "Simon Pena" : {
+ "emails" : [
+ "simon.pena@samsung.com",
+ "spenap@gmail.com",
+ "spena@igalia.com"
+ ],
+ "nicks" : [
+ "spenap"
+ ]
+ },
+ "Steve Lacey" : {
+ "emails" : [
+ "sjl@chromium.org"
+ ],
+ "nicks" : [
+ "stevela"
+ ]
+ },
+ "Sudarsana Nagineni" : {
+ "emails" : [
+ "naginenis@gmail.com",
+ "sudarsana.nagineni@linux.intel.com",
+ "sudarsana.nagineni@intel.com"
+ ],
+ "expertise" : "The EFLWebKit port, Memory Leaks",
+ "nicks" : [
+ "babu"
+ ]
+ },
+ "Szilard Ledan-Muntean" : {
+ "emails" : [
+ "szledan@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "szledan"
+ ]
+ },
+ "Taiju Tsuiki" : {
+ "emails" : [
+ "tzik@chromium.org"
+ ],
+ "nicks" : [
+ "tzik"
+ ]
+ },
+ "Takashi Sakamoto" : {
+ "emails" : [
+ "tasak@google.com"
+ ],
+ "nicks" : [
+ "tasak"
+ ]
+ },
+ "Takashi Toyoshima" : {
+ "emails" : [
+ "toyoshim@chromium.org",
+ "toyoshim+watchlist@chromium.org"
+ ],
+ "expertise" : "WebSocket",
+ "nicks" : [
+ "toyoshim"
+ ]
+ },
+ "Terry Anderson" : {
+ "emails" : [
+ "tdanderson@chromium.org"
+ ],
+ "nicks" : [
+ "tdanderson"
+ ]
+ },
+ "Thiago Marcos P. Santos" : {
+ "emails" : [
+ "tmpsantos@gmail.com",
+ "thiago.santos@intel.com"
+ ],
+ "expertise" : "CSS Device Adaptation, CMake build system, The EFLWebKit port",
+ "nicks" : [
+ "tmpsantos"
+ ]
+ },
+ "Thomas Sepez" : {
+ "emails" : [
+ "tsepez@chromium.org"
+ ],
+ "nicks" : [
+ "tsepez"
+ ]
+ },
+ "Tom Hudson" : {
+ "emails" : [
+ "tomhudson@google.com",
+ "tomhudson@chromium.org"
+ ],
+ "nicks" : [
+ "tomhudson"
+ ]
+ },
+ "Tom Zakrajsek" : {
+ "emails" : [
+ "tomz@codeaurora.org"
+ ],
+ "nicks" : [
+ "tomz"
+ ]
+ },
+ "Tommy Widenflycht" : {
+ "emails" : [
+ "tommyw@google.com"
+ ],
+ "nicks" : [
+ "tommyw"
+ ]
+ },
+ "Trey Matteson" : {
+ "emails" : [
+ "trey@usa.net"
+ ],
+ "nicks" : [
+ "trey"
+ ]
+ },
+ "Tristan O'Tierney" : {
+ "emails" : [
+ "tristan@otierney.net",
+ "tristan@apple.com"
+ ]
+ },
+ "Vangelis Kokkevis" : {
+ "emails" : [
+ "vangelis@chromium.org"
+ ],
+ "nicks" : [
+ "vangelis"
+ ]
+ },
+ "Viatcheslav Ostapenko" : {
+ "emails" : [
+ "ostap73@gmail.com",
+ "sl.ostapenko@samsung.com",
+ "ostapenko.viatcheslav@nokia.com"
+ ],
+ "nicks" : [
+ "ostap"
+ ]
+ },
+ "Victor Carbune" : {
+ "emails" : [
+ "vcarbune@chromium.org",
+ "victor@rosedu.org"
+ ],
+ "expertise" : "HTML5 <Track>",
+ "nicks" : [
+ "vcarbune"
+ ]
+ },
+ "Victor Wang" : {
+ "emails" : [
+ "victorw@chromium.org"
+ ],
+ "nicks" : [
+ "victorw"
+ ]
+ },
+ "Victoria Kirst" : {
+ "emails" : [
+ "vrk@chromium.org",
+ "vrk@google.com"
+ ],
+ "nicks" : [
+ "vrk"
+ ]
+ },
+ "Vincent Scheib" : {
+ "emails" : [
+ "scheib@chromium.org"
+ ],
+ "nicks" : [
+ "scheib"
+ ]
+ },
+ "Vineet Chaudhary" : {
+ "emails" : [
+ "rgf748@motorola.com"
+ ],
+ "nicks" : [
+ "vineetc"
+ ]
+ },
+ "Vitaly Repeshko" : {
+ "emails" : [
+ "vitalyr@chromium.org"
+ ]
+ },
+ "Vivek Galatage" : {
+ "emails" : [
+ "vivekg@webkit.org",
+ "vivek.vg@samsung.com"
+ ],
+ "expertise" : "Web Inspector",
+ "nicks" : [
+ "vivekg"
+ ]
+ },
+ "W. James MacLean" : {
+ "emails" : [
+ "wjmaclean@chromium.org"
+ ],
+ "nicks" : [
+ "seumas"
+ ]
+ },
+ "William Siegrist" : {
+ "emails" : [
+ "wsiegrist@apple.com"
+ ],
+ "expertise" : "webkit.org",
+ "nicks" : [
+ "wms"
+ ]
+ },
+ "Xianzhu Wang" : {
+ "emails" : [
+ "wangxianzhu@chromium.org",
+ "phnixwxz@gmail.com",
+ "wangxianzhu@google.com"
+ ],
+ "nicks" : [
+ "wangxianzhu"
+ ]
+ },
+ "Xiaohai Wei" : {
+ "emails" : [
+ "james.wei@intel.com",
+ "wistoch@chromium.org"
+ ],
+ "expertise" : "WebAudio/ChromiumAndroidx86",
+ "nicks" : [
+ "wistoch"
+ ]
+ },
+ "Xiaomei Ji" : {
+ "emails" : [
+ "xji@chromium.org"
+ ],
+ "nicks" : [
+ "xji"
+ ]
+ },
+ "Xingnan Wang" : {
+ "emails" : [
+ "xingnan.wang@intel.com"
+ ],
+ "nicks" : [
+ "xingnan"
+ ]
+ },
+ "Yaar Schnitman" : {
+ "emails" : [
+ "yaar@chromium.org",
+ "yaar@google.com"
+ ]
+ },
+ "Yael Aharon" : {
+ "emails" : [
+ "yael@webkit.org"
+ ],
+ "nicks" : [
+ "yael"
+ ]
+ },
+ "Yi Shen" : {
+ "emails" : [
+ "max.hong.shen@gmail.com",
+ "yi.shen@sisa.samsung.com",
+ "yi.4.shen@nokia.com"
+ ]
+ },
+ "Yongjun Zhang" : {
+ "emails" : [
+ "yongjun_zhang@apple.com",
+ "yongjun.zhang@nokia.com"
+ ]
+ },
+ "Yoshifumi Inoue" : {
+ "emails" : [
+ "yosin@chromium.org"
+ ],
+ "expertise" : "HTML5 Forms especially for multiple-fields UI, charset encoding, decimal arithmetic",
+ "nicks" : [
+ "yosin"
+ ]
+ },
+ "Yuqiang Xian" : {
+ "emails" : [
+ "yuqiang.xian@intel.com"
+ ],
+ "expertise" : "JavaScriptCore"
+ },
+ "Yuzo Fujishima" : {
+ "emails" : [
+ "yuzo@google.com"
+ ],
+ "nicks" : [
+ "yuzo"
+ ]
+ },
+ "Zalan Bujtas" : {
+ "emails" : [
+ "zalan@apple.com",
+ "zbujtas@gmail.com",
+ "zalan.bujtas@nokia.com"
+ ],
+ "expertise" : "Frame flattening",
+ "nicks" : [
+ "zalan"
+ ]
+ },
+ "Zeno Albisser" : {
+ "emails" : [
+ "zeno@webkit.org",
+ "zeno.albisser@nokia.com",
+ "zeno.albisser@digia.com"
+ ],
+ "expertise" : "The QtWebKit Port",
+ "nicks" : [
+ "zalbisser"
+ ]
+ },
+ "Zhenyao Mo" : {
+ "emails" : [
+ "zmo@google.com"
+ ],
+ "nicks" : [
+ "zhenyao"
+ ]
+ },
+ "Zoltan Arvai" : {
+ "emails" : [
+ "zarvai@inf.u-szeged.hu"
+ ],
+ "expertise" : "The QtWebKit Port, QtWebKit Build Environment",
+ "nicks" : [
+ "azbest_hu"
+ ]
+ },
+ "Zoltan Horvath" : {
+ "emails" : [
+ "zoltan@webkit.org",
+ "hzoltan@inf.u-szeged.hu",
+ "horvath.zoltan.6@stud.u-szeged.hu"
+ ],
+ "expertise" : "The QtWebKit Port, Custom Allocation Framework, PerformanceTests - memory measurements",
+ "nicks" : [
+ "zoltan"
+ ]
+ },
+ "\u017dan Dober\u0161ek" : {
+ "emails" : [
+ "zandobersek@gmail.com",
+ "zdobersek@igalia.com"
+ ],
+ "nicks" : [
+ "zdobersek"
+ ]
+ }
+ },
+ "Contributors" : {
+ "Adobe Bug Tracker" : {
+ "emails" : [
+ "WebkitBugTracker@adobe.com"
+ ]
+ },
+ "Aharon Lanin" : {
+ "emails" : [
+ "aharon@google.com"
+ ]
+ },
+ "Alan Cutter" : {
+ "emails" : [
+ "alancutter@chromium.org"
+ ],
+ "nicks" : [
+ "alancutter"
+ ]
+ },
+ "Alan Stearns" : {
+ "emails" : [
+ "stearns@adobe.com"
+ ],
+ "nicks" : [
+ "astearns"
+ ]
+ },
+ "Alejandro Pineiro" : {
+ "emails" : [
+ "apinheiro@igalia.com"
+ ]
+ },
+ "Alexey Marinichev" : {
+ "emails" : [
+ "amarinichev@chromium.org",
+ "amarinichev@google.com"
+ ],
+ "nicks" : [
+ "amarinichev"
+ ]
+ },
+ "Andras Piroska" : {
+ "emails" : [
+ "pandras@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "andris88"
+ ]
+ },
+ "Anne van Kesteren" : {
+ "emails" : [
+ "annevk@annevk.nl"
+ ],
+ "nicks" : [
+ "annevk"
+ ]
+ },
+ "Annie Sullivan" : {
+ "emails" : [
+ "sullivan@chromium.org"
+ ],
+ "nicks" : [
+ "annie"
+ ]
+ },
+ "Anton Obzhirov" : {
+ "emails" : [
+ "a.obzhirov@samsung.com"
+ ],
+ "nicks" : [
+ "aobzhirov"
+ ]
+ },
+ "Anton Vayvod" : {
+ "emails" : [
+ "avayvod@chromium.org"
+ ],
+ "nicks" : [
+ "avayvod"
+ ]
+ },
+ "Aryeh Gregor" : {
+ "emails" : [
+ "ayg@aryeh.name"
+ ],
+ "nicks" : [
+ "AryehGregor"
+ ]
+ },
+ "Balazs Ankes" : {
+ "emails" : [
+ "bank@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "abalazs"
+ ]
+ },
+ "Bem Jones-Bey" : {
+ "emails" : [
+ "bjonesbe@adobe.com"
+ ],
+ "nicks" : [
+ "bemjb"
+ ]
+ },
+ "Brian Salomon" : {
+ "emails" : [
+ "bsalomon@google.com"
+ ]
+ },
+ "Christian Biesinger" : {
+ "emails" : [
+ "cbiesinger@chromium.org"
+ ],
+ "nicks" : [
+ "cbiesinger"
+ ]
+ },
+ "Commit Queue" : {
+ "emails" : [
+ "commit-queue@webkit.org"
+ ]
+ },
+ "Daniel Sievers" : {
+ "emails" : [
+ "sievers@chromium.org"
+ ]
+ },
+ "David Dorwin" : {
+ "emails" : [
+ "ddorwin@chromium.org"
+ ],
+ "nicks" : [
+ "ddorwin"
+ ]
+ },
+ "David Reveman" : {
+ "emails" : [
+ "reveman@chromium.org"
+ ],
+ "nicks" : [
+ "reveman"
+ ]
+ },
+ "Douglas Davidson" : {
+ "emails" : [
+ "ddavidso@apple.com"
+ ]
+ },
+ "Douglas Stockwell" : {
+ "emails" : [
+ "dstockwell@chromium.org"
+ ],
+ "nicks" : [
+ "dstockwell"
+ ]
+ },
+ "Edward O'Connor" : {
+ "emails" : [
+ "eoconnor@apple.com"
+ ],
+ "nicks" : [
+ "hober"
+ ]
+ },
+ "Eric Penner" : {
+ "emails" : [
+ "epenner@chromium.org"
+ ],
+ "nicks" : [
+ "epenner"
+ ]
+ },
+ "Felician Marton" : {
+ "emails" : [
+ "felician@inf.u-szeged.hu",
+ "marton.felician.zoltan@stud.u-szeged.hu"
+ ],
+ "nicks" : [
+ "Felician"
+ ]
+ },
+ "Frédéric Wang" : {
+ "emails" : [
+ "fred.wang@free.fr"
+ ],
+ "nicks" : [
+ "fredw"
+ ]
+ },
+ "Finnur Thorarinsson" : {
+ "emails" : [
+ "finnur@chromium.org",
+ "finnur.webkit@gmail.com"
+ ],
+ "nicks" : [
+ "finnur"
+ ]
+ },
+ "Forms Bugs" : {
+ "emails" : [
+ "forms-bugs@chromium.org"
+ ]
+ },
+ "Gabor Ballabas" : {
+ "emails" : [
+ "gaborb@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "bgabor"
+ ]
+ },
+ "Grace Kloba" : {
+ "emails" : [
+ "klobag@chromium.org"
+ ],
+ "nicks" : [
+ "klobag"
+ ]
+ },
+ "Greg Simon" : {
+ "emails" : [
+ "gregsimon@chromium.org"
+ ],
+ "nicks" : [
+ "gregsimon"
+ ]
+ },
+ "Gwang Yoon Hwang" : {
+ "emails" : [
+ "ryumiel@company100.net",
+ "ryumiel@company100.com"
+ ],
+ "nicks" : [
+ "ryumiel"
+ ]
+ },
+ "Hao Zheng" : {
+ "emails" : [
+ "zhenghao@chromium.org"
+ ]
+ },
+ "Harald Alvestrand" : {
+ "emails" : [
+ "hta@google.com"
+ ],
+ "nicks" : [
+ "hta"
+ ]
+ },
+ "Ian Hickson" : {
+ "emails" : [
+ "ian@hixie.ch"
+ ],
+ "nicks" : [
+ "hixie"
+ ]
+ },
+ "Jae Hyun Park" : {
+ "emails" : [
+ "jae.park@company100.net"
+ ],
+ "nicks" : [
+ "jaepark"
+ ]
+ },
+ "James Craig" : {
+ "emails" : [
+ "james@cookiecrook.com",
+ "jcraig@apple.com"
+ ],
+ "nicks" : [
+ "jcraig"
+ ]
+ },
+ "Jeff Timanus" : {
+ "emails" : [
+ "twiz@chromium.org",
+ "twiz@google.com"
+ ],
+ "nicks" : [
+ "twiz"
+ ]
+ },
+ "Jing Zhao" : {
+ "emails" : [
+ "jingzhao@chromium.org"
+ ]
+ },
+ "John Bates" : {
+ "emails" : [
+ "jbates@google.com",
+ "jbates@chromium.org"
+ ],
+ "nicks" : [
+ "jbates"
+ ]
+ },
+ "John Bauman" : {
+ "emails" : [
+ "jbauman@chromium.org",
+ "jbauman@google.com"
+ ],
+ "nicks" : [
+ "jbauman"
+ ]
+ },
+ "John Mellor" : {
+ "emails" : [
+ "johnme@chromium.org"
+ ],
+ "nicks" : [
+ "johnme"
+ ]
+ },
+ "Jonathan Backer" : {
+ "emails" : [
+ "backer@chromium.org"
+ ],
+ "nicks" : [
+ "backer"
+ ]
+ },
+ "Koji Hara" : {
+ "emails" : [
+ "kojih@chromium.org"
+ ],
+ "nicks" : [
+ "kojih"
+ ]
+ },
+ "Koji Ishii" : {
+ "emails" : [
+ "kojiishi@gmail.com"
+ ]
+ },
+ "Kulanthaivel Palanichamy" : {
+ "emails" : [
+ "kulanthaivel@codeaurora.org"
+ ],
+ "nicks" : [
+ "kvel"
+ ]
+ },
+ "Lia Chen" : {
+ "emails" : [
+ "liachen@rim.com"
+ ]
+ },
+ "Mihai Balan" : {
+ "emails" : [
+ "mibalan@adobe.com"
+ ],
+ "nicks" : [
+ "miChou"
+ ]
+ },
+ "Mihai Maerean" : {
+ "emails" : [
+ "mmaerean@adobe.com"
+ ],
+ "nicks" : [
+ "mmaerean"
+ ]
+ },
+ "Min Qin" : {
+ "emails" : [
+ "qinmin@chromium.org"
+ ]
+ },
+ "Nandor Huszka" : {
+ "emails" : [
+ "hnandor@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "hnandor"
+ ]
+ },
+ "Nils Barth" : {
+ "emails" : [
+ "nbarth@chromium.org"
+ ],
+ "nicks" : [
+ "nbarth"
+ ]
+ },
+ "Oliver Varga" : {
+ "emails" : [
+ "voliver@inf.u-szeged.hu",
+ "Varga.Oliver@stud.u-szeged.hu"
+ ],
+ "nicks" : [
+ "TwistO"
+ ]
+ },
+ "Peter Gal" : {
+ "emails" : [
+ "galpeter@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "elecro"
+ ]
+ },
+ "Peter Linss" : {
+ "emails" : [
+ "peter.linss@hp.com"
+ ],
+ "nicks" : [
+ "plinss"
+ ]
+ },
+ "Radar WebKit Bug Importer" : {
+ "emails" : [
+ "webkit-bug-importer@group.apple.com"
+ ]
+ },
+ "Radu Stavila" : {
+ "emails" : [
+ "stavila@adobe.com"
+ ],
+ "nicks" : [
+ "radustavila"
+ ]
+ },
+ "Raul Hudea" : {
+ "emails" : [
+ "rhudea@adobe.com"
+ ],
+ "nicks" : [
+ "rhudea"
+ ]
+ },
+ "Roland Takacs" : {
+ "emails" : [
+ "rtakacs@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "rtakacs"
+ ]
+ },
+ "Tab Atkins" : {
+ "emails" : [
+ "tabatkins@google.com",
+ "jackalmage@gmail.com"
+ ],
+ "nicks" : [
+ "tabatkins"
+ ]
+ },
+ "Tamas Czene" : {
+ "emails" : [
+ "tczene@inf.u-szeged.hu",
+ "Czene.Tamas@stud.u-szeged.hu"
+ ],
+ "nicks" : [
+ "tczene"
+ ]
+ },
+ "Tien-Ren Chen" : {
+ "emails" : [
+ "trchen@chromium.org"
+ ],
+ "nicks" : [
+ "trchen"
+ ]
+ },
+ "Tim 'mithro' Ansell" : {
+ "emails" : [
+ "mithro@mithis.com"
+ ],
+ "nicks" : [
+ "mithro"
+ ]
+ },
+ "Tim Volodine" : {
+ "emails" : [
+ "timvolodine@chromium.org"
+ ],
+ "nicks" : [
+ "timvolodine"
+ ]
+ },
+ "Web Components Team" : {
+ "emails" : [
+ "webcomponents-bugzilla@chromium.org"
+ ]
+ },
+ "WebKit Review Bot" : {
+ "emails" : [
+ "webkit.review.bot@gmail.com"
+ ],
+ "nicks" : [
+ "sheriff-bot"
+ ]
+ },
+ "Wyatt Carss" : {
+ "emails" : [
+ "wcarss@chromium.org",
+ "wcarss@google.com"
+ ],
+ "nicks" : [
+ "wcarss"
+ ]
+ },
+ "Zeev Lieber" : {
+ "emails" : [
+ "zlieber@chromium.org"
+ ]
+ },
+ "Zsolt Feher" : {
+ "emails" : [
+ "feherzs@inf.u-szeged.hu"
+ ],
+ "nicks" : [
+ "Smith"
+ ]
+ }
+ },
+ "Reviewers" : {
+ "Abhishek Arya" : {
+ "emails" : [
+ "inferno@chromium.org"
+ ],
+ "expertise" : "Security, Layout and Rendering",
+ "nicks" : [
+ "inferno-sec"
+ ]
+ },
+ "Ada Chan" : {
+ "emails" : [
+ "adachan@apple.com"
+ ],
+ "expertise" : "WebKit on Windows",
+ "nicks" : [
+ "chanada"
+ ]
+ },
+ "Adam Barth" : {
+ "emails" : [
+ "abarth@webkit.org"
+ ],
+ "expertise" : "Security, HTML parser, webkit-patch, FrameLoader (sadly), V8 Bindings, The Chromium Port",
+ "nicks" : [
+ "abarth"
+ ]
+ },
+ "Adam Roben" : {
+ "emails" : [
+ "aroben@webkit.org",
+ "aroben@apple.com"
+ ],
+ "expertise" : "Plug-ins and Java (Win, General), WebKit API (Win), Windows build system, General Windows port issues, Developer Tools (Web Inspector), Tools",
+ "nicks" : [
+ "aroben"
+ ]
+ },
+ "Adam Treat" : {
+ "emails" : [
+ "treat@kde.org",
+ "treat@webkit.org"
+ ],
+ "expertise" : "The QtWebKit Port, The HTML Parser/Tokenizer, The platform layer, Image loading and painting, ScrollView and friends",
+ "nicks" : [
+ "manyoso"
+ ]
+ },
+ "Adele Peterson" : {
+ "emails" : [
+ "adele@apple.com"
+ ],
+ "expertise" : "HTML Forms, Security, Layout and Rendering, Web Compatibility (General)",
+ "nicks" : [
+ "adele"
+ ]
+ },
+ "Adrienne Walker" : {
+ "emails" : [
+ "enne@google.com",
+ "enne@chromium.org"
+ ],
+ "nicks" : [
+ "enne"
+ ]
+ },
+ "Alejandro G. Castro" : {
+ "emails" : [
+ "alex@igalia.com",
+ "alex@webkit.org"
+ ],
+ "expertise" : "WebKitGTK+, Cairo graphics backend, ShadowBlur rendering, Epiphany/WebKit Contributor",
+ "nicks" : [
+ "alexg__"
+ ]
+ },
+ "Alexander Pavlov" : {
+ "emails" : [
+ "apavlov@chromium.org",
+ "pavlov81@gmail.com"
+ ],
+ "expertise" : "Developer Tools, Web Inspector, CSS OM",
+ "nicks" : [
+ "apavlov"
+ ]
+ },
+ "Alexandru Chiculita" : {
+ "emails" : [
+ "achicu@adobe.com"
+ ],
+ "expertise" : "CSS Regions, CSS Exclusions, CSS Filters, CSS Custom Filters",
+ "nicks" : [
+ "achicu"
+ ]
+ },
+ "Alexey Proskuryakov" : {
+ "emails" : [
+ "ap@webkit.org",
+ "ap@apple.com"
+ ],
+ "nicks" : [
+ "ap"
+ ]
+ },
+ "Alexis Menard" : {
+ "emails" : [
+ "alexis@webkit.org",
+ "menard@kde.org",
+ "alexis.menard@openbossa.org"
+ ],
+ "expertise" : "The QtWebKit Port, CSS, CSS shorthands, HTML5 Media Elements",
+ "nicks" : [
+ "darktears"
+ ]
+ },
+ "Alice Liu" : {
+ "emails" : [
+ "alice.liu@apple.com"
+ ],
+ "expertise" : "HTML Editing, Memory Use / Leaks, Core DOM, Web Compatibility (Web Apps), Web Compatibility (General), Bug Mastery, Web Accessibility",
+ "nicks" : [
+ "aliu"
+ ]
+ },
+ "Allan Sandfeld Jensen" : {
+ "emails" : [
+ "allan.jensen@digia.com",
+ "kde@carewolf.com",
+ "sandfeld@kde.org",
+ "allan.jensen@nokia.com"
+ ],
+ "expertise" : "QtWebKit, CSS Selectors, Touch Adjustment, Hit Testing",
+ "nicks" : [
+ "carewolf"
+ ]
+ },
+ "Alp Toker" : {
+ "emails" : [
+ "alp@nuanti.com",
+ "alp@atoker.com",
+ "alp@webkit.org"
+ ],
+ "expertise" : "GTK+ WebKit Port, Cairo graphics backend (including canvas, SVG), CURL HTTP backend",
+ "nicks" : [
+ "alp"
+ ]
+ },
+ "Anders Carlsson" : {
+ "emails" : [
+ "andersca@apple.com",
+ "acarlsson@apple.com"
+ ],
+ "expertise" : "Storage, Networking, Core DOM, Plug-ins and Java (Win, General), XML, JavaScript/ECMAScript",
+ "nicks" : [
+ "andersca"
+ ]
+ },
+ "Andreas Kling" : {
+ "emails" : [
+ "akling@apple.com",
+ "kling@webkit.org",
+ "awesomekling@apple.com",
+ "andreas.kling@nokia.com"
+ ],
+ "expertise" : "CSS, HTML DOM, Core DOM, Canvas, JavaScript DOM bindings, Memory use",
+ "nicks" : [
+ "kling"
+ ]
+ },
+ "Andy Estes" : {
+ "emails" : [
+ "aestes@apple.com"
+ ],
+ "expertise" : "Layout and rendering, plug-in loading, HTML parsing, web compatibility",
+ "nicks" : [
+ "estes"
+ ]
+ },
+ "Antonio Gomes" : {
+ "emails" : [
+ "tonikitoo@webkit.org",
+ "a1.gomes@sisa.samsung.com",
+ "antonio.netto@samsung.com",
+ "antonio.gomes@openbossa.org"
+ ],
+ "expertise" : "{BlackBerry, EFL, Qt}WebKit ports, Hit testing, Touch/Event handling, Rendering and scrolling",
+ "nicks" : [
+ "tonikitoo"
+ ]
+ },
+ "Antti Koivisto" : {
+ "emails" : [
+ "koivisto@iki.fi",
+ "antti@apple.com",
+ "antti.j.koivisto@nokia.com"
+ ],
+ "expertise" : "HTML DOM, Core DOM, Loader, Cache, CSS OM, style resolve, performance",
+ "nicks" : [
+ "anttik"
+ ]
+ },
+ "Ariya Hidayat" : {
+ "emails" : [
+ "ariya.hidayat@gmail.com",
+ "ariya@sencha.com",
+ "ariya@webkit.org"
+ ],
+ "expertise" : "The QtWebKit Port",
+ "nicks" : [
+ "ariya"
+ ]
+ },
+ "Benjamin Poulain" : {
+ "emails" : [
+ "benjamin@webkit.org",
+ "bpoulain@apple.com",
+ "benjamin.poulain@nokia.com",
+ "ikipou@gmail.com"
+ ],
+ "expertise" : "The Rendering, Performance, Mobile stuff, Touch support.",
+ "nicks" : [
+ "benjaminp"
+ ]
+ },
+ "Beth Dakin" : {
+ "emails" : [
+ "bdakin@apple.com"
+ ],
+ "expertise" : "CSS (Cascading Style Sheets), Layout and Rendering, Resolution-Independence, HTML Parsing, Tables, Web Accessibility",
+ "nicks" : [
+ "dethbakin"
+ ]
+ },
+ "Brady Eidson" : {
+ "emails" : [
+ "beidson@apple.com"
+ ],
+ "expertise" : "Networking, Storage, WebCore icon database, Back/forward cache, History",
+ "nicks" : [
+ "bradee-oh"
+ ]
+ },
+ "Brent Fulgham" : {
+ "emails" : [
+ "bfulgham@webkit.org",
+ "bfulgham@apple.com"
+ ],
+ "expertise" : "The WinCairo Port, WebKit on Windows",
+ "nicks" : [
+ "bfulgham"
+ ]
+ },
+ "Brian Weinstein" : {
+ "emails" : [
+ "bweinstein@apple.com"
+ ],
+ "expertise" : "WebKit on Windows, Tools",
+ "nicks" : [
+ "bweinstein"
+ ]
+ },
+ "Caio Marcelo de Oliveira Filho" : {
+ "emails" : [
+ "cmarcelo@webkit.org",
+ "cmarcelo@gmail.com",
+ "caio.oliveira@openbossa.org"
+ ],
+ "nicks" : [
+ "cmarcelo"
+ ]
+ },
+ "Cameron Zwarich" : {
+ "emails" : [
+ "zwarich@apple.com",
+ "cwzwarich@apple.com",
+ "cwzwarich@webkit.org"
+ ]
+ },
+ "Carlos Garcia Campos" : {
+ "emails" : [
+ "cgarcia@igalia.com",
+ "carlosgc@gnome.org",
+ "carlosgc@webkit.org"
+ ],
+ "expertise" : "The WebKitGTK+ Port, WebKit2, Glib unicode backend, GTK+ contributor, Epiphany contributor",
+ "nicks" : [
+ "KaL"
+ ]
+ },
+ "Chang Shu" : {
+ "emails" : [
+ "cshu@webkit.org",
+ "c.shu@sisa.samsung.com"
+ ],
+ "expertise" : "JavaScript DOM bindings, WebKit2, QtWebKit port",
+ "nicks" : [
+ "cshu"
+ ]
+ },
+ "Chris Blumenberg" : {
+ "emails" : [
+ "cblu@apple.com"
+ ],
+ "nicks" : [
+ "cblu"
+ ]
+ },
+ "Chris Fleizach" : {
+ "emails" : [
+ "cfleizach@apple.com"
+ ],
+ "expertise" : "Accessibility",
+ "nicks" : [
+ "cfleizach"
+ ]
+ },
+ "Chris Jerdonek" : {
+ "emails" : [
+ "cjerdonek@webkit.org"
+ ],
+ "nicks" : [
+ "cjerdonek"
+ ]
+ },
+ "Chris Marrin" : {
+ "emails" : [
+ "cmarrin@apple.com"
+ ],
+ "nicks" : [
+ "cmarrin"
+ ]
+ },
+ "Chris Rogers" : {
+ "emails" : [
+ "crogers@google.com"
+ ],
+ "nicks" : [
+ "crogers"
+ ]
+ },
+ "Christophe Dumez" : {
+ "emails" : [
+ "dchris@gmail.com",
+ "ch.dumez@sisa.samsung.com",
+ "christophe.dumez@intel.com"
+ ],
+ "expertise" : "The EFLWebKit Port, Bindings generator",
+ "nicks" : [
+ "cdumez"
+ ]
+ },
+ "Csaba Osztrogon\u00e1c" : {
+ "emails" : [
+ "ossy@webkit.org"
+ ],
+ "nicks" : [
+ "ossy"
+ ]
+ },
+ "Dan Bernstein" : {
+ "emails" : [
+ "mitz@webkit.org",
+ "mitz@apple.com"
+ ],
+ "expertise" : "Layout and Rendering, Bidirectional text",
+ "nicks" : [
+ "mitzpettel"
+ ]
+ },
+ "Daniel Bates" : {
+ "emails" : [
+ "dbates@webkit.org"
+ ],
+ "expertise" : "XSSAuditor, Drag and Drop, Tools, Perl, svn-apply/unapply",
+ "nicks" : [
+ "dydz"
+ ]
+ },
+ "Darin Adler" : {
+ "emails" : [
+ "darin@apple.com"
+ ],
+ "expertise" : "HTML Forms, WebKit API (Mac, Win), HTML Editing, Performance, JavaScript/ECMAScript, Text Encoding, Core DOM, HTML DOM, Canvas, JavaScript DOM Bindings, ObjC DOM Bindings, Basic types and data structures, Tools, New Features / Standards Support, General (probably a good backup on most topics even if not specifically an expert)",
+ "nicks" : [
+ "darin"
+ ]
+ },
+ "Darin Fisher" : {
+ "emails" : [
+ "fishd@chromium.org",
+ "darin@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, WebKit API (Chromium), Page Loading",
+ "nicks" : [
+ "fishd"
+ ]
+ },
+ "David Harrison" : {
+ "emails" : [
+ "harrison@apple.com"
+ ],
+ "expertise" : "HTML Editing, Accessibility",
+ "nicks" : [
+ "harrison"
+ ]
+ },
+ "David Hyatt" : {
+ "emails" : [
+ "hyatt@apple.com"
+ ],
+ "expertise" : "Layout and Rendering, CSS (Cascading Style Sheets), HTML Forms, Tables, Text Layout, Fonts, MathML, Memory Cache, HTMLDOM, Core DOM, HTML Parsing, New Features / Standards Support, XML, XSLT, Printing",
+ "nicks" : [
+ "dhyatt",
+ "hyatt"
+ ]
+ },
+ "David Kilzer" : {
+ "emails" : [
+ "ddkilzer@webkit.org",
+ "ddkilzer@apple.com"
+ ],
+ "expertise" : "iPhone port, Xcode build system, Tools, Perl, git, WebArchive",
+ "nicks" : [
+ "ddkilzer"
+ ]
+ },
+ "David Levin" : {
+ "emails" : [
+ "levin@chromium.org"
+ ],
+ "nicks" : [
+ "dave_levin"
+ ]
+ },
+ "Dean Jackson" : {
+ "emails" : [
+ "dino@apple.com"
+ ],
+ "expertise" : "Transforms, Transitions, Animations, Filters",
+ "nicks" : [
+ "dino"
+ ]
+ },
+ "Dimitri Glazkov" : {
+ "emails" : [
+ "dglazkov@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, Shadow DOM, DOM, HTML Forms, Shadow DOM, Web Components, V8 Bindings, InspectorController, garden-o-matic",
+ "nicks" : [
+ "dglazkov"
+ ]
+ },
+ "Dirk Pranke" : {
+ "emails" : [
+ "dpranke@chromium.org"
+ ],
+ "expertise" : "Build/test infrastructure (stuff under Tools/Scripts)",
+ "nicks" : [
+ "dpranke"
+ ]
+ },
+ "Dirk Schulze" : {
+ "emails" : [
+ "krit@webkit.org"
+ ],
+ "expertise" : "Cairo graphics backend, Canvas, SVG (Scalable Vector Graphics)",
+ "nicks" : [
+ "krit"
+ ]
+ },
+ "Dmitry Titov" : {
+ "emails" : [
+ "dimich@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, Workers, Timers, Threading",
+ "nicks" : [
+ "dimich"
+ ]
+ },
+ "Don Melton" : {
+ "emails" : [
+ "gramps@apple.com"
+ ],
+ "nicks" : [
+ "gramps"
+ ]
+ },
+ "Dumitru Daniliuc" : {
+ "emails" : [
+ "dumi@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, WebSQLDatabases",
+ "nicks" : [
+ "dumi"
+ ]
+ },
+ "Elliott Sprehn" : {
+ "emails" : [
+ "esprehn@chromium.org",
+ "esprehn+autocc@chromium.org"
+ ],
+ "expertise" : "Layout and Rendering, V8/JSC Bindings, Generated content, Shadow DOM, Web Compatibility (General)",
+ "nicks" : [
+ "esprehn"
+ ]
+ },
+ "Emil A Eklund" : {
+ "emails" : [
+ "eae@chromium.org"
+ ],
+ "expertise" : "Layout and rendering, Core DOM, HTML DOM",
+ "nicks" : [
+ "eae"
+ ]
+ },
+ "Enrica Casucci" : {
+ "emails" : [
+ "enrica@apple.com"
+ ],
+ "expertise" : "HTML Editing, Drag and drop, Input methods",
+ "nicks" : [
+ "enrica"
+ ]
+ },
+ "Eric Carlson" : {
+ "emails" : [
+ "eric.carlson@apple.com"
+ ],
+ "expertise" : "HTML5 Media Elements",
+ "nicks" : [
+ "eric_carlson"
+ ]
+ },
+ "Eric Seidel" : {
+ "emails" : [
+ "eric@webkit.org"
+ ],
+ "expertise" : "The Rendering Engine, Commit Queue, Memory Leaks, webkit-patch, The Chromium Port",
+ "nicks" : [
+ "eseidel"
+ ]
+ },
+ "Filip Pizlo" : {
+ "emails" : [
+ "fpizlo@apple.com"
+ ],
+ "expertise" : "JavaScript/ECMAScript",
+ "nicks" : [
+ "pizlo"
+ ]
+ },
+ "Gavin Barraclough" : {
+ "emails" : [
+ "barraclough@apple.com"
+ ],
+ "expertise" : "JavaScript/ECMAScript",
+ "nicks" : [
+ "gbarra"
+ ]
+ },
+ "Geoffrey Garen" : {
+ "emails" : [
+ "ggaren@apple.com"
+ ],
+ "expertise" : "JavaScript/ECMAScript, Performance, Memory Use / Leaks, Memory Cache, Core DOM, HTML DOM, JavaScript DOM Bindings, Web Compatibility (General), JavaScriptCore C API, FastMalloc",
+ "nicks" : [
+ "ggaren"
+ ]
+ },
+ "George Staikos" : {
+ "emails" : [
+ "staikos@kde.org",
+ "staikos@webkit.org"
+ ],
+ "expertise" : "Core KHTML Contributor, The QtWebKit Port"
+ },
+ "Gustavo Noronha Silva" : {
+ "emails" : [
+ "gns@gnome.org",
+ "kov@webkit.org",
+ "gustavo.noronha@collabora.co.uk",
+ "gustavo.noronha@collabora.com"
+ ],
+ "expertise" : "WebKitGTK+ API, Soup HTTP backend, Debian Packaging, A little bit of Epiphany",
+ "nicks" : [
+ "kov"
+ ]
+ },
+ "Gyuyoung Kim" : {
+ "emails" : [
+ "gyuyoung.kim@samsung.com",
+ "gyuyoung.kim@webkit.org"
+ ],
+ "expertise" : "The EFLWebKit Port",
+ "nicks" : [
+ "gyuyoung"
+ ]
+ },
+ "Hajime Morrita" : {
+ "emails" : [
+ "morrita@google.com",
+ "morrita@chromium.org"
+ ],
+ "nicks" : [
+ "morrita"
+ ]
+ },
+ "Holger Freyther" : {
+ "emails" : [
+ "zecke@selfish.org",
+ "zecke@webkit.org"
+ ],
+ "expertise" : "The QtWebKit Port, The GTK+ WebKit Port",
+ "nicks" : [
+ "zecke"
+ ]
+ },
+ "James Robinson" : {
+ "emails" : [
+ "jamesr@chromium.org",
+ "jamesr@google.com"
+ ],
+ "expertise" : "Layout, rendering, the Chromium port.",
+ "nicks" : [
+ "jamesr"
+ ]
+ },
+ "Jan Alonzo" : {
+ "emails" : [
+ "jmalonzo@gmail.com",
+ "jmalonzo@webkit.org"
+ ],
+ "expertise" : "The WebKitGtk Port, Autotools Build",
+ "nicks" : [
+ "janm"
+ ]
+ },
+ "Jer Noble" : {
+ "emails" : [
+ "jer.noble@apple.com"
+ ],
+ "nicks" : [
+ "jernoble"
+ ]
+ },
+ "Jeremy Orlow" : {
+ "emails" : [
+ "jorlow@webkit.org",
+ "jorlow@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, DOM Storage (i.e., LocalStorage and SessionStorage)",
+ "nicks" : [
+ "jorlow"
+ ]
+ },
+ "Jessie Berlin" : {
+ "emails" : [
+ "jberlin@webkit.org",
+ "jberlin@apple.com"
+ ],
+ "nicks" : [
+ "jessieberlin"
+ ]
+ },
+ "Jian Li" : {
+ "emails" : [
+ "jianli@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, Workers, File API, FormData",
+ "nicks" : [
+ "jianli"
+ ]
+ },
+ "Jocelyn Turcotte" : {
+ "emails" : [
+ "jocelyn.turcotte@digia.com",
+ "jocelyn.turcotte@nokia.com"
+ ],
+ "expertise" : "The QtWebKit port, Tools, Loader, Rendering, Accelerated Compositing",
+ "nicks" : [
+ "jturcotte"
+ ]
+ },
+ "Jochen Eisinger" : {
+ "emails" : [
+ "jochen@chromium.org",
+ "jochen@chromium.or"
+ ],
+ "nicks" : [
+ "jochen__"
+ ]
+ },
+ "John Sullivan" : {
+ "emails" : [
+ "sullivan@apple.com"
+ ],
+ "expertise" : "Safari UI, Printing",
+ "nicks" : [
+ "sullivan"
+ ]
+ },
+ "Jon Honeycutt" : {
+ "emails" : [
+ "jhoneycutt@apple.com"
+ ],
+ "expertise" : "WebKit on Windows, Plug-ins, Windows accessibility",
+ "nicks" : [
+ "jhoneycutt"
+ ]
+ },
+ "Joseph Pecoraro" : {
+ "emails" : [
+ "joepeck@webkit.org",
+ "pecoraro@apple.com"
+ ],
+ "expertise" : "Web Inspector",
+ "nicks" : [
+ "JoePeck"
+ ]
+ },
+ "Julien Chaffraix" : {
+ "emails" : [
+ "jchaffraix@webkit.org",
+ "julien.chaffraix@gmail.com",
+ "jchaffraix@google.com",
+ "jchaffraix@codeaurora.org"
+ ],
+ "expertise" : "Layout and rendering, Tables, XMLHttpRequest",
+ "nicks" : [
+ "jchaffraix"
+ ]
+ },
+ "Justin Garcia" : {
+ "emails" : [
+ "justin.garcia@apple.com"
+ ],
+ "expertise" : "Multipart Mixed Replace, HTML Editing",
+ "nicks" : [
+ "justing"
+ ]
+ },
+ "Ken Kocienda" : {
+ "emails" : [
+ "kocienda@apple.com"
+ ]
+ },
+ "Kenneth Rohde Christiansen" : {
+ "emails" : [
+ "kenneth@webkit.org",
+ "kenneth.r.christiansen@intel.com",
+ "kenneth.christiansen@gmail.com"
+ ],
+ "expertise" : "WebKit/WebKit2 API, The Qt and EFL WebKit Port, Mobile Adaptions, Frame Flattening, Mobile Viewport Handling, Input methods.",
+ "nicks" : [
+ "kenneth_",
+ "kenneth",
+ "kenne"
+ ]
+ },
+ "Kenneth Russell" : {
+ "emails" : [
+ "kbr@google.com",
+ "kbr@chromium.org"
+ ],
+ "expertise" : " WebGL (Chromium and Safari ports), Canvas",
+ "nicks" : [
+ "kbr_google",
+ "kbrgg"
+ ]
+ },
+ "Kent Tamura" : {
+ "emails" : [
+ "tkent@chromium.org",
+ "tkent@google.com"
+ ],
+ "expertise" : "HTML Forms, DumpRenderTree for Chromium, The Chromium Port",
+ "nicks" : [
+ "tkent"
+ ]
+ },
+ "Kentaro Hara" : {
+ "emails" : [
+ "haraken@chromium.org"
+ ],
+ "expertise" : "V8 bindings, JSC bindings, Perl scripts, Garbage collection, DOM lifetime",
+ "nicks" : [
+ "haraken"
+ ]
+ },
+ "Kevin Decker" : {
+ "emails" : [
+ "kdecker@apple.com"
+ ],
+ "expertise" : "Safari UI, Plug-ins and Java (Mac, General), Enterprise Application Compatibility",
+ "nicks" : [
+ "superkevin"
+ ]
+ },
+ "Kevin McCullough" : {
+ "emails" : [
+ "kmccullough@apple.com"
+ ],
+ "expertise" : " JavaScript/ECMAScript, Developer Tools (Web Inspector, JavaScript Profilier), Web Compatibility (Web Apps)",
+ "nicks" : [
+ "maculloch"
+ ]
+ },
+ "Kevin Ollivier" : {
+ "emails" : [
+ "kevino@theolliviers.com",
+ "kevino@webkit.org"
+ ],
+ "expertise" : "The wxWebKit Port, Bakefile build system",
+ "nicks" : [
+ "kollivier"
+ ]
+ },
+ "Lars Knoll" : {
+ "emails" : [
+ "lars@trolltech.com",
+ "lars@kde.org",
+ "lars.knoll@nokia.com"
+ ],
+ "expertise" : "Original author of KHTML which WebKit is based on, The QtWebKit Port, Layout and Rendering, CSS (Cascading Style Sheets), HTML Forms, Tables, HTML DOM, Core DOM, HTML Parsing",
+ "nicks" : [
+ "lars"
+ ]
+ },
+ "Laszlo Gombos" : {
+ "emails" : [
+ "laszlo.gombos@webkit.org",
+ "l.gombos@samsung.com",
+ "laszlo.gombos@gmail.com",
+ "laszlo.1.gombos@nokia.com"
+ ],
+ "expertise" : "The QtWebKit Port",
+ "nicks" : [
+ "lgombos"
+ ]
+ },
+ "Levi Weintraub" : {
+ "emails" : [
+ "leviw@chromium.org",
+ "leviw@google.com",
+ "lweintraub@apple.com"
+ ],
+ "expertise" : "Layout (bidi and line layout, sub-pixel positioning), svg, editing",
+ "nicks" : [
+ "leviw"
+ ]
+ },
+ "Luiz Agostini" : {
+ "emails" : [
+ "luiz@webkit.org",
+ "luiz.agostini@openbossa.org"
+ ],
+ "expertise" : "The QtWebKit Port",
+ "nicks" : [
+ "lca"
+ ]
+ },
+ "Maciej Stachowiak" : {
+ "emails" : [
+ "mjs@apple.com"
+ ],
+ "expertise" : "JavaScript/ECMAScript, Performance, Security, Basic types and data structures, FastMalloc, DOM Bindings for JavaScript, Core DOM, HTML DOM, JavaScript DOM Bindings, WebKit API (Mac, Win), HTML Editing, Networking, Tools, New Features / Standards Support, General (probably a good backup on most topics even if not specifically an expert)",
+ "nicks" : [
+ "othermaciej"
+ ]
+ },
+ "Mark Hahnenberg" : {
+ "emails" : [
+ "mhahnenberg@apple.com"
+ ],
+ "expertise" : "JavaScript/ECMAScript",
+ "nicks" : [
+ "mhahnenberg"
+ ]
+ },
+ "Mark Rowe" : {
+ "emails" : [
+ "mrowe@apple.com"
+ ],
+ "expertise" : "Build/Release Engineering, Malloc, FastMalloc",
+ "nicks" : [
+ "bdash"
+ ]
+ },
+ "Martin Robinson" : {
+ "emails" : [
+ "mrobinson@webkit.org",
+ "mrobinson@igalia.com",
+ "martin.james.robinson@gmail.com"
+ ],
+ "expertise" : "The WebKitGTK+ Port, Cairo graphics backend, soup HTTP backend",
+ "nicks" : [
+ "mrobinson"
+ ]
+ },
+ "Michael Saboff" : {
+ "emails" : [
+ "msaboff@apple.com"
+ ],
+ "expertise" : "JavaScript/ECMAScript",
+ "nicks" : [
+ "msaboff"
+ ]
+ },
+ "Mihai Parparita" : {
+ "emails" : [
+ "mihaip@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, Layout tests, History",
+ "nicks" : [
+ "mihaip"
+ ]
+ },
+ "Nate Chapin" : {
+ "emails" : [
+ "japhet@chromium.org"
+ ],
+ "expertise" : "The Chromium Port, V8 Bindings",
+ "nicks" : [
+ "japhet",
+ "natechapin"
+ ]
+ },
+ "Nico Weber" : {
+ "emails" : [
+ "thakis@chromium.org",
+ "thakis@google.com"
+ ],
+ "expertise" : "The Chromium Port, Graphics, Skia, CoreGraphics",
+ "nicks" : [
+ "thakis"
+ ]
+ },
+ "Nikolas Zimmermann" : {
+ "emails" : [
+ "zimmermann@kde.org",
+ "zimmermann@physik.rwth-aachen.de",
+ "zimmermann@webkit.org",
+ "nzimmermann@blackberry.com",
+ "nzimmermann@rim.com"
+ ],
+ "expertise" : "Core KHTML contributor, The QtWebKit Port, Text Layout, JavaScript DOM bindings, Code generation in general, XML, SVG (Scalable Vector Graphics)",
+ "nicks" : [
+ "wildfox"
+ ]
+ },
+ "Noam Rosenthal" : {
+ "emails" : [
+ "noam@webkit.org",
+ "noam.rosenthal@nokia.com"
+ ],
+ "expertise" : "TextureMapper, graphics for the Qt port",
+ "nicks" : [
+ "noamr"
+ ]
+ },
+ "Ojan Vafai" : {
+ "emails" : [
+ "ojan@chromium.org",
+ "ojan.autocc@gmail.com"
+ ],
+ "expertise" : "Selections, Editing, webkit-patch, run-webkit-tests, The Chromium port, HTML Forms, Layout and Rendering, Web Compatibility (General) ",
+ "nicks" : [
+ "ojan"
+ ]
+ },
+ "Oliver Hunt" : {
+ "emails" : [
+ "oliver@apple.com"
+ ],
+ "expertise" : "JavaScript/ECMAScript, FastMalloc",
+ "nicks" : [
+ "olliej"
+ ]
+ },
+ "Pavel Feldman" : {
+ "emails" : [
+ "pfeldman@chromium.org",
+ "pfeldman@google.com"
+ ],
+ "expertise" : "Developer Tools, Web Inspector",
+ "nicks" : [
+ "pfeldman"
+ ]
+ },
+ "Philip Rogers" : {
+ "emails" : [
+ "pdr@google.com",
+ "pdr@chromium.org"
+ ],
+ "expertise" : "SVG (Scalable Vector Graphics)",
+ "nicks" : [
+ "pdr"
+ ]
+ },
+ "Philippe Normand" : {
+ "emails" : [
+ "pnormand@igalia.com",
+ "philn@webkit.org",
+ "philn@igalia.com"
+ ],
+ "expertise" : "WebKitGTK+, Media support (focused on the GStreamer implementation)",
+ "nicks" : [
+ "philn"
+ ]
+ },
+ "Richard Williamson" : {
+ "emails" : [
+ "rjw@apple.com"
+ ],
+ "nicks" : [
+ "rjw"
+ ]
+ },
+ "Rob Buis" : {
+ "emails" : [
+ "rwlbuis@gmail.com",
+ "rwlbuis@webkit.org",
+ "rbuis@blackberry.com",
+ "rbuis@rim.com"
+ ],
+ "expertise" : "KDE contributor, The QtWebKit Port, SVG (Scalable Vector Graphics)",
+ "nicks" : [
+ "rwlbuis"
+ ]
+ },
+ "Ryosuke Niwa" : {
+ "emails" : [
+ "rniwa@webkit.org"
+ ],
+ "expertise" : "HTML Editing, Core DOM, HTML DOM, Event Handling",
+ "nicks" : [
+ "rniwa"
+ ]
+ },
+ "Sam Weinig" : {
+ "emails" : [
+ "sam@webkit.org",
+ "weinig@apple.com"
+ ],
+ "expertise" : "HTML DOM, Core DOM, DOM Bindings (JavaScript, Objective-C and COM), Security, DumpRenderTree",
+ "nicks" : [
+ "weinig"
+ ]
+ },
+ "Shinichiro Hamaji" : {
+ "emails" : [
+ "hamaji@chromium.org"
+ ],
+ "expertise" : "CSS (Cascading Style Sheets), Tools",
+ "nicks" : [
+ "hamaji"
+ ]
+ },
+ "Simon Fraser" : {
+ "emails" : [
+ "simon.fraser@apple.com"
+ ],
+ "expertise" : "Accelerated Compositing, Transitions and Animations, CSS Transforms",
+ "nicks" : [
+ "smfr"
+ ]
+ },
+ "Simon Hausmann" : {
+ "emails" : [
+ "hausmann@webkit.org",
+ "hausmann@kde.org",
+ "simon.hausmann@digia.com"
+ ],
+ "expertise" : "The QtWebKit Port, Former KHTML contributor",
+ "nicks" : [
+ "tronical"
+ ]
+ },
+ "Stephanie Lewis" : {
+ "emails" : [
+ "slewis@apple.com"
+ ],
+ "expertise" : "Performance Testing, Tools",
+ "nicks" : [
+ "sundiamonde"
+ ]
+ },
+ "Stephen Chenney" : {
+ "emails" : [
+ "schenney@chromium.org"
+ ],
+ "expertise" : "SVG (Scalable Vector Graphics)",
+ "nicks" : [
+ "schenney"
+ ]
+ },
+ "Stephen White" : {
+ "emails" : [
+ "senorblanco@chromium.org"
+ ],
+ "expertise" : "Skia port, GPU acceleration",
+ "nicks" : [
+ "senorblanco"
+ ]
+ },
+ "Steve Block" : {
+ "emails" : [
+ "steveblock@chromium.org",
+ "steveblock@google.com"
+ ],
+ "expertise" : "Geolocation, Android Port",
+ "nicks" : [
+ "steveblock"
+ ]
+ },
+ "Steve Falkenburg" : {
+ "emails" : [
+ "sfalken@apple.com"
+ ],
+ "expertise" : "WebKit on Windows",
+ "nicks" : [
+ "sfalken"
+ ]
+ },
+ "Tim Horton" : {
+ "emails" : [
+ "thorton@apple.com",
+ "timothy_horton@apple.com"
+ ],
+ "expertise" : "SVG/Canvas/Graphics, WebKit2",
+ "nicks" : [
+ "thorton"
+ ]
+ },
+ "Tim Omernick" : {
+ "emails" : [
+ "timo@apple.com"
+ ]
+ },
+ "Timothy Hatcher" : {
+ "emails" : [
+ "timothy@apple.com",
+ "timothy@hatcher.name"
+ ],
+ "expertise" : "WebKit API (Mac), WebKit Web Site, Developer Tools (Web Inspector, JavaScript Debugger)",
+ "nicks" : [
+ "xenon"
+ ]
+ },
+ "Tony Chang" : {
+ "emails" : [
+ "tony@chromium.org"
+ ],
+ "expertise" : "Chromium Linux, Editing, Drag and Drop",
+ "nicks" : [
+ "tony^work"
+ ]
+ },
+ "Tony Gentilcore" : {
+ "emails" : [
+ "tonyg@chromium.org"
+ ],
+ "expertise" : "HTML5 parsing, Web Timing",
+ "nicks" : [
+ "tonyg-cr"
+ ]
+ },
+ "Tor Arne Vestb\u00f8" : {
+ "emails" : [
+ "vestbo@webkit.org",
+ "tor.arne.vestbo@nokia.com"
+ ],
+ "expertise" : "The QtWebKit Port, HTML5 Media Elements, Plug-ins, Tools",
+ "nicks" : [
+ "torarne"
+ ]
+ },
+ "Vicki Murley" : {
+ "emails" : [
+ "vicki@apple.com"
+ ]
+ },
+ "Vsevolod Vlasov" : {
+ "emails" : [
+ "vsevik@chromium.org"
+ ],
+ "expertise" : "Developer Tools, Web Inspector",
+ "nicks" : [
+ "vsevik"
+ ]
+ },
+ "Xan Lopez" : {
+ "emails" : [
+ "xan.lopez@gmail.com",
+ "xan@gnome.org",
+ "xan@webkit.org",
+ "xlopez@igalia.com"
+ ],
+ "expertise" : "WebKitGTK+, Soup HTTP Backend, libsoup Contributor, WebKit a11y (focused on the ATK implementation), Epiphany/WebKit maintainer",
+ "nicks" : [
+ "xan"
+ ]
+ },
+ "Yong Li" : {
+ "emails" : [
+ "yong.li.webkit@outlook.com"
+ ],
+ "nicks" : [
+ "yong"
+ ]
+ },
+ "Yury Semikhatsky" : {
+ "emails" : [
+ "yurys@chromium.org"
+ ],
+ "expertise" : "Developer Tools, Web Inspector",
+ "nicks" : [
+ "yurys"
+ ]
+ },
+ "Yuta Kitamura" : {
+ "emails" : [
+ "yutak@chromium.org"
+ ],
+ "expertise" : "WebSocket, The Chromium Port",
+ "nicks" : [
+ "yutak"
+ ]
+ },
+ "Zack Rusin" : {
+ "emails" : [
+ "zack@kde.org"
+ ],
+ "expertise" : "Core KHTML contributor, The QtWebKit Port",
+ "nicks" : [
+ "zackr"
+ ]
+ },
+ "Zoltan Herczeg" : {
+ "emails" : [
+ "zherczeg@webkit.org",
+ "zherczeg@inf.u-szeged.hu"
+ ],
+ "expertise" : "The QtWebKit Port, JIT (ARM), SVG, optimizations (SMP, SIMD), Graphics",
+ "nicks" : [
+ "zherczeg"
+ ]
+ }
+ }
+}
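For orientation only, and not part of this change: the JSON added above groups accounts by role ("Contributors" and "Reviewers" are the groups visible in this hunk), and each role maps a full name to a record with optional "emails", "nicks" and "expertise" fields. A minimal lookup by IRC nick over that structure could look like the sketch below; the file path is an assumption, since this file's diff header falls outside this excerpt.

    # Illustrative sketch only; the path is assumed, not taken from this hunk.
    import json

    def find_account_by_nick(nick, path="Tools/Scripts/webkitpy/common/config/contributors.json"):
        with open(path) as f:
            data = json.load(f)
        # Top-level keys are role groups; each maps a full name to a record
        # carrying optional "emails", "nicks" and "expertise" entries.
        for role, members in data.items():
            for name, record in members.items():
                if nick in record.get("nicks", []):
                    return role, name, record.get("emails", [])
        return None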
diff --git a/Tools/Scripts/webkitpy/common/config/ews.json b/Tools/Scripts/webkitpy/common/config/ews.json
new file mode 100644
index 000000000..b85cf7ce7
--- /dev/null
+++ b/Tools/Scripts/webkitpy/common/config/ews.json
@@ -0,0 +1,56 @@
+{
+ "GTK EWS": {
+ "port": "gtk",
+ "watchers": [
+ "xan.lopez@gmail.com"
+ ]
+ },
+ "GTK WK2 EWS": {
+ "port": "gtk-wk2",
+ "watchers": [
+ "xan.lopez@gmail.com"
+ ]
+ },
+ "EFL EWS": {
+ "port": "efl",
+ "watchers": [
+ "gyuyoung.kim@webkit.org"
+ ]
+ },
+ "EFL WK2 EWS": {
+ "port": "efl-wk2",
+ "watchers": [
+ "gyuyoung.kim@webkit.org"
+ ]
+ },
+ "Qt EWS": {
+ "port": "qt",
+ "watchers": [
+ "webkit-ews@sed.inf.u-szeged.hu"
+ ]
+ },
+ "Qt WK2 EWS": {
+ "port": "qt-wk2",
+ "watchers": [
+ "webkit-ews@sed.inf.u-szeged.hu"
+ ]
+ },
+ "Win EWS": {
+ "port": "win",
+ "runTests": true
+ },
+ "Mac EWS": {
+ "port": "mac",
+ "watchers": [
+ "rniwa@webkit.org"
+ ],
+ "runTests": true
+ },
+ "Mac WK2 EWS": {
+ "port": "mac-wk2",
+ "watchers": [
+ "rniwa@webkit.org"
+ ],
+ "runTests": true
+ }
+}
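For orientation only, and not part of this change: ews.json maps each EWS queue name to the port it drives, an optional "watchers" list of notification addresses, and an optional "runTests" flag. A hypothetical consumer of that format (this is not webkitpy's actual EWS code) might read it as follows.

    # Hypothetical reader for the ews.json format introduced above.
    import json

    def queues_that_run_tests(path="Tools/Scripts/webkitpy/common/config/ews.json"):
        with open(path) as f:
            config = json.load(f)
        # Keys are queue names ("Mac EWS", "Qt WK2 EWS", ...); values carry "port",
        # and optionally "watchers" and "runTests".
        return [name for name, queue in config.items() if queue.get("runTests", False)]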
diff --git a/Tools/Scripts/webkitpy/common/config/irc.py b/Tools/Scripts/webkitpy/common/config/irc.py
index 950c573ad..8e198c610 100644
--- a/Tools/Scripts/webkitpy/common/config/irc.py
+++ b/Tools/Scripts/webkitpy/common/config/irc.py
@@ -27,5 +27,5 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
server="irc.freenode.net"
-port=6667
+port=6665
channel="#webkit"
diff --git a/Tools/Scripts/webkitpy/common/config/ports.py b/Tools/Scripts/webkitpy/common/config/ports.py
index f6f3db0c8..0c20dae51 100644
--- a/Tools/Scripts/webkitpy/common/config/ports.py
+++ b/Tools/Scripts/webkitpy/common/config/ports.py
@@ -61,14 +61,15 @@ class DeprecatedPort(object):
@staticmethod
def port(port_name):
ports = {
- "chromium": ChromiumPort,
- "chromium-android": ChromiumAndroidPort,
- "chromium-xvfb": ChromiumXVFBPort,
"gtk": GtkPort,
+ "gtk-wk2": GtkWK2Port,
"mac": MacPort,
+ "mac-wk2": MacWK2Port,
"win": WinPort,
"qt": QtPort,
+ "qt-wk2": QtWK2Port,
"efl": EflPort,
+ "efl-wk2": EflWK2Port,
}
default_port = {
"Windows": WinPort,
@@ -116,20 +117,29 @@ class DeprecatedPort(object):
def run_perl_unittests_command(self):
return self.script_shell_command("test-webkitperl")
- def layout_tests_results_path(self):
- return os.path.join(self.results_directory, "full_results.json")
-
- def unit_tests_results_path(self):
- return os.path.join(self.results_directory, "webkit_unit_tests_output.xml")
+ def run_bindings_tests_command(self):
+ return self.script_shell_command("run-bindings-tests")
class MacPort(DeprecatedPort):
port_flag_name = "mac"
+class MacWK2Port(DeprecatedPort):
+ port_flag_name = "mac-wk2"
+
+ def run_webkit_tests_command(self):
+ command = super(MacWK2Port, self).run_webkit_tests_command()
+ command.append("-2")
+ return command
+
+
class WinPort(DeprecatedPort):
port_flag_name = "win"
+ def run_bindings_tests_command(self):
+ return None
+
class GtkPort(DeprecatedPort):
port_flag_name = "gtk"
@@ -138,6 +148,7 @@ class GtkPort(DeprecatedPort):
command = super(GtkPort, self).build_webkit_command(build_style=build_style)
command.append("--gtk")
command.append("--update-gtk")
+ command.append("--no-webkit2")
command.append(super(GtkPort, self).makeArgs())
return command
@@ -147,73 +158,75 @@ class GtkPort(DeprecatedPort):
return command
+class GtkWK2Port(DeprecatedPort):
+ port_flag_name = "gtk-wk2"
+
+ def build_webkit_command(self, build_style=None):
+ command = super(GtkWK2Port, self).build_webkit_command(build_style=build_style)
+ command.append("--gtk")
+ command.append("--update-gtk")
+ command.append("--no-webkit1")
+ command.append(super(GtkWK2Port, self).makeArgs())
+ return command
+
+ def run_webkit_tests_command(self):
+ command = super(GtkWK2Port, self).run_webkit_tests_command()
+ command.append("--gtk")
+ command.append("-2")
+ return command
+
+
class QtPort(DeprecatedPort):
port_flag_name = "qt"
def build_webkit_command(self, build_style=None):
command = super(QtPort, self).build_webkit_command(build_style=build_style)
command.append("--qt")
+ command.append("--no-webkit2")
command.append(super(QtPort, self).makeArgs())
return command
-
-class EflPort(DeprecatedPort):
- port_flag_name = "efl"
-
- def build_webkit_command(self, build_style=None):
- command = super(EflPort, self).build_webkit_command(build_style=build_style)
- command.append("--efl")
- command.append("--update-efl")
- command.append(super(EflPort, self).makeArgs())
+ def run_webkit_tests_command(self):
+ command = super(QtPort, self).run_webkit_tests_command()
+ command.append("--qt")
return command
-class ChromiumPort(DeprecatedPort):
- port_flag_name = "chromium"
-
- def update_webkit_command(self, non_interactive=False):
- command = super(ChromiumPort, self).update_webkit_command(non_interactive=non_interactive)
- command.append("--chromium")
- if non_interactive:
- command.append("--force-update")
- return command
+class QtWK2Port(DeprecatedPort):
+ port_flag_name = "qt-wk2"
def build_webkit_command(self, build_style=None):
- command = super(ChromiumPort, self).build_webkit_command(build_style=build_style)
- command.append("--chromium")
- command.append("--update-chromium")
+ command = super(QtWK2Port, self).build_webkit_command(build_style=build_style)
+ command.append("--qt")
+ command.append(super(QtWK2Port, self).makeArgs())
return command
def run_webkit_tests_command(self):
- # Note: This could be run-webkit-tests now.
- command = self.script_shell_command("new-run-webkit-tests")
- command.append("--chromium")
- command.append("--skip-failing-tests")
+ command = super(QtWK2Port, self).run_webkit_tests_command()
+ command.append("--qt")
+ command.append("-2")
return command
- def run_webkit_unit_tests_command(self):
- return self.script_shell_command("run-chromium-webkit-unit-tests")
-
- def run_javascriptcore_tests_command(self):
- return None
-
-
-class ChromiumAndroidPort(ChromiumPort):
- port_flag_name = "chromium-android"
- def update_webkit_command(self, non_interactive=False):
- command = super(ChromiumAndroidPort, self).update_webkit_command(non_interactive=non_interactive)
- command.append("--chromium-android")
- return command
+class EflPort(DeprecatedPort):
+ port_flag_name = "efl"
def build_webkit_command(self, build_style=None):
- command = super(ChromiumAndroidPort, self).build_webkit_command(build_style=build_style)
- command.append("--chromium-android")
+ command = super(EflPort, self).build_webkit_command(build_style=build_style)
+ command.append("--efl")
+ command.append("--update-efl")
+ command.append("--no-webkit2")
+ command.append(super(EflPort, self).makeArgs())
return command
-class ChromiumXVFBPort(ChromiumPort):
- port_flag_name = "chromium-xvfb"
+class EflWK2Port(DeprecatedPort):
+ port_flag_name = "efl-wk2"
- def run_webkit_tests_command(self):
- return ["xvfb-run"] + super(ChromiumXVFBPort, self).run_webkit_tests_command()
+ def build_webkit_command(self, build_style=None):
+ command = super(EflWK2Port, self).build_webkit_command(build_style=build_style)
+ command.append("--efl")
+ command.append("--update-efl")
+ command.append("--no-webkit1")
+ command.append(super(EflWK2Port, self).makeArgs())
+ return command
diff --git a/Tools/Scripts/webkitpy/common/config/ports_mock.py b/Tools/Scripts/webkitpy/common/config/ports_mock.py
index 1d1431115..779796cd8 100644
--- a/Tools/Scripts/webkitpy/common/config/ports_mock.py
+++ b/Tools/Scripts/webkitpy/common/config/ports_mock.py
@@ -28,17 +28,9 @@
class MockPort(object):
- results_directory = "/mock-results"
-
def name(self):
return "MockPort"
- def layout_tests_results_path(self):
- return "/mock-results/full_results.json"
-
- def unit_tests_results_path(self):
- return "/mock-results/webkit_unit_tests_output.xml"
-
def check_webkit_style_command(self):
return ["mock-check-webkit-style"]
@@ -65,3 +57,6 @@ class MockPort(object):
def run_webkit_tests_command(self):
return ['mock-run-webkit-tests']
+
+ def run_bindings_tests_command(self):
+ return ['mock-run-bindings-tests']
diff --git a/Tools/Scripts/webkitpy/common/config/ports_unittest.py b/Tools/Scripts/webkitpy/common/config/ports_unittest.py
index 58d58d473..daca54a68 100644
--- a/Tools/Scripts/webkitpy/common/config/ports_unittest.py
+++ b/Tools/Scripts/webkitpy/common/config/ports_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2009, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -27,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.config.ports import *
@@ -43,34 +42,28 @@ class DeprecatedPortTest(unittest.TestCase):
def test_gtk_port(self):
self.assertEqual(GtkPort().flag(), "--port=gtk")
self.assertEqual(GtkPort().run_webkit_tests_command(), DeprecatedPort().script_shell_command("run-webkit-tests") + ["--gtk"])
- self.assertEqual(GtkPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--gtk", "--update-gtk", DeprecatedPort().makeArgs()])
- self.assertEqual(GtkPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--gtk", "--update-gtk", DeprecatedPort().makeArgs()])
+ self.assertEqual(GtkPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--gtk", "--update-gtk", "--no-webkit2", DeprecatedPort().makeArgs()])
+ self.assertEqual(GtkPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--gtk", "--update-gtk", "--no-webkit2", DeprecatedPort().makeArgs()])
+
+ def test_gtk_wk2_port(self):
+ self.assertEqual(GtkWK2Port().flag(), "--port=gtk-wk2")
+ self.assertEqual(GtkWK2Port().run_webkit_tests_command(), DeprecatedPort().script_shell_command("run-webkit-tests") + ["--gtk", "-2"])
+ self.assertEqual(GtkWK2Port().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--gtk", "--update-gtk", "--no-webkit1", DeprecatedPort().makeArgs()])
+ self.assertEqual(GtkWK2Port().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--gtk", "--update-gtk", "--no-webkit1", DeprecatedPort().makeArgs()])
def test_efl_port(self):
self.assertEqual(EflPort().flag(), "--port=efl")
- self.assertEqual(EflPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--efl", "--update-efl", DeprecatedPort().makeArgs()])
- self.assertEqual(EflPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--efl", "--update-efl", DeprecatedPort().makeArgs()])
+ self.assertEqual(EflPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--efl", "--update-efl", "--no-webkit2", DeprecatedPort().makeArgs()])
+ self.assertEqual(EflPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--efl", "--update-efl", "--no-webkit2", DeprecatedPort().makeArgs()])
def test_qt_port(self):
self.assertEqual(QtPort().flag(), "--port=qt")
- self.assertEqual(QtPort().run_webkit_tests_command(), DeprecatedPort().script_shell_command("run-webkit-tests"))
- self.assertEqual(QtPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--qt", DeprecatedPort().makeArgs()])
- self.assertEqual(QtPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--qt", DeprecatedPort().makeArgs()])
-
- def test_chromium_port(self):
- self.assertEqual(ChromiumPort().flag(), "--port=chromium")
- self.assertEqual(ChromiumPort().run_webkit_tests_command(), DeprecatedPort().script_shell_command("new-run-webkit-tests") + ["--chromium", "--skip-failing-tests"])
- self.assertEqual(ChromiumPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--chromium", "--update-chromium"])
- self.assertEqual(ChromiumPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--chromium", "--update-chromium"])
- self.assertEqual(ChromiumPort().update_webkit_command(), DeprecatedPort().script_shell_command("update-webkit") + ["--chromium"])
-
- def test_chromium_android_port(self):
- self.assertEqual(ChromiumAndroidPort().build_webkit_command(), ChromiumPort().build_webkit_command() + ["--chromium-android"])
- self.assertEqual(ChromiumAndroidPort().update_webkit_command(), ChromiumPort().update_webkit_command() + ["--chromium-android"])
-
- def test_chromium_xvfb_port(self):
- self.assertEqual(ChromiumXVFBPort().run_webkit_tests_command(), ['xvfb-run'] + DeprecatedPort().script_shell_command('new-run-webkit-tests') + ['--chromium', '--skip-failing-tests'])
-
+ self.assertEqual(QtPort().run_webkit_tests_command(), DeprecatedPort().script_shell_command("run-webkit-tests") + ["--qt"])
+ self.assertEqual(QtPort().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--qt", "--no-webkit2", DeprecatedPort().makeArgs()])
+ self.assertEqual(QtPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--qt", "--no-webkit2", DeprecatedPort().makeArgs()])
-if __name__ == '__main__':
- unittest.main()
+ def test_qt_wk2_port(self):
+ self.assertEqual(QtWK2Port().flag(), "--port=qt-wk2")
+ self.assertEqual(QtWK2Port().run_webkit_tests_command(), DeprecatedPort().script_shell_command("run-webkit-tests") + ["--qt", "-2"])
+ self.assertEqual(QtWK2Port().build_webkit_command(), DeprecatedPort().script_shell_command("build-webkit") + ["--qt", DeprecatedPort().makeArgs()])
+ self.assertEqual(QtWK2Port().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--qt", DeprecatedPort().makeArgs()])
diff --git a/Tools/Scripts/webkitpy/common/config/urls.py b/Tools/Scripts/webkitpy/common/config/urls.py
index 88ad373a1..311f25061 100644
--- a/Tools/Scripts/webkitpy/common/config/urls.py
+++ b/Tools/Scripts/webkitpy/common/config/urls.py
@@ -37,10 +37,6 @@ def view_revision_url(revision_number):
return "http://trac.webkit.org/changeset/%s" % revision_number
-def chromium_results_zip_url(builder_name):
- return 'http://build.chromium.org/f/chromium/layout_test_results/%s/layout-test-results.zip' % builder_name
-
-chromium_lkgr_url = "http://chromium-status.appspot.com/lkgr"
contribution_guidelines = "http://webkit.org/coding/contributing.html"
bug_server_domain = "webkit.org"
@@ -54,9 +50,6 @@ attachment_url = _bug_server_regex + r"attachment\.cgi\?id=(?P<attachment_id>\d+
direct_attachment_url = r"https?://bug-(?P<bug_id>\d+)-attachments.%s/attachment\.cgi\?id=(?P<attachment_id>\d+)" % bug_server_domain
buildbot_url = "http://build.webkit.org"
-chromium_buildbot_url = "http://build.chromium.org/p/chromium.webkit"
-
-omahaproxy_url = "http://omahaproxy.appspot.com/"
def parse_bug_id(string):
if not string:
diff --git a/Tools/Scripts/webkitpy/common/config/urls_unittest.py b/Tools/Scripts/webkitpy/common/config/urls_unittest.py
index b4bf8978a..c1a082ccb 100644
--- a/Tools/Scripts/webkitpy/common/config/urls_unittest.py
+++ b/Tools/Scripts/webkitpy/common/config/urls_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from .urls import parse_bug_id, parse_attachment_id
@@ -42,12 +42,12 @@ class URLsTest(unittest.TestCase):
self.assertEqual(12345, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?id=12345excludefield=attachmentdata&ctype=xml"))
# Our url parser is super-fragile, but at least we're testing it.
- self.assertEqual(None, parse_bug_id("http://www.webkit.org/b/12345"))
- self.assertEqual(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345"))
- self.assertEqual(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345&excludefield=attachmentdata"))
- self.assertEqual(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&excludefield=attachmentdata&id=12345"))
- self.assertEqual(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&ctype=xml&id=12345"))
- self.assertEqual(None, parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&id=12345&ctype=xml"))
+ self.assertIsNone(parse_bug_id("http://www.webkit.org/b/12345"))
+ self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345"))
+ self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&id=12345&excludefield=attachmentdata"))
+ self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?ctype=xml&excludefield=attachmentdata&id=12345"))
+ self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&ctype=xml&id=12345"))
+ self.assertIsNone(parse_bug_id("http://bugs.webkit.org/show_bug.cgi?excludefield=attachmentdata&id=12345&ctype=xml"))
def test_parse_attachment_id(self):
self.assertEqual(12345, parse_attachment_id("https://bugs.webkit.org/attachment.cgi?id=12345&action=review"))
diff --git a/Tools/Scripts/webkitpy/common/config/watchlist b/Tools/Scripts/webkitpy/common/config/watchlist
index 75f709b76..c247c2dd3 100755..100644
--- a/Tools/Scripts/webkitpy/common/config/watchlist
+++ b/Tools/Scripts/webkitpy/common/config/watchlist
@@ -1,5 +1,3 @@
-# -*- mode: Python;-*-
-#
# When editing this file, please run the following command to make sure you
# haven't introduced any syntax errors:
#
@@ -12,17 +10,6 @@
#
{
"DEFINITIONS": {
- "ChromiumGraphics": {
- "filename": r"Source/WebCore/platform/graphics/chromium/",
- },
- "ChromiumPublicApi": {
- "filename": r"Source/WebKit/chromium/public/"
- r"|Source/Platform/chromium/public/"
- r"|Tools/DumpRenderTree/chromium/TestRunner/public",
- },
- "ChromiumTestRunner": {
- "filename": r"Tools/DumpRenderTree/chromium/TestRunner",
- },
"AppleMacPublicApi": {
"filename": r"Source/WebCore/bindings/objc/PublicDOMInterfaces.h"
},
@@ -33,30 +20,22 @@
r"|Source/WebCore/html/shadow/(SliderThumbElement|TextControlInnerElements)\."
r"|Source/WebCore/rendering/Render(FileUploadControl|ListBox|MenuList|Slider|TextControl.*)\."
},
+ "Geolocation": {
+ "filename": r"Source/WebCore/Modules/geolocation/"
+ r"|Source/WebCore/page/GeolocationClient.h"
+ r"|Source/WebCore/bindings/js/JSGeolocationCustom.cpp"
+ r"|Source/WebCore/platform/mock/GeolocationClientMock.(h|cpp)"
+ r"|Source/WebKit2/WebProcess/Geolocation/",
+ },
"GStreamerGraphics": {
"filename": r"Source/WebCore/platform/graphics/gstreamer/",
},
+ "GStreamerAudio": {
+ "filename": r"Source/WebCore/platform/audio/gstreamer/",
+ },
"WebIDL": {
"filename": r"Source/WebCore/(?!inspector)(?!testing).*\.idl"
},
- "ThreadingFiles": {
- "filename": r"Source/JavaScriptCore/wtf/ThreadSpecific\."
- r"|Source/JavaScriptCore/wtf/ThreadSafeRefCounted\."
- r"|Source/JavaScriptCore/wtf/ThreadingPrimitives\."
- r"|Source/JavaScriptCore/wtf/Threading\."
- r"|Source/WebCore/dom/CrossThreadTask\."
- r"|Source/WebCore/platform/CrossThreadCopier\.",
- },
- "ThreadingUsage": {
- # The intention of this regex is to detect places where people are using common threading mechanisms,
- # so that one can look them over for common mistakes. This list is long and likely to get longer over time.
- # Note the negative look-ahead to avoid new mentions of the files (for builds or includes).
- "more": r"(AllowCrossThreadAccess|AtomicallyInitialize|CrossThreadCopier|CrossThreadRefCounted|Mutex|ReadWriteLock|ThreadCondition|ThreadSafeRefCounted|ThreadSpecific"
- r"|createCallbackTask|crossThreadString|deprecatedTurnOffVerifier|threadsafeCopy)(?!\.(h|cpp))",
- },
- "WatchListScript": {
- "filename": r"Tools/Scripts/webkitpy/common/watchlist/",
- },
"webkitpy": {
"filename": r"Tools/Scripts/webkitpy/",
},
@@ -69,27 +48,9 @@
"SVNScripts": {
"filename": r"Tools/Scripts/svn-.*",
},
- "TestFailures": {
- "filename": r"Tools/BuildSlaveSupport/build.webkit.org-config/public_html/TestFailures/",
- },
- "SecurityCritical": {
- "more": r"[Ss]ecurityOrigin(?!\.(h|cpp))",
- "less": r"[Ss]ecurityOrigin(?!\.(h|cpp))",
- "filename": r"XSS|[Ss]ecurity",
- },
"XSS": {
"filename": r".*XSS",
},
- "SkiaGraphics": {
- "filename": r"Source/WebCore/platform/graphics/skia/"
- r"|Source/WebCore/platform/graphics/filters/skia/",
- },
- "V8Bindings": {
- "filename": r"Source/WebCore/bindings/v8/",
- },
- "BindingsScripts": {
- "filename": r"Source/WebCore/bindings/scripts/",
- },
"FrameLoader": {
"more": r"FrameLoader\.(cpp|h)",
},
@@ -99,8 +60,17 @@
"Rendering": {
"filename": r"Source/WebCore/rendering/",
},
- "StyleChecker": {
- "filename": r"Tools/Scripts/webkitpy/style/",
+ "RenderLayers": {
+ "filename": r"Source/WebCore/rendering/RenderLayer*",
+ },
+ "GraphicsLayer": {
+ "filename": r"Source/WebCore/platform/graphics/GraphicsLayer*",
+ },
+ "CoreAnimation": {
+ "filename": r"Source/WebCore/platform/graphics/ca/",
+ },
+ "Animation": {
+ "filename": r"Source/WebCore/page/animation/",
},
"GtkWebKit2PublicAPI": {
"filename": r"Source/WebKit2/UIProcess/API/gtk/",
@@ -273,8 +243,10 @@
r"|Source/WebCore/rendering/svg",
},
"WebInspectorAPI": {
- "filename": r"Source/WebCore/inspector/*.json"
- r"|Source/WebCore/inspector/*.idl",
+ "filename": r"Source/WebCore/inspector/InjectedScriptSource.js"
+ r"|Source/WebCore/inspector/.+\.json"
+ r"|Source/WebCore/inspector/.+\.idl"
+ r"|Source/WebCore/page/Console.idl",
},
"WebSocket": {
"filename": r"Source/WebCore/Modules/websockets"
@@ -294,76 +266,131 @@
},
"Harfbuzz": {
"filename": r"Source/WebCore/platform/graphics/harfbuzz",
- }
+ },
+ "PerformanceTests": {
+ "filename": r"PerformanceTests"
+ r"|Tools/Scripts/webkitpy/performance_tests",
+ },
+ "GtkBuildSystem": {
+ "filename": r"configure.ac"
+ r"|.*GNUmakefile.(am|features.am.in)",
+ },
+ "ConsoleUsage": {
+ "more": r"[Aa]ddConsoleMessage|reportException|logExceptionToConsole|addMessage|printErrorMessage"
+ },
+ "ContentSecurityPolicyUsage": {
+ "more": r"[Cc]ontentSecurityPolicy(?!\.(h|cpp))",
+ },
+ "ContentSecurityPolicyFiles": {
+ "filename": r"Source/WebCore/page/(Content|DOM)SecurityPolicy\."
+ r"|LayoutTests/http/tests/security/contentSecurityPolicy"
+ },
+ "RegionsDevelopment": {
+ "filename": r"Source/WebCore/rendering/RenderRegion\.(h|cpp)"
+ r"|Source/WebCore/rendering/RenderFlowThread\.(h|cpp)"
+ r"|Source/WebCore/rendering/FlowThreadController\.(h|cpp)"
+ r"|Source/WebCore/rendering/RenderRegionSet\.(h|cpp)"
+ r"|Source/WebCore/rendering/RenderNamedFlowThread\.(h|cpp)"
+ r"|Source/WebCore/rendering/RenderBoxRegionInfo\.h"
+ r"|Source/WebCore/dom/WebKitNamedFlow\.(h|cpp|idl)"
+ r"|Source/WebCore/dom/(DOM)?NamedFlowCollection\.(h|cpp|idl)"
+ r"|Source/WebCore/css/WebKitCSSRegionRule\.(h|cpp|idl)"
+ r"|LayoutTests/fast/regions",
+ },
+ "RegionsExpectationsMore": {
+ "filename": r"LayoutTests/platform/.*TestExpectations",
+ "more": r"fast/regions/.*\.html",
+ },
+ "RegionsExpectationsLess": {
+ "filename": r"LayoutTests/platform/.*TestExpectations",
+ "less": r"fast/regions/.*\.html",
+ },
+ "RegionsUsage": {
+ "more": r"(RenderRegion|RenderFlowThread|RenderNamedFlowThread)(?!\.(h|cpp))",
+ },
+ "IndexedDB": {
+ "filename": r"Source/WebCore/Modules/indexeddb"
+ r"|Source/WebCore/bindings/.*IDB.*\.(h|cpp)"
+ r"|Source/WebCore/bindings/.*SerializedScriptValue.*\.(h|cpp)"
+ r"|Source/WebCore/platform/leveldb"
+ r"|LayoutTests/storage/indexeddb"
+ r"|LayoutTests/platform/.*/storage/indexeddb",
+ },
+ "BindingsScripts": {
+ "filename": r"Source/WebCore/bindings/scripts/",
+ },
+ "cURLNetwork": {
+ "filename": r"Source/WebCore/platform/network/curl/",
+ },
},
"CC_RULES": {
# Note: All email addresses listed must be registered with bugzilla.
# Specifically, levin@chromium.org and levin+threading@chromium.org are
# two different accounts as far as bugzilla is concerned.
- "Accessibility": [ "cfleizach@apple.com", "dmazzoni@google.com", "apinheiro@igalia.com", "jdiggs@igalia.com" ],
+ "Accessibility": [ "cfleizach@apple.com", "dmazzoni@google.com", "apinheiro@igalia.com", "jdiggs@igalia.com", "aboxhall@chromium.org", "mario@webkit.org" ],
+ "Animation" : [ "simon.fraser@apple.com", "dino@apple.com", "dstockwell@chromium.org" ],
"AppleMacPublicApi": [ "timothy@apple.com" ],
- "Battery": [ "gyuyoung.kim@samsung.com" ],
+ "Battery": [ "gyuyoung.kim@webkit.org", "dchris@gmail.com" ],
+ "BindingsScripts": [ "dchris@gmail.com" ],
"BlackBerry": [ "mifenton@rim.com", "rwlbuis@gmail.com", "tonikitoo@webkit.org" ],
"Cairo": [ "dominik.rottsches@intel.com" ],
- "CMake": [ "rakuco@webkit.org", "gyuyoung.kim@samsung.com" ],
- "CoordinatedGraphics" : [ "noam@webkit.org", "zeno@webkit.org" ],
- "CSS": [ "alexis@webkit.org", "macpherson@chromium.org", "cmarcelo@webkit.org", "ojan@chromium.org"],
- "ChromiumGraphics": [ "jamesr@chromium.org", "cc-bugs@chromium.org" ],
- "ChromiumPublicApi": [ "abarth@webkit.org", "dglazkov@chromium.org", "fishd@chromium.org", "jamesr@chromium.org", "tkent+wkapi@chromium.org" ],
- "ChromiumTestRunner": [ "jochen@chromium.org" ],
- "DOM": [ "ojan@chromium.org" ],
+ "CMake": [ "rakuco@webkit.org", "gyuyoung.kim@webkit.org" ],
+ "CoordinatedGraphics" : [ "noam@webkit.org", "zeno@webkit.org", "cmarcelo@webkit.org", "luiz@webkit.org" ],
+ "ConsoleUsage" : [ "mkwst@chromium.org" ],
+ "ContentSecurityPolicyFiles|ContentSecurityPolicyUsage" : [ "mkwst@chromium.org" ],
+ "CoreAnimation" : [ "simon.fraser@apple.com" ],
+ "CSS": [ "alexis@webkit.org", "macpherson@chromium.org", "esprehn+autocc@chromium.org", "glenn@skynav.com" ],
+ "cURLNetwork": [ "galpeter@inf.u-szeged.hu" ],
+ "DOM": [ "esprehn+autocc@chromium.org", "kangil.han@samsung.com" ],
"DOMAttributes": [ "cmarcelo@webkit.org", ],
- "EFL": [ "rakuco@webkit.org", "gyuyoung.kim@samsung.com" ],
- "EFLWebKit2PlatformSpecific": [ "gyuyoung.kim@samsung.com", "rakuco@webkit.org" ],
- "EFLWebKit2PublicAPI": [ "gyuyoung.kim@samsung.com", "rakuco@webkit.org" ],
+ "EFL": [ "rakuco@webkit.org", "gyuyoung.kim@webkit.org", "dchris@gmail.com" ],
+ "EFLWebKit2PlatformSpecific": [ "gyuyoung.kim@webkit.org", "rakuco@webkit.org", "dchris@gmail.com" ],
+ "EFLWebKit2PublicAPI": [ "gyuyoung.kim@webkit.org", "rakuco@webkit.org", "dchris@gmail.com" ],
"Editing": [ "mifenton@rim.com" ],
- "Filters": [ "dino@apple.com" ],
+ "Filters": [ "dino@apple.com", "kondapallykalyan@gmail.com" ],
"Forms": [ "tkent@chromium.org", "mifenton@rim.com" ],
- "FrameLoader": [ "abarth@webkit.org", "japhet@chromium.org" ],
+ "FrameLoader": [ "japhet@chromium.org" ],
+ "Geolocation": [ "benjamin@webkit.org" ],
+ "GraphicsLayer": [ "simon.fraser@apple.com", "kondapallykalyan@gmail.com" ],
"GStreamerGraphics": [ "alexis@webkit.org", "pnormand@igalia.com", "gns@gnome.org", "mrobinson@webkit.org" ],
+ "GStreamerAudio": [ "pnormand@igalia.com", "dchris@gmail.com" ],
+ "GtkBuildSystem": [ "zandobersek@gmail.com" ],
"GtkWebKit2PublicAPI": [ "cgarcia@igalia.com", "gns@gnome.org", "mrobinson@webkit.org" ],
"Harfbuzz": [ "dominik.rottsches@intel.com" ],
- "HTML": [ "ojan@chromium.org" ],
+ "HTML": [ "esprehn+autocc@chromium.org" ],
+ "IndexedDB": [ "alecflett@chromium.org", "jsbell@chromium.org" ],
"Loader": [ "japhet@chromium.org" ],
- "MathML": [ "dbarton@mathscribe.com" ],
- "Media": [ "feature-media-reviews@chromium.org", "eric.carlson@apple.com" ],
+ "MathML": [ "dbarton@mathscribe.com", "mrobinson@webkit.org", "fred.wang@free.fr" ],
+ "Media": [ "eric.carlson@apple.com", "jer.noble@apple.com", "glenn@skynav.com" ],
"MediaStream": [ "tommyw@google.com", "hta@google.com" ],
- "NetworkInfo": [ "gyuyoung.kim@samsung.com" ],
- "OpenGL" : [ "noam@webkit.org", "dino@apple.com" ],
- "QtBuildSystem" : [ "vestbo@webkit.org", "abecsi@webkit.org" ],
+ "NetworkInfo": [ "gyuyoung.kim@webkit.org", "dchris@gmail.com" ],
+ "OpenGL" : [ "noam@webkit.org", "dino@apple.com", "kondapallykalyan@gmail.com" ],
+ "PerformanceTests": [ "rniwa@webkit.org" ],
+ "QtBuildSystem" : [ "abecsi@webkit.org" ],
"QtGraphics" : [ "noam@webkit.org" ],
"QtWebKit2PlatformSpecific": [ "alexis@webkit.org", "cmarcelo@webkit.org", "abecsi@webkit.org" ],
"QtWebKit2PublicAPI": [ "alexis@webkit.org", "cmarcelo@webkit.org", "abecsi@webkit.org" ],
- "Rendering": [ "eric@webkit.org", "ojan@chromium.org" ],
+ "RegionsDevelopment|RegionsExpectationsMore|RegionsExpectationsLess|RegionsUsage": [ "WebkitBugTracker@adobe.com" ],
+ "Rendering": [ "esprehn+autocc@chromium.org", "glenn@skynav.com", "kondapallykalyan@gmail.com" ],
+ "RenderLayers" : [ "simon.fraser@apple.com", "kondapallykalyan@gmail.com" ],
"SVG": ["schenney@chromium.org", "pdr@google.com", "fmalita@chromium.org", "dominik.rottsches@intel.com" ],
"SVNScripts": [ "dbates@webkit.org" ],
- "ScrollingCoordinator": [ "andersca@apple.com", "jamesr@chromium.org", "tonikitoo@webkit.org" ],
- "SecurityCritical": [ "abarth@webkit.org" ],
- "SkiaGraphics": [ "senorblanco@chromium.org" ],
+ "ScrollingCoordinator": [ "andersca@apple.com", "jamesr@chromium.org", "tonikitoo@webkit.org", "cmarcelo@webkit.org", "luiz@webkit.org" ],
"Selectors": [ "allan.jensen@digia.com" ],
- "SoupNetwork": [ "rakuco@webkit.org", "gns@gnome.org", "mrobinson@webkit.org", "danw@gnome.org" ],
- "StyleChecker": [ "levin@chromium.org", ],
- "TestFailures": [ "abarth@webkit.org", "dglazkov@chromium.org", "ojan@chromium.org" ],
- "TextureMapper" : [ "noam@webkit.org" ],
- "ThreadingFiles|ThreadingUsage": [ "levin+threading@chromium.org", ],
+ "SoupNetwork": [ "rakuco@webkit.org", "gns@gnome.org", "mrobinson@webkit.org", "danw@gnome.org", "dchris@gmail.com" ],
+ "TextureMapper" : [ "noam@webkit.org", "cmarcelo@webkit.org", "luiz@webkit.org", "kondapallykalyan@gmail.com" ],
"TouchAdjustment" : [ "allan.jensen@digia.com" ],
- "V8Bindings|BindingsScripts": [ "abarth@webkit.org", "japhet@chromium.org", "haraken@chromium.org" ],
- "WTF": [ "benjamin@webkit.org", "ojan@chromium.org"],
- "WatchListScript": [ "levin+watchlist@chromium.org", ],
- "WebGL": [ "dino@apple.com" ],
- "WebIDL": [ "abarth@webkit.org", "ojan@chromium.org" ],
- "WebInspectorAPI": [ "timothy@apple.com", "joepeck@webkit.org" ],
+ "WTF": [ "benjamin@webkit.org", "cmarcelo@webkit.org" ],
+ "WebGL": [ "dino@apple.com", "kondapallykalyan@gmail.com" ],
+ "WebIDL": [ "esprehn+autocc@chromium.org", "dchris@gmail.com", "kondapallykalyan@gmail.com" ],
+ "WebInspectorAPI": [ "timothy@apple.com", "joepeck@webkit.org", "graouts@apple.com" ],
"WebKitGTKTranslations": [ "gns@gnome.org", "mrobinson@webkit.org" ],
- "WebSocket": [ "yutak@chromium.org" ],
- "XSS": [ "dbates@webkit.org", "ojan@chromium.org" ],
+ "WebSocket": [ "yutak@chromium.org", "toyoshim+watchlist@chromium.org" ],
+ "XSS": [ "dbates@webkit.org" ],
"webkitperl": [ "dbates@webkit.org" ],
- "webkitpy": [ "abarth@webkit.org", "dpranke@chromium.org" ],
+ "webkitpy": [ "dpranke@chromium.org", "glenn@skynav.com" ],
},
"MESSAGE_RULES": {
- "ChromiumPublicApi": [ "Please wait for approval from abarth@webkit.org, dglazkov@chromium.org, "
- "fishd@chromium.org, jamesr@chromium.org or tkent@chromium.org before "
- "submitting, as this patch contains changes to the Chromium public API. "
- "See also https://trac.webkit.org/wiki/ChromiumWebKitAPI." ],
"AppleMacPublicApi": [ "Please wait for approval from timothy@apple.com (or another member "
"of the Apple Safari Team) before submitting "
"because this patch contains changes to the Apple Mac "
diff --git a/Tools/Scripts/webkitpy/common/editdistance_unittest.py b/Tools/Scripts/webkitpy/common/editdistance_unittest.py
index 4ae6441bf..1f67572b4 100644
--- a/Tools/Scripts/webkitpy/common/editdistance_unittest.py
+++ b/Tools/Scripts/webkitpy/common/editdistance_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.editdistance import edit_distance
diff --git a/Tools/Scripts/webkitpy/common/find_files.py b/Tools/Scripts/webkitpy/common/find_files.py
index b65e77f9f..7a10120ef 100644
--- a/Tools/Scripts/webkitpy/common/find_files.py
+++ b/Tools/Scripts/webkitpy/common/find_files.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/common/find_files_unittest.py b/Tools/Scripts/webkitpy/common/find_files_unittest.py
index 641251f0e..4c0a7cdc4 100644
--- a/Tools/Scripts/webkitpy/common/find_files_unittest.py
+++ b/Tools/Scripts/webkitpy/common/find_files_unittest.py
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.filesystem import FileSystem
import find_files
@@ -59,7 +59,3 @@ class TestWinNormalize(unittest.TestCase):
if sys.platform != 'win32':
return
self.assert_filesystem_normalizes(FileSystem())
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/host.py b/Tools/Scripts/webkitpy/common/host.py
index 7dd5ad024..022c13bf8 100644
--- a/Tools/Scripts/webkitpy/common/host.py
+++ b/Tools/Scripts/webkitpy/common/host.py
@@ -35,10 +35,9 @@ from webkitpy.common.checkout import Checkout
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.memoized import memoized
from webkitpy.common.net import bugzilla, buildbot, web
-from webkitpy.common.net.buildbot.chromiumbuildbot import ChromiumBuildBot
from webkitpy.common.system.systemhost import SystemHost
-from webkitpy.common.watchlist.watchlistloader import WatchListLoader
-from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.common.watchlist.watchlistparser import WatchListParser
+from webkitpy.port.factory import PortFactory
_log = logging.getLogger(__name__)
@@ -79,55 +78,7 @@ class Host(SystemHost):
os.environ['LC_MESSAGES'] = 'en_US.UTF-8'
os.environ['LC_ALL'] = ''
- # FIXME: This is a horrible, horrible hack for ChromiumWin and should be removed.
- # Maybe this belongs in SVN in some more generic "find the svn binary" codepath?
- # Or possibly Executive should have a way to emulate shell path-lookups?
- # FIXME: Unclear how to test this, since it currently mutates global state on SVN.
- def _engage_awesome_windows_hacks(self):
- try:
- self.executive.run_command(['svn', 'help'])
- except OSError, e:
- try:
- self.executive.run_command(['svn.bat', 'help'])
- # Chromium Win uses the depot_tools package, which contains a number
- # of development tools, including Python and svn. Instead of using a
- # real svn executable, depot_tools indirects via a batch file, called
- # svn.bat. This batch file allows depot_tools to auto-update the real
- # svn executable, which is contained in a subdirectory.
- #
- # That's all fine and good, except that subprocess.popen can detect
- # the difference between a real svn executable and batch file when we
- # don't provide use shell=True. Rather than use shell=True on Windows,
- # We hack the svn.bat name into the SVN class.
- _log.debug('Engaging svn.bat Windows hack.')
- from webkitpy.common.checkout.scm.svn import SVN
- SVN.executable_name = 'svn.bat'
- except OSError, e:
- _log.debug('Failed to engage svn.bat Windows hack.')
- try:
- self.executive.run_command(['git', 'help'])
- except OSError, e:
- try:
- self.executive.run_command(['git.bat', 'help'])
- # Chromium Win uses the depot_tools package, which contains a number
- # of development tools, including Python and git. Instead of using a
- # real git executable, depot_tools indirects via a batch file, called
- # git.bat. This batch file allows depot_tools to auto-update the real
- # git executable, which is contained in a subdirectory.
- #
- # That's all fine and good, except that subprocess.popen can detect
- # the difference between a real git executable and batch file when we
- # don't provide use shell=True. Rather than use shell=True on Windows,
- # We hack the git.bat name into the SVN class.
- _log.debug('Engaging git.bat Windows hack.')
- from webkitpy.common.checkout.scm.git import Git
- Git.executable_name = 'git.bat'
- except OSError, e:
- _log.debug('Failed to engage git.bat Windows hack.')
-
def initialize_scm(self, patch_directories=None):
- if sys.platform == "win32":
- self._engage_awesome_windows_hacks()
detector = SCMDetector(self.filesystem, self.executive)
self._scm = detector.default_scm(patch_directories)
self._checkout = Checkout(self.scm())
@@ -138,15 +89,12 @@ class Host(SystemHost):
def checkout(self):
return self._checkout
- def buildbot_for_builder_name(self, name):
- if self.port_factory.get_from_builder_name(name).is_chromium():
- return self.chromium_buildbot()
- return self.buildbot
-
- @memoized
- def chromium_buildbot(self):
- return ChromiumBuildBot()
-
@memoized
def watch_list(self):
- return WatchListLoader(self.filesystem).load()
+ config_path = self.filesystem.dirname(self.filesystem.path_to_module('webkitpy.common.config'))
+ watch_list_full_path = self.filesystem.join(config_path, 'watchlist')
+ if not self.filesystem.exists(watch_list_full_path):
+ raise Exception('Watch list file (%s) not found.' % watch_list_full_path)
+
+ watch_list_contents = self.filesystem.read_text_file(watch_list_full_path)
+ return WatchListParser().parse(watch_list_contents)
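
(Rough usage sketch of the new loading path, assuming Host is constructible as elsewhere in webkitpy; it is not part of the patch. The watchlist file next to webkitpy.common.config is read as text and handed to WatchListParser, and a missing file now raises an Exception instead of going through the removed WatchListLoader.)

# Illustrative sketch only; Host and WatchListParser are used as shown in the hunk above.
from webkitpy.common.host import Host

host = Host()
watch_list = host.watch_list()  # reads .../webkitpy/common/config/watchlist and parses it
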
diff --git a/Tools/Scripts/webkitpy/common/host_mock.py b/Tools/Scripts/webkitpy/common/host_mock.py
index 8b508bf8f..be238463d 100644
--- a/Tools/Scripts/webkitpy/common/host_mock.py
+++ b/Tools/Scripts/webkitpy/common/host_mock.py
@@ -35,15 +35,15 @@ from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.common.watchlist.watchlist_mock import MockWatchList
# New-style ports need to move down into webkitpy.common.
-from webkitpy.layout_tests.port.factory import PortFactory
-from webkitpy.layout_tests.port.test import add_unit_tests_to_mock_filesystem
+from webkitpy.port.factory import PortFactory
+from webkitpy.port.test import add_unit_tests_to_mock_filesystem
class MockHost(MockSystemHost):
- def __init__(self, log_executive=False, executive_throws_when_run=None, initialize_scm_by_default=True):
+ def __init__(self, log_executive=False, executive_throws_when_run=None, initialize_scm_by_default=True, web=None):
MockSystemHost.__init__(self, log_executive, executive_throws_when_run)
add_unit_tests_to_mock_filesystem(self.filesystem)
- self.web = MockWeb()
+ self.web = web or MockWeb()
self._checkout = MockCheckout()
self._scm = None
@@ -53,7 +53,6 @@ class MockHost(MockSystemHost):
self.initialize_scm()
self.bugs = MockBugzilla()
self.buildbot = MockBuildBot()
- self._chromium_buildbot = MockBuildBot()
# Note: We're using a real PortFactory here. Tests which don't wish to depend
# on the list of known ports should override this with a MockPortFactory.
@@ -73,9 +72,6 @@ class MockHost(MockSystemHost):
def checkout(self):
return self._checkout
- def chromium_buildbot(self):
- return self._chromium_buildbot
-
def watch_list(self):
return self._watch_list
diff --git a/Tools/Scripts/webkitpy/common/lru_cache.py b/Tools/Scripts/webkitpy/common/lru_cache.py
index 4178d0f7d..02a3d1c35 100644
--- a/Tools/Scripts/webkitpy/common/lru_cache.py
+++ b/Tools/Scripts/webkitpy/common/lru_cache.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/common/lru_cache_unittest.py b/Tools/Scripts/webkitpy/common/lru_cache_unittest.py
index 44a09e661..96ca2095c 100644
--- a/Tools/Scripts/webkitpy/common/lru_cache_unittest.py
+++ b/Tools/Scripts/webkitpy/common/lru_cache_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -25,7 +24,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common import lru_cache
@@ -92,7 +91,3 @@ class LRUCacheTest(unittest.TestCase):
def test_set_again(self):
self.lru['key_1'] = 'item_4'
self.assertEqual(set(self.lru.items()), set([('key_1', 'item_4'), ('key_3', 'item_3'), ('key_2', 'item_2')]))
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/memoized_unittest.py b/Tools/Scripts/webkitpy/common/memoized_unittest.py
index dd7c793d8..af406dfea 100644
--- a/Tools/Scripts/webkitpy/common/memoized_unittest.py
+++ b/Tools/Scripts/webkitpy/common/memoized_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.memoized import memoized
diff --git a/Tools/Scripts/webkitpy/common/message_pool.py b/Tools/Scripts/webkitpy/common/message_pool.py
index 2e1e85e5c..03056cf64 100644
--- a/Tools/Scripts/webkitpy/common/message_pool.py
+++ b/Tools/Scripts/webkitpy/common/message_pool.py
@@ -184,7 +184,7 @@ class _MessagePool(object):
pass
-class WorkerException(Exception):
+class WorkerException(BaseException):
"""Raised when we receive an unexpected/unknown exception from a worker."""
pass
diff --git a/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py b/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py
index 11897764e..366c41314 100755..100644
--- a/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py
+++ b/Tools/Scripts/webkitpy/common/multiprocessing_bootstrap.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
@@ -58,7 +57,8 @@ def run(*parts):
# doesn't need to do it and their process id as reported by
# subprocess.Popen is not jhbuild's.
if '--gtk' in sys.argv[1:] and os.path.exists(os.path.join(script_dir, '..', '..', 'WebKitBuild', 'Dependencies')):
- cmd.insert(1, os.path.join(script_dir, '..', 'gtk', 'run-with-jhbuild'))
+ prefix = [os.path.join(script_dir, '..', 'jhbuild', 'jhbuild-wrapper'), '--gtk', 'run']
+ cmd = prefix + cmd
proc = subprocess.Popen(cmd, env=env)
try:
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
index 4bf8ec61e..70caef330 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
@@ -28,6 +28,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import re
+
from .attachment import Attachment
@@ -123,3 +125,12 @@ class Bug(object):
return True
return False
+ def commit_revision(self):
+ # Sort the comments in reverse order as we want the latest committed revision.
+ r = re.compile("Committed r(?P<svn_revision>\d+)")
+ for comment in sorted(self.comments(), reverse=True):
+ rev = r.search(comment['text'])
+ if rev:
+ return int(rev.group('svn_revision'))
+
+ return None
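
(Standalone sketch of the revision-extraction logic added here; the comment structure, a list of dicts with a 'text' key, is taken from the unit tests below. Iterating newest-first with reversed() is an assumption about comment ordering; the patch itself sorts the comment dicts instead.)

import re

_COMMITTED_RE = re.compile(r"Committed r(?P<svn_revision>\d+)")

def latest_committed_revision(comments):
    # Walk comments from newest to oldest and return the most recent "Committed rNNNNN".
    for comment in reversed(comments):
        match = _COMMITTED_RE.search(comment['text'])
        if match:
            return int(match.group('svn_revision'))
    return None

# Example, mirroring the test data added below:
comments = [{'text': 'Comment before'},
            {'text': 'Committed r138776: <http://trac.webkit.org/changeset/138776>'},
            {'text': 'Committed r138976: <http://trac.webkit.org/changeset/138976>'}]
print(latest_committed_revision(comments))  # -> 138976
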
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py
index f20c6010c..b90b915a8 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from .bug import Bug
@@ -45,3 +45,39 @@ class BugTest(unittest.TestCase):
bugzilla=None)
self.assertTrue(bug.is_in_comments("Message3."))
self.assertFalse(bug.is_in_comments("Message."))
+
+ def test_commit_revision(self):
+ bug = Bug({"comments": []}, bugzilla=None)
+ self.assertEqual(bug.commit_revision(), None)
+
+ bug = Bug({"comments": [
+ {"text": "Comment 1"},
+ {"text": "Comment 2"},
+ ]}, bugzilla=None)
+ self.assertEqual(bug.commit_revision(), None)
+
+ bug = Bug({"comments": [
+ {"text": "Committed r138776: <http://trac.webkit.org/changeset/138776>"},
+ ]}, bugzilla=None)
+ self.assertEqual(bug.commit_revision(), 138776)
+
+ bug = Bug({"comments": [
+ {"text": "(From update of attachment 181269) Clearing flags on attachment: 181269 Committed r138776: <http://trac.webkit.org/changeset/138776>"},
+ ]}, bugzilla=None)
+ self.assertEqual(bug.commit_revision(), 138776)
+
+ bug = Bug({"comments": [
+ {"text": "Comment before"},
+ {"text": "(From update of attachment 181269) Clearing flags on attachment: 181269 Committed r138776: <http://trac.webkit.org/changeset/138776>"},
+ {"text": "Comment after"},
+ ]}, bugzilla=None)
+ self.assertEqual(bug.commit_revision(), 138776)
+
+ bug = Bug({"comments": [
+ {"text": "Comment before"},
+ {"text": "(From update of attachment 181269) Clearing flags on attachment: 181269 Committed r138776: <http://trac.webkit.org/changeset/138776>"},
+ {"text": "Comment Middle"},
+ {"text": "(From update of attachment 181280) Clearing flags on attachment: 181280 Committed r138976: <http://trac.webkit.org/changeset/138976>"},
+ {"text": "Comment After"},
+ ]}, bugzilla=None)
+ self.assertEqual(bug.commit_revision(), 138976)
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py
index 957f04dca..bcd41b78e 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py
@@ -531,7 +531,7 @@ class Bugzilla(object):
# FIXME: Use enum instead of two booleans
def _commit_queue_flag(self, mark_for_landing, mark_for_commit_queue):
if mark_for_landing:
- user = self.committers.account_by_email(self.username)
+ user = self.committers.contributor_by_email(self.username)
mark_for_commit_queue = True
if not user:
_log.warning("Your Bugzilla login is not listed in committers.py. Uploading with cq? instead of cq+")
@@ -629,7 +629,7 @@ class Bugzilla(object):
# FIXME: There has to be a more concise way to write this method.
def _check_create_bug_response(self, response_html):
- match = re.search("<title>Bug (?P<bug_id>\d+) Submitted</title>",
+ match = re.search("<title>Bug (?P<bug_id>\d+) Submitted[^<]*</title>",
response_html)
if match:
return match.group('bug_id')
@@ -729,18 +729,12 @@ class Bugzilla(object):
attachment_id,
flag_name,
flag_value,
- comment_text=None,
- additional_comment_text=None):
+ comment_text=None):
# FIXME: We need a way to test this function on a live bugzilla
# instance.
self.authenticate()
-
- # FIXME: additional_comment_text seems useless and should be merged into comment-text.
- if additional_comment_text:
- comment_text += "\n\n%s" % additional_comment_text
_log.info(comment_text)
-
self.browser.open(self.attachment_url_for_id(attachment_id, 'edit'))
self.browser.select_form(nr=1)
@@ -817,11 +811,7 @@ class Bugzilla(object):
if not self._has_control(self.browser, "assigned_to"):
_log.warning("""Failed to assign bug to you (can't find assigned_to) control.
-Do you have EditBugs privileges at bugs.webkit.org?
-https://bugs.webkit.org/userprefs.cgi?tab=permissions
-
-If not, you should email webkit-committers@lists.webkit.org or ask in #webkit
-for someone to add EditBugs to your bugs.webkit.org account.""")
+Ignore this message if you don't have EditBugs privileges (https://bugs.webkit.org/userprefs.cgi?tab=permissions)""")
return
if comment_text:
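
(The relaxed title pattern above can be exercised on its own; both sample titles come from the test__check_create_bug_response test added further down, whose variable names suggest Bugzilla 3.2.3 emits the bare title while 4.2.5 appends the bug summary. Sketch only, not part of the patch.)

import re

TITLE_RE = re.compile(r"<title>Bug (?P<bug_id>\d+) Submitted[^<]*</title>")

for title in ("<title>Bug 101640 Submitted</title>",
              "<title>Bug 101640 Submitted &ndash; Testing webkit-patch again</title>"):
    print(TITLE_RE.search(title).group('bug_id'))  # -> 101640 in both cases
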
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py
index 473a9fa6e..2447ed284 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py
@@ -385,10 +385,9 @@ class MockBugzilla(object):
attachment_id,
flag_name,
flag_value,
- comment_text=None,
- additional_comment_text=None):
- _log.info("MOCK setting flag '%s' to '%s' on attachment '%s' with comment '%s' and additional comment '%s'" % (
- flag_name, flag_value, attachment_id, comment_text, additional_comment_text))
+ comment_text=None):
+ _log.info("MOCK setting flag '%s' to '%s' on attachment '%s' with comment '%s'" % (
+ flag_name, flag_value, attachment_id, comment_text))
def post_comment_to_bug(self, bug_id, comment_text, cc=None):
_log.info("MOCK bug comment: bug_id=%s, cc=%s\n--- Begin comment ---\n%s\n--- End comment ---\n" % (
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py
index 90e4c83fc..7c5a56d06 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
import datetime
import StringIO
@@ -85,9 +85,9 @@ class BugzillaTest(unittest.TestCase):
def test_url_creation(self):
# FIXME: These would be all better as doctests
bugs = Bugzilla()
- self.assertEqual(None, bugs.bug_url_for_bug_id(None))
- self.assertEqual(None, bugs.short_bug_url_for_bug_id(None))
- self.assertEqual(None, bugs.attachment_url_for_id(None))
+ self.assertIsNone(bugs.bug_url_for_bug_id(None))
+ self.assertIsNone(bugs.short_bug_url_for_bug_id(None))
+ self.assertIsNone(bugs.attachment_url_for_id(None))
def test_parse_bug_id(self):
# Test that we can parse the urls we produce.
@@ -198,7 +198,7 @@ Ignore this bug. Just for testing failure modes of webkit-patch and the commit-
# FIXME: This should move to a central location and be shared by more unit tests.
def _assert_dictionaries_equal(self, actual, expected):
# Make sure we aren't parsing more or less than we expect
- self.assertEqual(sorted(actual.keys()), sorted(expected.keys()))
+ self.assertItemsEqual(actual.keys(), expected.keys())
for key, expected_value in expected.items():
self.assertEqual(actual[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, actual[key], expected_value)))
@@ -305,8 +305,7 @@ Ignore this bug. Just for testing failure modes of webkit-patch and the commit-
bugzilla.committers = CommitterList(reviewers=[Reviewer("WebKit Reviewer", "reviewer@webkit.org")],
committers=[Committer("WebKit Committer", "committer@webkit.org")],
- contributors=[Contributor("WebKit Contributor", "contributor@webkit.org")],
- watchers=[])
+ contributors=[Contributor("WebKit Contributor", "contributor@webkit.org")])
def assert_commit_queue_flag(mark_for_landing, mark_for_commit_queue, expected, username=None):
bugzilla.username = username
@@ -337,6 +336,15 @@ Ignore this bug. Just for testing failure modes of webkit-patch and the commit-
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='reviewer@webkit.org')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='reviewer@webkit.org')
+ def test__check_create_bug_response(self):
+ bugzilla = Bugzilla()
+
+ title_html_bugzilla_323 = "<title>Bug 101640 Submitted</title>"
+ self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_323), '101640')
+
+ title_html_bugzilla_425 = "<title>Bug 101640 Submitted &ndash; Testing webkit-patch again</title>"
+ self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_425), '101640')
+
class BugzillaQueriesTest(unittest.TestCase):
_sample_request_page = """
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
index d6804d5b7..495159630 100644
--- a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot.py
@@ -85,10 +85,6 @@ class Builder(object):
def fetch_layout_test_results(self, results_url):
# FIXME: This should cache that the result was a 404 and stop hitting the network.
results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "full_results.json"))
- if not results_file:
- results_file = NetworkTransaction(convert_404_to_None=True).run(lambda: self._fetch_file_from_results(results_url, "results.html"))
-
- # results_from_string accepts either ORWT html or NRWT json.
return LayoutTestResults.results_from_string(results_file)
def url_encoded_name(self):
@@ -135,7 +131,7 @@ class Builder(object):
return form.find_control("username")
except Exception, e:
return False
- # ignore false positives for missing Browser methods - pylint: disable-msg=E1102
+ # ignore false positives for missing Browser methods - pylint: disable=E1102
self._browser.open(self.url())
self._browser.select_form(predicate=predicate)
self._browser["username"] = username
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
index 547a8e6ff..26b7b9799 100644
--- a/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.net.buildbot import BuildBot, Builder, Build
@@ -71,14 +71,14 @@ class BuilderTest(unittest.TestCase):
self.assertEqual(regression_window.failing_build().revision(), 1004)
regression_window = self.builder.find_regression_window(self.builder.build(10), look_back_limit=2)
- self.assertEqual(regression_window.build_before_failure(), None)
+ self.assertIsNone(regression_window.build_before_failure())
self.assertEqual(regression_window.failing_build().revision(), 1008)
def test_none_build(self):
self.builder._fetch_build = lambda build_number: None
regression_window = self.builder.find_regression_window(self.builder.build(10))
- self.assertEqual(regression_window.build_before_failure(), None)
- self.assertEqual(regression_window.failing_build(), None)
+ self.assertIsNone(regression_window.build_before_failure())
+ self.assertIsNone(regression_window.failing_build())
def test_flaky_tests(self):
self._install_fetch_build(lambda build_number: ["test1"] if build_number % 2 else ["test2"])
@@ -106,12 +106,12 @@ class BuilderTest(unittest.TestCase):
def test_find_blameworthy_regression_window(self):
self.assertEqual(self.builder.find_blameworthy_regression_window(10).revisions(), [1004])
- self.assertEqual(self.builder.find_blameworthy_regression_window(10, look_back_limit=2), None)
+ self.assertIsNone(self.builder.find_blameworthy_regression_window(10, look_back_limit=2))
# Flakey test avoidance requires at least 2 red builds:
- self.assertEqual(self.builder.find_blameworthy_regression_window(4), None)
+ self.assertIsNone(self.builder.find_blameworthy_regression_window(4))
self.assertEqual(self.builder.find_blameworthy_regression_window(4, avoid_flakey_tests=False).revisions(), [1004])
# Green builder:
- self.assertEqual(self.builder.find_blameworthy_regression_window(3), None)
+ self.assertIsNone(self.builder.find_blameworthy_regression_window(3))
def test_build_caching(self):
self.assertEqual(self.builder.build(10), self.builder.build(10))
@@ -148,7 +148,7 @@ class BuilderTest(unittest.TestCase):
}
return build_dictionary
buildbot._fetch_build_dictionary = mock_fetch_build_dictionary
- self.assertNotEqual(builder._fetch_build(1), None)
+ self.assertIsNotNone(builder._fetch_build(1))
class BuildTest(unittest.TestCase):
@@ -158,7 +158,7 @@ class BuildTest(unittest.TestCase):
builder._fetch_file_from_results = lambda results_url, file_name: None
build = Build(builder, None, None, None)
# Test that layout_test_results() returns None if the fetch fails.
- self.assertEqual(build.layout_test_results(), None)
+ self.assertIsNone(build.layout_test_results())
class BuildBotTest(unittest.TestCase):
@@ -263,16 +263,16 @@ class BuildBotTest(unittest.TestCase):
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/10")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r20%20%2810%29")
self.assertEqual(build.revision(), 20)
- self.assertEqual(build.is_green(), True)
+ self.assertTrue(build.is_green())
build = build.previous_build()
self.assertEqual(build.builder(), builder)
self.assertEqual(build.url(), "http://build.webkit.org/builders/Test%20Builder/builds/9")
self.assertEqual(build.results_url(), "http://build.webkit.org/results/Test%20Builder/r18%20%289%29")
self.assertEqual(build.revision(), 18)
- self.assertEqual(build.is_green(), False)
+ self.assertFalse(build.is_green())
- self.assertEqual(builder.build(None), None)
+ self.assertIsNone(builder.build(None))
_example_directory_listing = '''
<h1>Directory listing for /results/SnowLeopard Intel Leaks/</h1>
@@ -473,7 +473,3 @@ class BuildBotTest(unittest.TestCase):
b = Build(None, 123, 123, False)
b.results_url = self.results_url
self.assertEqual("some-url.zip", b.results_zip_url())
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/net/buildbot/chromiumbuildbot.py b/Tools/Scripts/webkitpy/common/net/buildbot/chromiumbuildbot.py
deleted file mode 100644
index 5030bba48..000000000
--- a/Tools/Scripts/webkitpy/common/net/buildbot/chromiumbuildbot.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (c) 2011, Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import webkitpy.common.config.urls as config_urls
-from webkitpy.common.net.buildbot.buildbot import Builder, BuildBot
-# FIXME: builders should probably be in webkitpy.common.config.
-from webkitpy.layout_tests.port.builders import builder_path_from_name
-
-
-class ChromiumBuilder(Builder):
- # The build.chromium.org builders store their results in a different
- # location than the build.webkit.org builders.
- def results_url(self):
- return "http://build.chromium.org/f/chromium/layout_test_results/%s" % builder_path_from_name(self._name)
-
- def accumulated_results_url(self):
- return self.results_url() + "/results/layout-test-results"
-
-
-class ChromiumBuildBot(BuildBot):
- _builder_factory = ChromiumBuilder
- _default_url = config_urls.chromium_buildbot_url
diff --git a/Tools/Scripts/webkitpy/common/net/credentials_unittest.py b/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
index 3659d69d1..beafa5880 100644
--- a/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
@@ -28,7 +28,7 @@
import os
import tempfile
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.credentials import Credentials
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.outputcapture import OutputCapture
@@ -92,7 +92,7 @@ password: "SECRETSAUCE"
def _is_mac_os_x(self):
return False
credentials = FakeCredentials("bugs.webkit.org")
- self.assertEqual(credentials._is_mac_os_x(), False)
+ self.assertFalse(credentials._is_mac_os_x())
self.assertEqual(credentials._credentials_from_keychain("foo"), ["foo", None])
def test_security_output_parse(self):
@@ -109,7 +109,7 @@ password: "SECRETSAUCE"
# by the test case CredentialsTest._assert_security_call (below).
outputCapture = OutputCapture()
outputCapture.capture_output()
- self.assertEqual(credentials._run_security_tool(), None)
+ self.assertIsNone(credentials._run_security_tool())
outputCapture.restore_output()
def _assert_security_call(self, username=None):
@@ -206,7 +206,3 @@ password: "SECRETSAUCE"
# FIXME: Using read_credentials here seems too broad as higher-priority
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(FakeUser), ("test@webkit.org", "NOMNOMNOM"))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py b/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
index bd41032cb..0bede97e6 100644
--- a/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/failuremap_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.buildbot import Build
from webkitpy.common.net.failuremap import *
diff --git a/Tools/Scripts/webkitpy/common/net/file_uploader.py b/Tools/Scripts/webkitpy/common/net/file_uploader.py
index 9b220b0d6..871295b11 100644
--- a/Tools/Scripts/webkitpy/common/net/file_uploader.py
+++ b/Tools/Scripts/webkitpy/common/net/file_uploader.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/common/net/htdigestparser.py b/Tools/Scripts/webkitpy/common/net/htdigestparser.py
deleted file mode 100644
index ee7d5405d..000000000
--- a/Tools/Scripts/webkitpy/common/net/htdigestparser.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# Copyright (C) 2011 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
-# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""htdigestparser - a parser for htdigest files"""
-
-import hashlib
-import string
-
-
-class HTDigestParser(object):
- def __init__(self, digest_file):
- self._entries = self.parse_file(digest_file)
-
- def authenticate(self, username, realm, password):
- hashed_password = hashlib.md5(':'.join((username, realm, password))).hexdigest()
- return [username, realm, hashed_password] in self.entries()
-
- def entries(self):
- return self._entries
-
- def parse_file(self, digest_file):
- entries = [line.rstrip().split(':') for line in digest_file]
-
- # Perform some sanity-checking to ensure the file is valid.
- valid_characters = set(string.hexdigits)
- for entry in entries:
- if len(entry) != 3:
- return []
- hashed_password = entry[-1]
- if len(hashed_password) != 32:
- return []
- if not set(hashed_password).issubset(valid_characters):
- return []
-
- return entries
diff --git a/Tools/Scripts/webkitpy/common/net/htdigestparser_unittest.py b/Tools/Scripts/webkitpy/common/net/htdigestparser_unittest.py
deleted file mode 100644
index a2a4ac938..000000000
--- a/Tools/Scripts/webkitpy/common/net/htdigestparser_unittest.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright (C) 2011 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
-# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import StringIO
-import os
-import unittest
-
-from webkitpy.common.net.htdigestparser import HTDigestParser
-
-
-class HTDigestParserTest(unittest.TestCase):
- def assertEntriesEqual(self, entries, additional_content=None):
- digest_file = self.fake_htdigest_file()
- if additional_content is not None:
- digest_file.seek(pos=0, mode=os.SEEK_END)
- digest_file.write(additional_content)
- digest_file.seek(pos=0, mode=os.SEEK_SET)
- self.assertEqual(entries, HTDigestParser(digest_file).entries())
-
- def test_authenticate(self):
- htdigest = HTDigestParser(self.fake_htdigest_file())
- self.assertTrue(htdigest.authenticate('user1', 'realm 1', 'password1'))
- self.assertTrue(htdigest.authenticate('user2', 'realm 2', 'password2'))
- self.assertTrue(htdigest.authenticate('user3', 'realm 1', 'password3'))
- self.assertTrue(htdigest.authenticate('user3', 'realm 3', 'password3'))
-
- self.assertFalse(htdigest.authenticate('user1', 'realm', 'password1'))
- self.assertFalse(htdigest.authenticate('user1', 'realm 2', 'password1'))
- self.assertFalse(htdigest.authenticate('user2', 'realm 2', 'password1'))
- self.assertFalse(htdigest.authenticate('user2', 'realm 1', 'password1'))
- self.assertFalse(htdigest.authenticate('', '', ''))
-
- def test_entries(self):
- entries = [
- ['user1', 'realm 1', '36b8aa27fa5e9051095d37b619f92762'],
- ['user2', 'realm 2', '14f827686fa97778f02fe1314a3337c8'],
- ['user3', 'realm 1', '1817fc8a24119cc57fbafc8a630ea5a5'],
- ['user3', 'realm 3', 'a05f5a2335e9d87bbe75bbe5e53248f0'],
- ]
- self.assertEntriesEqual(entries)
- self.assertEntriesEqual(entries, additional_content='')
-
- def test_empty_file(self):
- self.assertEqual([], HTDigestParser(StringIO.StringIO()).entries())
-
- def test_too_few_colons(self):
- self.assertEntriesEqual([], additional_content='user1:realm 1\n')
-
- def test_too_many_colons(self):
- self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f92762:garbage\n')
-
- def test_invalid_hash(self):
- self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f92762000000\n')
- self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f9276\n')
- self.assertEntriesEqual([], additional_content='user1:realm 1:36b8aa27fa5e9051095d37b619f9276z\n')
- self.assertEntriesEqual([], additional_content='user1:realm 1: 36b8aa27fa5e9051095d37b619f92762\n')
-
- def fake_htdigest_file(self):
- return StringIO.StringIO("""user1:realm 1:36b8aa27fa5e9051095d37b619f92762
-user2:realm 2:14f827686fa97778f02fe1314a3337c8
-user3:realm 1:1817fc8a24119cc57fbafc8a630ea5a5
-user3:realm 3:a05f5a2335e9d87bbe75bbe5e53248f0
-""")
diff --git a/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py b/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py
index bce9d855d..639979a02 100644
--- a/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/irc/ircproxy_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.irc.ircproxy import IRCProxy
from webkitpy.common.system.outputcapture import OutputCapture
diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults.py b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
index 74322c757..b8cb15769 100644
--- a/Tools/Scripts/webkitpy/common/net/layouttestresults.py
+++ b/Tools/Scripts/webkitpy/common/net/layouttestresults.py
@@ -25,9 +25,6 @@
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# A module for parsing results.html files generated by old-run-webkit-tests
-# This class is one big hack and only needs to exist until we transition to new-run-webkit-tests.
import logging
@@ -45,88 +42,6 @@ def path_for_layout_test(test_name):
return "LayoutTests/%s" % test_name
-class ORWTResultsHTMLParser(object):
- """This class knows how to parse old-run-webkit-tests results.html files."""
-
- stderr_key = u'Tests that had stderr output:'
- fail_key = u'Tests where results did not match expected results:'
- timeout_key = u'Tests that timed out:'
- # FIXME: This may need to be made aware of WebKitTestRunner results for WebKit2.
- crash_key = u'Tests that caused the DumpRenderTree tool to crash:'
- missing_key = u'Tests that had no expected results (probably new):'
- webprocess_crash_key = u'Tests that caused the Web process to crash:'
-
- expected_keys = [
- stderr_key,
- fail_key,
- crash_key,
- webprocess_crash_key,
- timeout_key,
- missing_key,
- ]
-
- @classmethod
- def _failures_from_fail_row(self, row):
- # Look at all anchors in this row, and guess what type
- # of new-run-webkit-test failures they equate to.
- failures = set()
- test_name = None
- for anchor in row.findAll("a"):
- anchor_text = unicode(anchor.string)
- if not test_name:
- test_name = anchor_text
- continue
- if anchor_text in ["expected image", "image diffs"] or '%' in anchor_text:
- failures.add(test_failures.FailureImageHashMismatch())
- elif anchor_text in ["expected", "actual", "diff", "pretty diff"]:
- failures.add(test_failures.FailureTextMismatch())
- else:
- _log.warning("Unhandled link text in results.html parsing: %s. Please file a bug against webkitpy." % anchor_text)
- # FIXME: Its possible the row contained no links due to ORWT brokeness.
- # We should probably assume some type of failure anyway.
- return failures
-
- @classmethod
- def _failures_from_row(cls, row, table_title):
- if table_title == cls.fail_key:
- return cls._failures_from_fail_row(row)
- if table_title == cls.crash_key:
- return [test_failures.FailureCrash()]
- if table_title == cls.webprocess_crash_key:
- return [test_failures.FailureCrash(process_name="WebProcess")]
- if table_title == cls.timeout_key:
- return [test_failures.FailureTimeout()]
- if table_title == cls.missing_key:
- return [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
- return None
-
- @classmethod
- def _test_result_from_row(cls, row, table_title):
- test_name = unicode(row.find("a").string)
- failures = cls._failures_from_row(row, table_title)
- # TestResult is a class designed to work with new-run-webkit-tests.
- # old-run-webkit-tests does not save quite enough information in results.html for us to parse.
- # FIXME: It's unclear if test_name should include LayoutTests or not.
- return test_results.TestResult(test_name, failures)
-
- @classmethod
- def _parse_results_table(cls, table):
- table_title = unicode(table.findPreviousSibling("p").string)
- if table_title not in cls.expected_keys:
- # This Exception should only ever be hit if run-webkit-tests changes its results.html format.
- raise Exception("Unhandled title: %s" % table_title)
- # Ignore stderr failures. Everyone ignores them anyway.
- if table_title == cls.stderr_key:
- return []
- # FIXME: We might end with two TestResults object for the same test if it appears in more than one row.
- return [cls._test_result_from_row(row, table_title) for row in table.findAll("tr")]
-
- @classmethod
- def parse_results_html(cls, page):
- tables = BeautifulSoup(page).findAll("table")
- return sum([cls._parse_results_table(table) for table in tables], [])
-
-
# FIXME: This should be unified with ResultsSummary or other NRWT layout tests code
# in the layout_tests package.
# This doesn't belong in common.net, but we don't have a better place for it yet.
@@ -135,12 +50,8 @@ class LayoutTestResults(object):
def results_from_string(cls, string):
if not string:
return None
- # For now we try to parse first as json, then as results.html
- # eventually we will remove the html fallback support.
test_results = ResultsJSONParser.parse_results_json(string)
if not test_results:
- test_results = ORWTResultsHTMLParser.parse_results_html(string)
- if not test_results:
return None
return cls(test_results)
@@ -150,7 +61,7 @@ class LayoutTestResults(object):
self._unit_test_failures = []
# FIXME: run-webkit-tests should store the --exit-after-N-failures value
- # (or some indication of early exit) somewhere in the results.html/results.json
+ # (or some indication of early exit) somewhere in the results.json
# file. Until it does, callers should set the limit to
# --exit-after-N-failures value used in that run. Consumers of LayoutTestResults
# may use that value to know if absence from the failure list means PASS.
diff --git a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
index 4131bdf85..ea4e9273f 100644
--- a/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/layouttestresults_unittest.py
@@ -26,121 +26,22 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
-from webkitpy.common.net.layouttestresults import LayoutTestResults, ORWTResultsHTMLParser
+from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
-class ORWTResultsHTMLParserTest(unittest.TestCase):
- _example_results_html = """
-<html>
-<head>
-<title>Layout Test Results</title>
-</head>
-<body>
-<p>Tests that had stderr output:</p>
-<table>
-<tr>
-<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/accessibility/aria-activedescendant-crash.html">accessibility/aria-activedescendant-crash.html</a></td>
-<td><a href="accessibility/aria-activedescendant-crash-stderr.txt">stderr</a></td>
-</tr>
-<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/http/tests/security/canvas-remote-read-svg-image.html">http/tests/security/canvas-remote-read-svg-image.html</a></td>
-<td><a href="http/tests/security/canvas-remote-read-svg-image-stderr.txt">stderr</a></td>
-</tr>
-</table><p>Tests that had no expected results (probably new):</p>
-<table>
-<tr>
-<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/fast/repaint/no-caret-repaint-in-non-content-editable-element.html">fast/repaint/no-caret-repaint-in-non-content-editable-element.html</a></td>
-<td><a href="fast/repaint/no-caret-repaint-in-non-content-editable-element-actual.txt">result</a></td>
-</tr>
-</table></body>
-</html>
-"""
-
- _example_results_html_with_failing_tests = """
-<html>
-<head>
-<title>Layout Test Results</title>
-</head>
-<body>
-<p>Tests where results did not match expected results:</p>
-<table>
-<tr>
-<td><a href="http://trac.webkit.org/export/91245/trunk/LayoutTests/compositing/plugins/composited-plugin.html">compositing/plugins/composited-plugin.html</a></td>
-<td>
-<a href="compositing/plugins/composited-plugin-expected.txt">expected</a>
-</td>
-<td>
-<a href="compositing/plugins/composited-plugin-actual.txt">actual</a>
-</td>
-<td>
-<a href="compositing/plugins/composited-plugin-diffs.txt">diff</a>
-</td>
-<td>
-<a href="compositing/plugins/composited-plugin-pretty-diff.html">pretty diff</a>
-</td>
-</tr>
-</table>
-<p>Tests that had stderr output:</p>
-<table>
-<tr>
-<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/accessibility/aria-activedescendant-crash.html">accessibility/aria-activedescendant-crash.html</a></td>
-<td><a href="accessibility/aria-activedescendant-crash-stderr.txt">stderr</a></td>
-</tr>
-<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/http/tests/security/canvas-remote-read-svg-image.html">http/tests/security/canvas-remote-read-svg-image.html</a></td>
-<td><a href="http/tests/security/canvas-remote-read-svg-image-stderr.txt">stderr</a></td>
-</tr>
-</table><p>Tests that had no expected results (probably new):</p>
-<table>
-<tr>
-<td><a href="/var/lib/buildbot/build/gtk-linux-64-release/build/LayoutTests/fast/repaint/no-caret-repaint-in-non-content-editable-element.html">fast/repaint/no-caret-repaint-in-non-content-editable-element.html</a></td>
-<td><a href="fast/repaint/no-caret-repaint-in-non-content-editable-element-actual.txt">result</a></td>
-</tr>
-</table></body>
-</html>
-"""
-
- def test_parse_layout_test_results(self):
- failures = [test_failures.FailureMissingResult(), test_failures.FailureMissingImageHash(), test_failures.FailureMissingImage()]
- testname = 'fast/repaint/no-caret-repaint-in-non-content-editable-element.html'
- expected_results = [test_results.TestResult(testname, failures)]
-
- results = ORWTResultsHTMLParser.parse_results_html(self._example_results_html)
- self.assertEqual(expected_results, results)
-
-
- def test_failures_from_fail_row(self):
- row = BeautifulSoup("<tr><td><a>test.hml</a></td><td><a>expected image</a></td><td><a>25%</a></td></tr>")
- test_name = unicode(row.find("a").string)
- # Even if the caller has already found the test name, findAll inside _failures_from_fail_row will see it again.
- failures = OutputCapture().assert_outputs(self, ORWTResultsHTMLParser._failures_from_fail_row, [row])
- self.assertEqual(len(failures), 1)
- self.assertEqual(type(sorted(failures)[0]), test_failures.FailureImageHashMismatch)
-
- row = BeautifulSoup("<tr><td><a>test.hml</a><a>foo</a></td></tr>")
- expected_logs = "Unhandled link text in results.html parsing: foo. Please file a bug against webkitpy.\n"
- OutputCapture().assert_outputs(self, ORWTResultsHTMLParser._failures_from_fail_row, [row], expected_logs=expected_logs)
-
-
class LayoutTestResultsTest(unittest.TestCase):
-
def test_set_failure_limit_count(self):
results = LayoutTestResults([])
- self.assertEqual(results.failure_limit_count(), None)
+ self.assertIsNone(results.failure_limit_count())
results.set_failure_limit_count(10)
self.assertEqual(results.failure_limit_count(), 10)
def test_results_from_string(self):
- self.assertEqual(LayoutTestResults.results_from_string(None), None)
- self.assertEqual(LayoutTestResults.results_from_string(""), None)
- results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html)
- self.assertEqual(len(results.failing_tests()), 1)
-
- def test_tests_matching_failure_types(self):
- results = LayoutTestResults.results_from_string(ORWTResultsHTMLParserTest._example_results_html_with_failing_tests)
- failing_tests = results.tests_matching_failure_types([test_failures.FailureTextMismatch])
- self.assertEqual(len(results.failing_tests()), 2)
+ self.assertIsNone(LayoutTestResults.results_from_string(None))
+ self.assertIsNone(LayoutTestResults.results_from_string(""))
diff --git a/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py b/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py
index 3302dec80..67439f094 100644
--- a/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/networktransaction_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.networktransaction import NetworkTransaction, NetworkTimeout
from webkitpy.common.system.logtesting import LoggingTestCase
diff --git a/Tools/Scripts/webkitpy/common/net/omahaproxy.py b/Tools/Scripts/webkitpy/common/net/omahaproxy.py
deleted file mode 100644
index b7b481f4e..000000000
--- a/Tools/Scripts/webkitpy/common/net/omahaproxy.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright (C) 2012 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# This is the client to query http://omahaproxy.appspot.com/ to retrieve
-# chrome versions associated with WebKit commits.
-
-from webkitpy.common.net.networktransaction import NetworkTransaction
-from webkitpy.common.config import urls
-
-import json
-import urllib2
-
-
-class OmahaProxy(object):
- default_url = urls.omahaproxy_url
-
- chrome_platforms = {"linux": "Linux",
- "win": "Windows",
- "mac": "Mac",
- "cros": "Chrome OS",
- "cf": "Chrome Frame",
- "ios": "iOS"}
- chrome_channels = ["canary", "dev", "beta", "stable"]
-
- def __init__(self, url=default_url, browser=None):
- self._chrome_channels = set(self.chrome_channels)
- self.set_url(url)
- from webkitpy.thirdparty.autoinstalled.mechanize import Browser
- self._browser = browser or Browser()
-
- def set_url(self, url):
- self.url = url
-
- def _json_url(self):
- return "%s/all.json" % self.url
-
- def _get_json(self):
- return NetworkTransaction().run(lambda: urllib2.urlopen(self._json_url()).read())
-
- def get_revisions(self):
- revisions_json = json.loads(self._get_json())
- revisions = []
- for platform in revisions_json:
- for version in platform["versions"]:
- try:
- row = {
- "commit": int(version["base_webkit_revision"]),
- "channel": version["channel"],
- "platform": self.chrome_platforms.get(platform["os"], platform["os"]),
- "date": version["date"],
- }
- assert(version["channel"] in self._chrome_channels)
- revisions.append(row)
- except ValueError:
- next
- return revisions
diff --git a/Tools/Scripts/webkitpy/common/net/omahaproxy_unittest.py b/Tools/Scripts/webkitpy/common/net/omahaproxy_unittest.py
deleted file mode 100644
index f3e5be358..000000000
--- a/Tools/Scripts/webkitpy/common/net/omahaproxy_unittest.py
+++ /dev/null
@@ -1,139 +0,0 @@
-# Copyright (C) 2012 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-# Unit test for omahaproxy.py
-
-import unittest
-
-from webkitpy.common.net.omahaproxy import OmahaProxy
-
-
-class MockOmahaProxy(OmahaProxy):
- def __init__(self, json):
- self._get_json = lambda: json
- OmahaProxy.__init__(self)
-
-
-class OmahaProxyTest(unittest.TestCase):
- example_omahaproxy_json = """[
- {"os": "win",
- "versions": [
- {"base_webkit_revision": "116185",
- "v8_ver": "3.10.8.1",
- "wk_ver": "536.11",
- "base_trunk_revision": 135598,
- "prev_version": "20.0.1128.0",
- "version": "20.0.1129.0",
- "date": "05\/07\/12",
- "prev_date": "05\/06\/12",
- "true_branch": "trunk",
- "channel": "canary",
- "branch_revision": "NA"},
- {"base_webkit_revision": "115687",
- "v8_ver": "3.10.6.0",
- "wk_ver": "536.10",
- "base_trunk_revision": 134666,
- "prev_version": "20.0.1123.1",
- "version": "20.0.1123.4",
- "date": "05\/04\/12",
- "prev_date": "05\/02\/12",
- "true_branch": "1123",
- "channel": "dev",
- "branch_revision": 135092}]},
- {"os": "linux",
- "versions": [
- {"base_webkit_revision": "115688",
- "v8_ver": "3.10.6.0",
- "wk_ver": "536.10",
- "base_trunk_revision": 134666,
- "prev_version": "20.0.1123.2",
- "version": "20.0.1123.4",
- "date": "05\/04\/12",
- "prev_date": "05\/02\/12",
- "true_branch": "1123",
- "channel": "dev",
- "branch_revision": 135092},
- {"base_webkit_revision": "112327",
- "v8_ver": "3.9.24.17",
- "wk_ver": "536.5",
- "base_trunk_revision": 129376,
- "prev_version": "19.0.1084.36",
- "version": "19.0.1084.41",
- "date": "05\/03\/12",
- "prev_date": "04\/25\/12",
- "true_branch": "1084",
- "channel": "beta",
- "branch_revision": 134854},
- {"base_webkit_revision": "*",
- "v8_ver": "3.9.24.17",
- "wk_ver": "536.5",
- "base_trunk_revision": 129376,
- "prev_version": "19.0.1084.36",
- "version": "19.0.1084.41",
- "date": "05\/03\/12",
- "prev_date": "04\/25\/12",
- "true_branch": "1084",
- "channel": "release",
- "branch_revision": 134854}]},
- {"os": "weird-platform",
- "versions": [
- {"base_webkit_revision": "115688",
- "v8_ver": "3.10.6.0",
- "wk_ver": "536.10",
- "base_trunk_revision": 134666,
- "prev_version": "20.0.1123.2",
- "version": "20.0.1123.4",
- "date": "05\/04\/12",
- "prev_date": "05\/02\/12",
- "true_branch": "1123",
- "channel": "dev",
- "branch_revision": 135092}]}]"""
-
- expected_revisions = [
- {"commit": 116185, "channel": "canary", "platform": "Windows", "date": "05/07/12"},
- {"commit": 115687, "channel": "dev", "platform": "Windows", "date": "05/04/12"},
- {"commit": 115688, "channel": "dev", "platform": "Linux", "date": "05/04/12"},
- {"commit": 112327, "channel": "beta", "platform": "Linux", "date": "05/03/12"},
- {"commit": 115688, "channel": "dev", "platform": "weird-platform", "date": "05/04/12"},
- ]
-
- def test_get_revisions(self):
- omahaproxy = MockOmahaProxy(self.example_omahaproxy_json)
- revisions = omahaproxy.get_revisions()
- self.assertEqual(len(revisions), 5)
- for revision in revisions:
- self.assertTrue("commit" in revision)
- self.assertTrue("channel" in revision)
- self.assertTrue("platform" in revision)
- self.assertTrue("date" in revision)
- self.assertEqual(len(revision.keys()), 4)
- self.assertEqual(revisions, self.expected_revisions)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py b/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py
index 867379f92..aaeb5dc6d 100644
--- a/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/resultsjsonparser_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.resultsjsonparser import ResultsJSONParser
from webkitpy.layout_tests.models import test_results
diff --git a/Tools/Scripts/webkitpy/common/net/statusserver.py b/Tools/Scripts/webkitpy/common/net/statusserver.py
index 99850f55d..a8d0beaf3 100644
--- a/Tools/Scripts/webkitpy/common/net/statusserver.py
+++ b/Tools/Scripts/webkitpy/common/net/statusserver.py
@@ -40,7 +40,7 @@ _log = logging.getLogger(__name__)
class StatusServer:
# FIXME: This should probably move to common.config.urls.
- default_host = "queues.webkit.org"
+ default_host = "webkit-queues.appspot.com"
def __init__(self, host=default_host, browser=None, bot_id=None):
self.set_host(host)
diff --git a/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py b/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py
index 1f0afd05b..91a42c93e 100644
--- a/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/statusserver_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.statusserver import StatusServer
from webkitpy.common.system.outputcapture import OutputCaptureTestCaseBase
diff --git a/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py b/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py
index 40e80236f..227139271 100644
--- a/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/unittestresults_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from unittestresults import UnitTestResults
@@ -34,10 +34,10 @@ from unittestresults import UnitTestResults
class UnitTestResultsTest(unittest.TestCase):
def test_nostring(self):
- self.assertEqual(None, UnitTestResults.results_from_string(None))
+ self.assertIsNone(UnitTestResults.results_from_string(None))
def test_emptystring(self):
- self.assertEqual(None, UnitTestResults.results_from_string(""))
+ self.assertIsNone(UnitTestResults.results_from_string(""))
def test_nofailures(self):
no_failures_xml = """<?xml version="1.0" encoding="UTF-8"?>
@@ -92,7 +92,3 @@ Expected: 6.28]]></failure>
</testsuites>"""
expected = ["ClassOne.TestOne", "ClassTwo.TestTwo"]
self.assertEqual(expected, UnitTestResults.results_from_string(multiple_failures_per_test_xml))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/net/web_mock.py b/Tools/Scripts/webkitpy/common/net/web_mock.py
index 423573c60..b53cb66ee 100644
--- a/Tools/Scripts/webkitpy/common/net/web_mock.py
+++ b/Tools/Scripts/webkitpy/common/net/web_mock.py
@@ -30,11 +30,14 @@ import StringIO
class MockWeb(object):
- def __init__(self):
+ def __init__(self, urls=None):
+ self.urls = urls or {}
self.urls_fetched = []
def get_binary(self, url, convert_404_to_None=False):
self.urls_fetched.append(url)
+ if url in self.urls:
+ return self.urls[url]
return "MOCK Web result, convert 404 to None=%s" % convert_404_to_None
diff --git a/Tools/Scripts/webkitpy/common/newstringio.py b/Tools/Scripts/webkitpy/common/newstringio.py
index 724fb11aa..7748d505d 100644
--- a/Tools/Scripts/webkitpy/common/newstringio.py
+++ b/Tools/Scripts/webkitpy/common/newstringio.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/common/newstringio_unittest.py b/Tools/Scripts/webkitpy/common/newstringio_unittest.py
index 1ee2fb91f..670472204 100644
--- a/Tools/Scripts/webkitpy/common/newstringio_unittest.py
+++ b/Tools/Scripts/webkitpy/common/newstringio_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -29,7 +28,7 @@
"""Unit tests for newstringio module."""
-import unittest
+import unittest2 as unittest
import newstringio
@@ -39,6 +38,3 @@ class NewStringIOTest(unittest.TestCase):
with newstringio.StringIO("foo") as f:
contents = f.read()
self.assertEqual(contents, "foo")
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/prettypatch_unittest.py b/Tools/Scripts/webkitpy/common/prettypatch_unittest.py
index 37fa844fd..3ae1c43fa 100644
--- a/Tools/Scripts/webkitpy/common/prettypatch_unittest.py
+++ b/Tools/Scripts/webkitpy/common/prettypatch_unittest.py
@@ -28,7 +28,7 @@
import os.path
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.executive import Executive
from webkitpy.common.prettypatch import PrettyPatch
@@ -77,7 +77,7 @@ Index: latin1_test
pretty_patch = PrettyPatch(Executive(), self._webkit_root())
pretty = pretty_patch.pretty_diff(self._diff_with_multiple_encodings)
self.assertTrue(pretty) # We got some output
- self.assertTrue(isinstance(pretty, str)) # It's a byte array, not unicode
+ self.assertIsInstance(pretty, str) # It's a byte array, not unicode
def test_pretty_print_empty_string(self):
if not self.check_ruby():
diff --git a/Tools/Scripts/webkitpy/common/read_checksum_from_png.py b/Tools/Scripts/webkitpy/common/read_checksum_from_png.py
index 70a0502b7..7431f47a3 100644
--- a/Tools/Scripts/webkitpy/common/read_checksum_from_png.py
+++ b/Tools/Scripts/webkitpy/common/read_checksum_from_png.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py b/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py
index a56bec2e4..751f7af90 100644
--- a/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py
+++ b/Tools/Scripts/webkitpy/common/read_checksum_from_png_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -23,7 +22,7 @@
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
-import unittest
+import unittest2 as unittest
from webkitpy.common import read_checksum_from_png
@@ -37,8 +36,4 @@ class ReadChecksumFromPngTest(unittest.TestCase):
# Test a file without the comment.
filehandle = StringIO.StringIO('''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x03 \x00\x00\x02X\x08\x02\x00\x00\x00\x15\x14\x15'\x00\x00\x16\xfeIDATx\x9c\xed\xdd[\x8cU\xe5\xc1\xff\xf15T\x18\x0ea,)\xa6\x80XZ<\x10\n\xd6H\xc4V\x88}\xb5\xa9\xd6r\xd5\x0bki0\xa6\xb5ih\xd2\xde\x98PHz\xd1\x02=\\q#\x01\x8b\xa5rJ\x8b\x88i\xacM\xc5h\x8cbMk(\x1ez@!\x0c\xd5\xd2\xc2\xb44\x1c\x848\x1dF(\xeb\x7f\xb1\xff\xd9\xef~g\xd6\xde3\xe0o\x10\xec\xe7sa6{\xd6z\xd6\xb3\xd7\xf3\xa8_7\xdbM[Y\x96\x05\x00\x009\xc3\xde\xeb\t\x00\x00\xbc\xdf\x08,\x00\x800\x81\x05\x00\x10&\xb0\x00\x00\xc2\x04\x16\x00@\x98\xc0\x02\x00\x08\x13X\x00\x00a\x02\x0b\x00 Lx01\x00\x84\t,\x00\x800\x81\x05\x00\x10\xd64\xb0\xda\x9a\xdb\xb6m\xdb\xb4i\xd3\xfa\x9fr\xf3\xcd7\x0f\xe5T\x07\xe5\xd4\xa9S\x8b\x17/\x1e?~\xfc\xf8\xf1\xe3\xef\xbf\xff\xfe\xf7z:M5\xbb\x87\x17\xcbUZ\x8f|V\xd7\xbd\x10\xb6\xcd{b\x88\xf6j\xb3\x9b?\x14\x9b\xa1>\xe6\xf9\xd9\xcf\x00\x17\x93''')
checksum = read_checksum_from_png.read_checksum(filehandle)
- self.assertEqual(None, checksum)
-
-
-if __name__ == '__main__':
- unittest.main()
+ self.assertIsNone(checksum)
diff --git a/Tools/Scripts/webkitpy/common/system/autoinstall.py b/Tools/Scripts/webkitpy/common/system/autoinstall.py
index 9d1f8cb2f..2e15887bb 100755..100644
--- a/Tools/Scripts/webkitpy/common/system/autoinstall.py
+++ b/Tools/Scripts/webkitpy/common/system/autoinstall.py
@@ -35,10 +35,11 @@ import codecs
import logging
import os
import shutil
+import stat
import sys
import tarfile
import tempfile
-import urllib
+import urllib2
import urlparse
import zipfile
@@ -173,7 +174,7 @@ class AutoInstaller(object):
return scratch_dir
def _url_downloaded_path(self, target_name):
- return os.path.join(self._target_dir, ".%s.url" % target_name)
+ return os.path.join(self._target_dir, ".%s.url" % target_name.replace('/', '_'))
def _is_downloaded(self, target_name, url):
version_path = self._url_downloaded_path(target_name)
@@ -283,17 +284,27 @@ class AutoInstaller(object):
return new_path
def _download_to_stream(self, url, stream):
- try:
- netstream = urllib.urlopen(url)
- except IOError, err:
- # Append existing Error message to new Error.
- message = ('Could not download Python modules from URL "%s".\n'
- " Make sure you are connected to the internet.\n"
- " You must be connected to the internet when "
- "downloading needed modules for the first time.\n"
- " --> Inner message: %s"
- % (url, err))
- raise IOError(message)
+ failures = 0
+ while True:
+ try:
+ netstream = urllib2.urlopen(url)
+ break
+ except IOError, err:
+ # Try multiple times
+ if failures < 5:
+ _log.warning("Failed to download %s, %s retrying" % (
+ url, err))
+ failures += 1
+ continue
+
+ # Append existing Error message to new Error.
+ message = ('Could not download Python modules from URL "%s".\n'
+ " Make sure you are connected to the internet.\n"
+ " You must be connected to the internet when "
+ "downloading needed modules for the first time.\n"
+ " --> Inner message: %s"
+ % (url, err))
+ raise IOError(message)
code = 200
if hasattr(netstream, "getcode"):
code = netstream.getcode()
@@ -319,8 +330,7 @@ class AutoInstaller(object):
return target_path
- def _install(self, scratch_dir, package_name, target_path, url,
- url_subpath):
+ def _install(self, scratch_dir, package_name, target_path, url, url_subpath, files_to_remove):
"""Install a python package from an URL.
This internal method overwrites the target path if the target
@@ -335,6 +345,13 @@ class AutoInstaller(object):
else:
source_path = os.path.join(path, url_subpath)
+ for filename in files_to_remove:
+ path = os.path.join(source_path, filename.replace('/', os.sep))
+ if os.path.exists(path):
+ # Pre-emptively change the permissions to 0777 to try to work around win32 permissions issues.
+ os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+ os.remove(path)
+
if os.path.exists(target_path):
if os.path.isdir(target_path):
shutil.rmtree(target_path, ignore_errors=True)
@@ -354,7 +371,7 @@ class AutoInstaller(object):
self._record_url_downloaded(package_name, url)
def install(self, url, should_refresh=False, target_name=None,
- url_subpath=None):
+ url_subpath=None, files_to_remove=None):
"""Install a python package from an URL.
Args:
@@ -382,10 +399,11 @@ class AutoInstaller(object):
url_subpath = os.path.normpath(url_subpath)
target_name = os.path.basename(url_subpath)
- target_path = os.path.join(self._target_dir, target_name)
+ target_path = os.path.join(self._target_dir, target_name.replace('/', os.sep))
if not should_refresh and self._is_downloaded(target_name, url):
return False
+ files_to_remove = files_to_remove or []
package_name = target_name.replace(os.sep, '.')
_log.info("Auto-installing package: %s" % package_name)
@@ -399,7 +417,8 @@ class AutoInstaller(object):
target_path=target_path,
scratch_dir=scratch_dir,
url=url,
- url_subpath=url_subpath)
+ url_subpath=url_subpath,
+ files_to_remove=files_to_remove)
except Exception, err:
# Append existing Error message to new Error.
message = ("Error auto-installing the %s package to:\n"
diff --git a/Tools/Scripts/webkitpy/common/system/crashlogs.py b/Tools/Scripts/webkitpy/common/system/crashlogs.py
index 270ca81ed..7ebe52241 100644
--- a/Tools/Scripts/webkitpy/common/system/crashlogs.py
+++ b/Tools/Scripts/webkitpy/common/system/crashlogs.py
@@ -26,16 +26,23 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import codecs
import re
class CrashLogs(object):
- def __init__(self, host):
+
+ PID_LINE_REGEX = re.compile(r'\s+Global\s+PID:\s+\[(?P<pid>\d+)\]')
+
+ def __init__(self, host, results_directory=None):
self._host = host
+ self._results_directory = results_directory
def find_newest_log(self, process_name, pid=None, include_errors=False, newer_than=None):
if self._host.platform.is_mac():
return self._find_newest_log_darwin(process_name, pid, include_errors, newer_than)
+ elif self._host.platform.is_win():
+ return self._find_newest_log_win(process_name, pid, include_errors, newer_than)
return None
def _log_directory_darwin(self):
@@ -72,3 +79,35 @@ class CrashLogs(object):
if include_errors and errors:
return errors
return None
+
+ def _find_newest_log_win(self, process_name, pid, include_errors, newer_than):
+ def is_crash_log(fs, dirpath, basename):
+ return basename.startswith("CrashLog")
+
+ logs = self._host.filesystem.files_under(self._results_directory, file_filter=is_crash_log)
+ errors = ''
+ for path in reversed(sorted(logs)):
+ try:
+ if not newer_than or self._host.filesystem.mtime(path) > newer_than:
+ log_file = self._host.filesystem.read_binary_file(path).decode('utf8', 'ignore')
+ match = self.PID_LINE_REGEX.search(log_file)
+ if match is None:
+ continue
+ if int(match.group('pid')) == pid:
+ return errors + log_file
+ except IOError, e:
+ print "IOError %s" % str(e)
+ if include_errors:
+ errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
+ except OSError, e:
+ print "OSError %s" % str(e)
+ if include_errors:
+ errors += "ERROR: Failed to read '%s': %s\n" % (path, str(e))
+ except UnicodeDecodeError, e:
+ print "UnicodeDecodeError %s" % str(e)
+ if include_errors:
+ errors += "ERROR: Failed to decode '%s' as utf8: %s\n" % (path, str(e))
+
+ if include_errors and errors:
+ return errors
+ return None
diff --git a/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py b/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
index 1f5c40a09..48034e806 100644
--- a/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/crashlogs_unittest.py
@@ -21,7 +21,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.filesystem_mock import MockFileSystem
@@ -29,6 +29,8 @@ from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.thirdparty.mock import Mock
+# Needed to support Windows port tests
+from webkitpy.port.win import WinPort
def make_mock_crash_report_darwin(process_name, pid):
return """Process: {process_name} [{pid}]
@@ -68,14 +70,169 @@ PCI Card: NVIDIA GeForce GT 120, sppci_displaycontroller, MXM-Slot
Serial ATA Device: OPTIARC DVD RW AD-5670S
""".format(process_name=process_name, pid=pid)
-class CrashLogsTest(unittest.TestCase):
- def assertLinesEqual(self, a, b):
- if hasattr(self, 'assertMultiLineEqual'):
- self.assertMultiLineEqual(a, b)
- else:
- self.assertEqual(a.splitlines(), b.splitlines())
+def make_mock_crash_report_win(process_name, pid):
+ return """Opened log file 'C:\Projects\WebKit\OpenSource\WebKitBuild\Release\bin32\layout-test-results\CrashLog_1d58_2013-06-03_12-21-20-110.txt'
+0:000> .srcpath "C:\Projects\WebKit\OpenSource"
+Source search path is: C:\Projects\WebKit\OpenSource
+0:000> !analyze -vv
+*******************************************************************************
+* *
+* Exception Analysis *
+* *
+*******************************************************************************
+
+*** ERROR: Symbol file could not be found. Defaulted to export symbols for C:\Projects\WebKit\OpenSource\WebKitBuild\Release\bin32\libdispatch.dll -
+*** ERROR: Symbol file could not be found. Defaulted to export symbols for C:\Windows\SYSTEM32\atiumdag.dll -
+
+FAULTING_IP:
+JavaScriptCore!JSC::JSActivation::getOwnPropertySlot+0 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp @ 146]
+01e3d070 55 push ebp
+
+EXCEPTION_RECORD: 00092cc8 -- (.exr 0x92cc8)
+.exr 0x92cc8
+ExceptionAddress: 01e3d070 (JavaScriptCore!JSC::JSActivation::getOwnPropertySlot)
+ ExceptionCode: c00000fd (Stack overflow)
+ ExceptionFlags: 00000000
+NumberParameters: 2
+ Parameter[0]: 00000001
+ Parameter[1]: 00092ffc
+
+FAULTING_THREAD: 00000e68
+PROCESS_NAME: {process_name}
+ERROR_CODE: (NTSTATUS) 0xc0000005 - The instruction at 0x%08lx referenced memory at 0x%08lx. The memory could not be %s.
+EXCEPTION_CODE: (NTSTATUS) 0xc0000005 - The instruction at 0x%08lx referenced memory at 0x%08lx. The memory could not be %s.
+EXCEPTION_CODE_STR: c0000005
+EXCEPTION_PARAMETER1: 00000000
+EXCEPTION_PARAMETER2: 00090000
+READ_ADDRESS: 00090000
+
+FOLLOWUP_IP:
+JavaScriptCore!JSC::JSActivation::getOwnPropertySlot+0 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp @ 146]
+01e3d070 55 push ebp
+
+WATSON_BKT_PROCSTAMP: 51a8f979
+WATSON_BKT_MODULE: MSVCR100.dll
+WATSON_BKT_MODVER: 10.0.40219.325
+WATSON_BKT_MODSTAMP: 4df2be1e
+WATSON_BKT_MODOFFSET: 160d7
+MODULE_VER_PRODUCT: Microsoft(R) Visual Studio(R) 2010
+BUILD_VERSION_STRING: 6.2.9200.16384 (win8_rtm.120725-1247)
+NTGLOBALFLAG: 0
+APPLICATION_VERIFIER_FLAGS: 0
+APP: {process_name}
+
+ANALYSIS_SESSION_HOST: FULGBR-PC
+
+ANALYSIS_SESSION_TIME: 06-03-2013 12:21:20.0111
+
+CONTEXT: 00092d18 -- (.cxr 0x92d18)
+.cxr 0x92d18
+eax=01e3d070 ebx=000930bc ecx=7fe03ed0 edx=0751e168 esi=07a7ff98 edi=0791ff78
+eip=01e3d070 esp=00093000 ebp=0009306c iopl=0 nv up ei ng nz ac po cy
+cs=0023 ss=002b ds=002b es=002b fs=0053 gs=002b efl=00210293
+JavaScriptCore!JSC::JSActivation::getOwnPropertySlot:
+01e3d070 55 push ebp
+.cxr
+Resetting default scope
+
+RECURRING_STACK: From frames 0x14 to 0x1d
+
+THREAD_ATTRIBUTES:
+
+[ GLOBAL ]
+
+ Global PID: [{pid}]
+ Global Thread_Count: [19]
+ Global PageSize: [4096]
+ Global ModList_SHA1_Hash: [aacef4e7e83b9bddc9cd0cc094dac88d531ea4a3]
+ Global CommandLine: [C:\Projects\WebKit\OpenSource\WebKitBuild\Release\bin32\{process_name} -]
+ Global Desktop_Name: [Winsta0\Default]
+ Global ProcessName: [{process_name}]
+ Global Debugger_CPU_Architecture: [X86]
+ Global CPU_ProcessorCount: [24]
+ Global CPU_MHZ: [1596]
+ Global CPU_Architecture: [X86]
+ Global CPU_Family: [6]
+ Global CPU_Model: [12]
+ Global CPU_Stepping: [2]
+ Global CPU_VendorString: [GenuineIntel]
+ Global LoadedModule_Count: [82]
+ Global ProcessBeingDebugged
+ Global GFlags: [0]
+ Global Application_Verifer_Flags: [0]
+ Global FinalExh: [2012093943]
+ Global SystemUpTime: [3 days 23:52:56.000]
+ Global SystemUpTime: [345176]
+ Global ProcessUpTime: [0 days 0:00:00.000]
+ Global ProcessUpTime: [0]
+ Global CurrentTimeDate: [Mon Jun 3 12:21:20.000 2013 (UTC - 7:00)]
+ Global CurrentTimeDate: [1370287280]
+ Global ProductType: [1]
+ Global SuiteMask: [272]
+ Global ApplicationName: [{process_name}]
+ Global ASLR_Enabled
+ Global SafeSEH_Enabled
+
+FAULT_INSTR_CODE: 83ec8b55
+
+FAULTING_SOURCE_LINE: c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp
+
+FAULTING_SOURCE_FILE: c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp
+
+FAULTING_SOURCE_LINE_NUMBER: 146
+
+SYMBOL_STACK_INDEX: 0
+
+SYMBOL_NAME: javascriptcore!JSC::JSActivation::getOwnPropertySlot+92ffc
+
+FOLLOWUP_NAME: MachineOwner
+
+MODULE_NAME: JavaScriptCore
+
+IMAGE_NAME: JavaScriptCore.dll
+
+DEBUG_FLR_IMAGE_TIMESTAMP: 51ace473
+
+STACK_COMMAND: .cxr 00092D18 ; kb ; dps 93000 ; kb
+FAILURE_BUCKET_ID: STACK_OVERFLOW_c0000005_JavaScriptCore.dll!JSC::JSActivation::getOwnPropertySlot
+
+BUCKET_ID: APPLICATION_FAULT_STACK_OVERFLOW_INVALID_POINTER_READ_javascriptcore!JSC::JSActivation::getOwnPropertySlot+92ffc
+
+ANALYSIS_SESSION_ELAPSED_TIME: 18df
+
+Followup: MachineOwner
+---------
+
+0:000> ~*kpn
+
+. 0 Id: 18e0.e68 Suspend: 1 Teb: 7ffdd000 Unfrozen
+ # ChildEBP RetAddr
+00 00092a08 7261ece1 MSVCR100!_alloca_probe+0x27
+01 00092a4c 7261a5d0 MSVCR100!_write+0x95
+02 00092a6c 7261ef6b MSVCR100!_flush+0x3b
+03 00092a7c 7261ef1c MSVCR100!_fflush_nolock+0x1c
+04 00092ab4 1000f814 MSVCR100!fflush+0x30
+05 00092ac8 77c0084e DumpRenderTree_10000000!exceptionFilter(struct _EXCEPTION_POINTERS * __formal = 0x852ac807)+0x24 [c:\projects\webkit\opensource\tools\dumprendertree\win\dumprendertree.cpp @ 1281]
+06 00092b60 77e8bf2c KERNELBASE!UnhandledExceptionFilter+0x164
+07 00092b68 77e530b4 ntdll!__RtlUserThreadStart+0x57
+08 00092b7c 77e15246 ntdll!_EH4_CallFilterFunc+0x12
+09 00092ba4 77e151b1 ntdll!_except_handler4_common+0x8e
+0a 00092bc4 77e52e71 ntdll!_except_handler4+0x20
+0b 00092be8 77e52e43 ntdll!ExecuteHandler2+0x26
+0c 00092cb0 77e52cbb ntdll!ExecuteHandler+0x24
+0d 00092cb0 01e3d070 ntdll!KiUserExceptionDispatcher+0xf
+0e 00092ffc 01e67d25 JavaScriptCore!JSC::JSActivation::getOwnPropertySlot(class JSC::JSCell * cell = 0x07a7ff98, class JSC::ExecState * exec = 0x0751e168, class JSC::PropertyName propertyName = class JSC::PropertyName, class JSC::PropertySlot * slot = 0x000930bc) [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsactivation.cpp @ 146]
+0f 0009306c 01e68837 JavaScriptCore!JSC::JSScope::resolveContainingScopeInternal<1,2>(class JSC::ExecState * callFrame = 0x0751e168, class JSC::Identifier * identifier = 0x7fe0ebc0, class JSC::PropertySlot * slot = 0x7fe03ed0, class WTF::Vector<JSC::ResolveOperation,0,WTF::CrashOnOverflow> * operations = 0x7fda16c0, struct JSC::PutToBaseOperation * putToBaseOperation = 0x00000000, bool __formal = false)+0x205 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsscope.cpp @ 247]
+10 00093090 01e65860 JavaScriptCore!JSC::JSScope::resolveContainingScope<1>(class JSC::ExecState * callFrame = 0x0751e168, class JSC::Identifier * identifier = 0x7fe0ebc0, class JSC::PropertySlot * slot = 0x000930bc, class WTF::Vector<JSC::ResolveOperation,0,WTF::CrashOnOverflow> * operations = 0x7fda16c0, struct JSC::PutToBaseOperation * putToBaseOperation = 0x00000000, bool isStrict = false)+0x27 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsscope.cpp @ 427]
+11 00093104 01dceeff JavaScriptCore!JSC::JSScope::resolve(class JSC::ExecState * callFrame = 0x0751e168, class JSC::Identifier * identifier = 0x7fe0ebc0, class WTF::Vector<JSC::ResolveOperation,0,WTF::CrashOnOverflow> * operations = 0x7fda16c0)+0xc0 [c:\projects\webkit\opensource\source\javascriptcore\runtime\jsscope.cpp @ 447]
+
+0:000> q
+quit:
+""".format(process_name=process_name, pid=pid)
+
+class CrashLogsTest(unittest.TestCase):
def test_find_log_darwin(self):
if not SystemHost().platform.is_mac():
return
@@ -95,15 +252,15 @@ class CrashLogsTest(unittest.TestCase):
filesystem = MockFileSystem(files)
crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
log = crash_logs.find_newest_log("DumpRenderTree")
- self.assertLinesEqual(log, newer_mock_crash_report)
+ self.assertMultiLineEqual(log, newer_mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28529)
- self.assertLinesEqual(log, newer_mock_crash_report)
+ self.assertMultiLineEqual(log, newer_mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28530)
- self.assertLinesEqual(log, mock_crash_report)
+ self.assertMultiLineEqual(log, mock_crash_report)
log = crash_logs.find_newest_log("DumpRenderTree", 28531)
- self.assertEqual(log, None)
+ self.assertIsNone(log)
log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
- self.assertEqual(log, None)
+ self.assertIsNone(log)
def bad_read(path):
raise IOError('IOError: No such file or directory')
@@ -113,10 +270,47 @@ class CrashLogsTest(unittest.TestCase):
filesystem.read_text_file = bad_read
log = crash_logs.find_newest_log("DumpRenderTree", 28531, include_errors=True)
- self.assertTrue('IOError: No such file or directory' in log)
+ self.assertIn('IOError: No such file or directory', log)
filesystem = MockFileSystem(files)
crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
filesystem.mtime = bad_mtime
log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0, include_errors=True)
- self.assertTrue('OSError: No such file or directory' in log)
+ self.assertIn('OSError: No such file or directory', log)
+
+ def test_find_log_win(self):
+ if not SystemHost().platform.is_win():
+ return
+
+ older_mock_crash_report = make_mock_crash_report_win('DumpRenderTree', 28528)
+ mock_crash_report = make_mock_crash_report_win('DumpRenderTree', 28530)
+ newer_mock_crash_report = make_mock_crash_report_win('DumpRenderTree', 28529)
+ other_process_mock_crash_report = make_mock_crash_report_win('FooProcess', 28527)
+ misformatted_mock_crash_report = 'Junk that should not appear in a crash report' + make_mock_crash_report_win('DumpRenderTree', 28526)[200:]
+ files = {}
+ files['~/CrashLog_1d58_2013-06-03_12-21-20-110.txt'] = older_mock_crash_report
+ files['~/CrashLog_abcd_2013-06-03_12-22-19-129.txt'] = mock_crash_report
+ files['~/CrashLog_2eff_2013-06-03_12-23-20-150.txt'] = newer_mock_crash_report
+ files['~/CrashLog_31a0_2013-06-03_12-24-22-119.txt'] = None
+ files['~/CrashLog_01a3_2013-06-03_12-25-23-120.txt'] = other_process_mock_crash_report
+ files['~/CrashLog_aadd_2013-06-03_12-26-24-121.txt'] = misformatted_mock_crash_report
+ filesystem = MockFileSystem(files)
+ mock_host = MockSystemHost(os_name='win', filesystem=filesystem)
+ crash_logs = CrashLogs(mock_host, "~")
+
+ log = crash_logs.find_newest_log("DumpRenderTree", 28529)
+ self.assertMultiLineEqual(log, newer_mock_crash_report)
+ log = crash_logs.find_newest_log("DumpRenderTree", 28530)
+ self.assertMultiLineEqual(log, mock_crash_report)
+ log = crash_logs.find_newest_log("DumpRenderTree", 28531)
+ self.assertIsNone(log)
+ log = crash_logs.find_newest_log("DumpRenderTree", newer_than=1.0)
+ self.assertIsNone(log)
+
+ def bad_read(path):
+ raise IOError('IOError: No such file or directory')
+
+ filesystem.read_text_file = bad_read
+ filesystem.read_binary_file = bad_read
+ log = crash_logs.find_newest_log("DumpRenderTree", 28531, include_errors=True)
+ self.assertIn('IOError: No such file or directory', log)
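For orientation, a minimal sketch of driving the new Windows-side CrashLogs lookup shown above with a mocked host; the crash-log file name, PID, and the abbreviated log contents below are invented and only need to contain the "Global PID" line the parser matches:

from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost_mock import MockSystemHost

# One fake CrashLog file whose "Global PID" line identifies the crashed process.
fake_log = " Global PID: [4242]\n Global ProcessName: [DumpRenderTree.exe]\n"
files = {'~/CrashLog_0001_2013-06-03_12-00-00-000.txt': fake_log}
host = MockSystemHost(os_name='win', filesystem=MockFileSystem(files))
crash_logs = CrashLogs(host, results_directory='~')
log = crash_logs.find_newest_log('DumpRenderTree', 4242)  # returns the log whose Global PID matches, else None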
diff --git a/Tools/Scripts/webkitpy/common/system/environment_unittest.py b/Tools/Scripts/webkitpy/common/system/environment_unittest.py
index 6558b51df..2868a65d2 100644
--- a/Tools/Scripts/webkitpy/common/system/environment_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/environment_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from .environment import Environment
diff --git a/Tools/Scripts/webkitpy/common/system/executive.py b/Tools/Scripts/webkitpy/common/system/executive.py
index 42a8122d3..ca45f2f35 100644
--- a/Tools/Scripts/webkitpy/common/system/executive.py
+++ b/Tools/Scripts/webkitpy/common/system/executive.py
@@ -46,14 +46,6 @@ _log = logging.getLogger(__name__)
class ScriptError(Exception):
- # This is a custom List.__str__ implementation to allow size limiting.
- def _string_from_args(self, args, limit=100):
- args_string = unicode(args)
- # We could make this much fancier, but for now this is OK.
- if len(args_string) > limit:
- return args_string[:limit - 3] + "..."
- return args_string
-
def __init__(self,
message=None,
script_args=None,
@@ -61,7 +53,7 @@ class ScriptError(Exception):
output=None,
cwd=None):
if not message:
- message = 'Failed to run "%s"' % self._string_from_args(script_args)
+ message = 'Failed to run "%s"' % repr(script_args)
if exit_code:
message += " exit_code: %d" % exit_code
if cwd:
@@ -92,6 +84,9 @@ class Executive(object):
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
+ def __init__(self):
+ self.pid_to_system_pid = {}
+
def _should_close_fds(self):
# We need to pass close_fds=True to work around Python bug #2320
# (otherwise we can hang when we kill DumpRenderTree when we are running
@@ -101,9 +96,6 @@ class Executive(object):
return sys.platform not in ('win32', 'cygwin')
def _run_command_with_teed_output(self, args, teed_output, **kwargs):
- args = map(unicode, args) # Popen will throw an exception if args are non-strings (like int())
- args = map(self._encode_argument_if_needed, args)
-
child_process = self.popen(args,
stdout=self.PIPE,
stderr=self.STDOUT,
@@ -153,6 +145,12 @@ class Executive(object):
return child_output
def cpu_count(self):
+ try:
+ cpus = int(os.environ.get('NUMBER_OF_PROCESSORS'))
+ if cpus > 0:
+ return cpus
+ except (ValueError, TypeError):
+ pass
return multiprocessing.cpu_count()
@staticmethod
@@ -272,26 +270,37 @@ class Executive(object):
return False
def running_pids(self, process_name_filter=None):
+ if sys.platform == "win32":
+ # FIXME: running_pids isn't implemented on native Windows yet...
+ return []
+
if not process_name_filter:
process_name_filter = lambda process_name: True
running_pids = []
-
- if sys.platform in ("win32", "cygwin"):
- # FIXME: running_pids isn't implemented on Windows yet...
- return []
-
- ps_process = self.popen(['ps', '-eo', 'pid,comm'], stdout=self.PIPE, stderr=self.PIPE)
- stdout, _ = ps_process.communicate()
- for line in stdout.splitlines():
- try:
- # In some cases the line can contain one or more
- # leading white-spaces, so strip it before split.
- pid, process_name = line.strip().split(' ', 1)
- if process_name_filter(process_name):
- running_pids.append(int(pid))
- except ValueError, e:
- pass
+ if sys.platform in ("cygwin",):
+ ps_process = self.run_command(['ps', '-e'], error_handler=Executive.ignore_error)
+ for line in ps_process.splitlines():
+ tokens = line.strip().split()
+ try:
+ pid, ppid, pgid, winpid, tty, uid, stime, process_name = tokens
+ if process_name_filter(process_name):
+ running_pids.append(int(pid))
+ self.pid_to_system_pid[int(pid)] = int(winpid)
+ except ValueError, e:
+ pass
+ else:
+ ps_process = self.popen(['ps', '-eo', 'pid,comm'], stdout=self.PIPE, stderr=self.PIPE)
+ stdout, _ = ps_process.communicate()
+ for line in stdout.splitlines():
+ try:
+ # In some cases the line can contain one or more
+ # leading white-spaces, so strip it before split.
+ pid, process_name = line.strip().split(' ', 1)
+ if process_name_filter(process_name):
+ running_pids.append(int(pid))
+ except ValueError, e:
+ pass
return sorted(running_pids)
@@ -307,6 +316,13 @@ class Executive(object):
while self.check_running_pid(pid):
time.sleep(0.25)
+ def wait_limited(self, pid, limit_in_seconds=None, check_frequency_in_seconds=None):
+ seconds_left = limit_in_seconds or 10
+ sleep_length = check_frequency_in_seconds or 1
+ while seconds_left > 0 and self.check_running_pid(pid):
+ seconds_left -= sleep_length
+ time.sleep(sleep_length)
+
def _windows_image_name(self, process_name):
name, extension = os.path.splitext(process_name)
if not extension:
@@ -315,6 +331,17 @@ class Executive(object):
process_name = "%s.exe" % name
return process_name
+ def interrupt(self, pid):
+ interrupt_signal = signal.SIGINT
+ # FIXME: The python docs seem to imply that platform == 'win32' may need to use signal.CTRL_C_EVENT
+ # http://docs.python.org/2/library/signal.html
+ try:
+ os.kill(pid, interrupt_signal)
+ except OSError:
+ # Silently ignore when the pid doesn't exist.
+ # It's impossible for callers to avoid race conditions with process shutdown.
+ pass
+
def kill_all(self, process_name):
"""Attempts to kill processes matching process_name.
Will fail silently if no process are found."""
@@ -365,9 +392,10 @@ class Executive(object):
input = input.encode(self._child_process_encoding())
return (self.PIPE, input)
- def _command_for_printing(self, args):
+ def command_for_printing(self, args):
"""Returns a print-ready string representing command args.
The string should be copy/paste ready for execution in a shell."""
+ args = self._stringify_args(args)
escaped_args = []
for arg in args:
if isinstance(arg, unicode):
@@ -390,8 +418,6 @@ class Executive(object):
"""Popen wrapper for convenience and to work around python bugs."""
assert(isinstance(args, list) or isinstance(args, tuple))
start_time = time.time()
- args = map(unicode, args) # Popen will throw an exception if args are non-strings (like int())
- args = map(self._encode_argument_if_needed, args)
stdin, string_to_communicate = self._compute_stdin(input)
stderr = self.STDOUT if return_stderr else None
@@ -413,7 +439,7 @@ class Executive(object):
# http://bugs.python.org/issue1731717
exit_code = process.wait()
- _log.debug('"%s" took %.2fs' % (self._command_for_printing(args), time.time() - start_time))
+ _log.debug('"%s" took %.2fs' % (self.command_for_printing(args), time.time() - start_time))
if return_exit_code:
return exit_code
@@ -457,8 +483,23 @@ class Executive(object):
return argument
return argument.encode(self._child_process_encoding())
- def popen(self, *args, **kwargs):
- return subprocess.Popen(*args, **kwargs)
+ def _stringify_args(self, args):
+ # Popen will throw an exception if args are non-strings (like int())
+ string_args = map(unicode, args)
+ # The Windows implementation of Popen cannot handle unicode strings. :(
+ return map(self._encode_argument_if_needed, string_args)
+
+ # The only required argument to popen is named "args"; the rest are optional keyword arguments.
+ def popen(self, args, **kwargs):
+ # FIXME: We should always be stringifying the args, but callers who pass shell=True
+ # expect that the exact bytes passed will get passed to the shell (even if they're wrongly encoded).
+ # shell=True is wrong for many other reasons, and we should remove this
+ # hack as soon as we can fix all callers to not use shell=True.
+ if kwargs.get('shell') == True:
+ string_args = args
+ else:
+ string_args = self._stringify_args(args)
+ return subprocess.Popen(string_args, **kwargs)
def run_in_parallel(self, command_lines_and_cwds, processes=None):
"""Runs a list of (cmd_line list, cwd string) tuples in parallel and returns a list of (retcode, stdout, stderr) tuples."""
diff --git a/Tools/Scripts/webkitpy/common/system/executive_mock.py b/Tools/Scripts/webkitpy/common/system/executive_mock.py
index a83f5b245..a3870b131 100644
--- a/Tools/Scripts/webkitpy/common/system/executive_mock.py
+++ b/Tools/Scripts/webkitpy/common/system/executive_mock.py
@@ -63,6 +63,7 @@ class MockExecutive(object):
self._running_pids = {'test-webkitpy': os.getpid()}
self._proc = None
self.calls = []
+ self.pid_to_system_pid = {}
def check_running_pid(self, pid):
return pid in self._running_pids.values()
@@ -86,6 +87,10 @@ class MockExecutive(object):
raise ScriptError("Exception for %s" % args, output="MOCK command output")
return "MOCK output of child process"
+ def command_for_printing(self, args):
+ string_args = map(unicode, args)
+ return " ".join(string_args)
+
def run_command(self,
args,
cwd=None,
@@ -108,6 +113,10 @@ class MockExecutive(object):
input_string = ", input=%s" % input
_log.info("MOCK run_command: %s, cwd=%s%s%s" % (args, cwd, env_string, input_string))
output = "MOCK output of child process"
+
+ if self._should_throw_when_run.intersection(args):
+ raise ScriptError("Exception for %s" % args, output="MOCK command output")
+
if self._should_throw:
raise ScriptError("MOCK ScriptError", output=output)
return output
@@ -170,7 +179,7 @@ class MockExecutive2(MockExecutive):
self.calls.append(args)
assert(isinstance(args, list) or isinstance(args, tuple))
if self._exception:
- raise self._exception # pylint: disable-msg=E0702
+ raise self._exception # pylint: disable=E0702
if self._run_command_fn:
return self._run_command_fn(args)
if return_exit_code:
diff --git a/Tools/Scripts/webkitpy/common/system/executive_unittest.py b/Tools/Scripts/webkitpy/common/system/executive_unittest.py
index 755955d34..f71201a04 100644
--- a/Tools/Scripts/webkitpy/common/system/executive_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/executive_unittest.py
@@ -33,34 +33,36 @@ import signal
import subprocess
import sys
import time
-import unittest
# Since we execute this script directly as part of the unit tests, we need to ensure
# that Tools/Scripts is in sys.path for the next imports to work correctly.
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
if script_dir not in sys.path:
sys.path.append(script_dir)
+third_party_py = os.path.join(script_dir, "webkitpy", "thirdparty", "autoinstalled")
+if third_party_py not in sys.path:
+ sys.path.append(third_party_py)
+
+import unittest2 as unittest
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.filesystem_mock import MockFileSystem
class ScriptErrorTest(unittest.TestCase):
- def test_string_from_args(self):
- error = ScriptError()
- self.assertEqual(error._string_from_args(None), 'None')
- self.assertEqual(error._string_from_args([]), '[]')
- self.assertEqual(error._string_from_args(map(str, range(30))), "['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17'...")
-
def test_message_with_output(self):
error = ScriptError('My custom message!', '', -1)
self.assertEqual(error.message_with_output(), 'My custom message!')
error = ScriptError('My custom message!', '', -1, 'My output.')
self.assertEqual(error.message_with_output(), 'My custom message!\n\nMy output.')
error = ScriptError('', 'my_command!', -1, 'My output.', '/Users/username/blah')
- self.assertEqual(error.message_with_output(), 'Failed to run "my_command!" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
+ self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
error = ScriptError('', 'my_command!', -1, 'ab' + '1' * 499)
- self.assertEqual(error.message_with_output(), 'Failed to run "my_command!" exit_code: -1\n\nLast 500 characters of output:\nb' + '1' * 499)
+ self.assertEqual(error.message_with_output(), 'Failed to run "\'my_command!\'" exit_code: -1\n\nLast 500 characters of output:\nb' + '1' * 499)
+
+ def test_message_with_tuple(self):
+ error = ScriptError('', ('my', 'command'), -1, 'My output.', '/Users/username/blah')
+ self.assertEqual(error.message_with_output(), 'Failed to run "(\'my\', \'command\')" exit_code: -1 cwd: /Users/username/blah\n\nMy output.')
def never_ending_command():
"""Arguments for a command that will never end (useful for testing process
@@ -113,6 +115,17 @@ class ExecutiveTest(unittest.TestCase):
executive.run_command(command_line('echo', 'foo'))
executive.run_command(tuple(command_line('echo', 'foo')))
+ def test_auto_stringify_args(self):
+ executive = Executive()
+ executive.run_command(command_line('echo', 1))
+ executive.popen(command_line('echo', 1), stdout=executive.PIPE).wait()
+ self.assertEqual('echo 1', executive.command_for_printing(['echo', 1]))
+
+ def test_popen_args(self):
+ executive = Executive()
+ # Explicitly naming the 'args' argument should not throw an exception.
+ executive.popen(args=command_line('echo', 1), stdout=executive.PIPE).wait()
+
def test_run_command_with_unicode(self):
"""Validate that it is safe to pass unicode() objects
to Executive.run* methods, and they will return unicode()
@@ -161,11 +174,11 @@ class ExecutiveTest(unittest.TestCase):
if sys.platform == "win32":
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=54790
# We seem to get either 0 or 1 here for some reason.
- self.assertTrue(process.wait() in (0, 1))
+ self.assertIn(process.wait(), (0, 1))
elif sys.platform == "cygwin":
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=98196
# cygwin seems to give us either SIGABRT or SIGKILL
- self.assertTrue(process.wait() in (-signal.SIGABRT, -signal.SIGKILL))
+ self.assertIn(process.wait(), (-signal.SIGABRT, -signal.SIGKILL))
else:
expected_exit_code = -signal.SIGKILL
self.assertEqual(process.wait(), expected_exit_code)
@@ -176,7 +189,7 @@ class ExecutiveTest(unittest.TestCase):
def serial_test_kill_all(self):
executive = Executive()
process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
- self.assertEqual(process.poll(), None) # Process is running
+ self.assertIsNone(process.poll()) # Process is running
executive.kill_all(never_ending_command()[0])
# Note: Can't use a ternary since signal.SIGTERM is undefined for sys.platform == "win32"
if sys.platform == "cygwin":
@@ -185,7 +198,7 @@ class ExecutiveTest(unittest.TestCase):
elif sys.platform == "win32":
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=54790
# We seem to get either 0 or 1 here for some reason.
- self.assertTrue(process.wait() in (0, 1))
+ self.assertIn(process.wait(), (0, 1))
else:
expected_exit_code = -signal.SIGTERM
self.assertEqual(process.wait(), expected_exit_code)
@@ -218,7 +231,7 @@ class ExecutiveTest(unittest.TestCase):
executive = Executive()
pids = executive.running_pids()
- self.assertTrue(os.getpid() in pids)
+ self.assertIn(os.getpid(), pids)
def serial_test_run_in_parallel(self):
# We run this test serially to avoid overloading the machine and throwing off the timing.
diff --git a/Tools/Scripts/webkitpy/common/system/file_lock.py b/Tools/Scripts/webkitpy/common/system/file_lock.py
index c542777f2..3ca8b3cba 100644
--- a/Tools/Scripts/webkitpy/common/system/file_lock.py
+++ b/Tools/Scripts/webkitpy/common/system/file_lock.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# All rights reserved.
diff --git a/Tools/Scripts/webkitpy/common/system/file_lock_integrationtest.py b/Tools/Scripts/webkitpy/common/system/file_lock_integrationtest.py
index 5cd27d11d..7b1b42695 100644
--- a/Tools/Scripts/webkitpy/common/system/file_lock_integrationtest.py
+++ b/Tools/Scripts/webkitpy/common/system/file_lock_integrationtest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# All rights reserved.
@@ -26,7 +25,7 @@
import os
import tempfile
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.file_lock import FileLock
diff --git a/Tools/Scripts/webkitpy/common/system/file_lock_mock.py b/Tools/Scripts/webkitpy/common/system/file_lock_mock.py
index e2c1d5cdf..f53081d1c 100644
--- a/Tools/Scripts/webkitpy/common/system/file_lock_mock.py
+++ b/Tools/Scripts/webkitpy/common/system/file_lock_mock.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
#
# All rights reserved.
@@ -30,7 +29,7 @@ class MockFileLock(object):
pass
def acquire_lock(self):
- pass
+ return True
def release_lock(self):
- pass
+ return True
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
index 16e9fadaa..ee0664ea0 100644
--- a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
+++ b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
@@ -395,7 +395,7 @@ class MockFileSystem(object):
def splitext(self, path):
idx = path.rfind('.')
if idx == -1:
- idx = 0
+ idx = len(path)
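+            # With no '.', behave like os.path.splitext(): the extension is empty rather than the whole path.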
return (path[0:idx], path[idx:])
@@ -452,7 +452,7 @@ class ReadableBinaryFileObject(object):
class ReadableTextFileObject(ReadableBinaryFileObject):
def __init__(self, fs, path, data):
- super(ReadableTextFileObject, self).__init__(fs, path, StringIO.StringIO(data))
+ super(ReadableTextFileObject, self).__init__(fs, path, StringIO.StringIO(data.decode("utf-8")))
def close(self):
self.data.close()
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py b/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py
index 391c1d954..a5983320a 100644
--- a/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/filesystem_mock_unittest.py
@@ -28,7 +28,7 @@
import os
import re
-import unittest
+import unittest2 as unittest
from webkitpy.common.system import filesystem_mock
@@ -82,7 +82,3 @@ class MockFileSystemTest(unittest.TestCase, filesystem_unittest.GenericFileSyste
'foo/../bar',
'foo/../bar/baz',
'../foo')
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py b/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py
index d656b2580..cd4ad6e4a 100644
--- a/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/filesystem_unittest.py
@@ -35,7 +35,7 @@ import os
import stat
import sys
import tempfile
-import unittest
+import unittest2 as unittest
from filesystem import FileSystem
@@ -209,6 +209,8 @@ class RealFileSystemTest(unittest.TestCase, GenericFileSystemTests):
unicode_text_string = u'\u016An\u012Dc\u014Dde\u033D'
hex_equivalent = '\xC5\xAA\x6E\xC4\xAD\x63\xC5\x8D\x64\x65\xCC\xBD'
+ malformed_text_hex = '\x4D\x69\x63\x72\x6F\x73\x6F\x66\x74\xAE\x20\x56\x69\x73\x75\x61\x6C\x20\x53\x74\x75\x64\x69\x6F\xAE\x20\x32\x30\x31\x30\x0D\x0A'
+ malformed_ignored_text_hex = '\x4D\x69\x63\x72\x6F\x73\x6F\x66\x74\x20\x56\x69\x73\x75\x61\x6C\x20\x53\x74\x75\x64\x69\x6F\x20\x32\x30\x31\x30\x0D\x0A'
try:
text_path = tempfile.mktemp(prefix='tree_unittest_')
binary_path = tempfile.mktemp(prefix='tree_unittest_')
@@ -219,6 +221,12 @@ class RealFileSystemTest(unittest.TestCase, GenericFileSystemTests):
fs.write_binary_file(binary_path, hex_equivalent)
text_contents = fs.read_text_file(binary_path)
self.assertEqual(text_contents, unicode_text_string)
+
+ self.assertRaises(ValueError, fs.write_text_file, binary_path, malformed_text_hex)
+ fs.write_binary_file(binary_path, malformed_text_hex)
+ self.assertRaises(ValueError, fs.read_text_file, binary_path)
+ text_contents = fs.read_binary_file(binary_path).decode('utf8', 'ignore')
+        self.assertEqual(text_contents, malformed_ignored_text_hex)
finally:
if text_path and fs.isfile(text_path):
os.remove(text_path)
@@ -254,7 +262,3 @@ class RealFileSystemTest(unittest.TestCase, GenericFileSystemTests):
self.assertEqual(fs.sep, os.sep)
self.assertEqual(fs.join("foo", "bar"),
os.path.join("foo", "bar"))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/system/logtesting.py b/Tools/Scripts/webkitpy/common/system/logtesting.py
index 0cfa6cb0a..1aba1726a 100644
--- a/Tools/Scripts/webkitpy/common/system/logtesting.py
+++ b/Tools/Scripts/webkitpy/common/system/logtesting.py
@@ -32,7 +32,7 @@ see the TestLogStream class, and perhaps also the LogTesting class.
"""
import logging
-import unittest
+import unittest2 as unittest
class TestLogStream(object):
diff --git a/Tools/Scripts/webkitpy/common/system/logutils_unittest.py b/Tools/Scripts/webkitpy/common/system/logutils_unittest.py
index 6d7cc4da4..252ebf4cc 100644
--- a/Tools/Scripts/webkitpy/common/system/logutils_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/logutils_unittest.py
@@ -24,7 +24,7 @@
import logging
import os
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.logtesting import LogTesting
from webkitpy.common.system.logtesting import TestLogStream
diff --git a/Tools/Scripts/webkitpy/common/system/outputcapture.py b/Tools/Scripts/webkitpy/common/system/outputcapture.py
index 26670d214..893b5e528 100644
--- a/Tools/Scripts/webkitpy/common/system/outputcapture.py
+++ b/Tools/Scripts/webkitpy/common/system/outputcapture.py
@@ -29,8 +29,8 @@
# Class for unittest support. Used for capturing stderr/stdout.
import logging
+import unittest # Don't use unittest2 here as the autoinstaller may not have it yet.
import sys
-import unittest
from StringIO import StringIO
@@ -94,15 +94,22 @@ class OutputCapture(object):
finally:
(stdout_string, stderr_string, logs_string) = self.restore_output()
- testcase.assertEqual(stdout_string, expected_stdout)
- testcase.assertEqual(stderr_string, expected_stderr)
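+        # Prefer assertMultiLineEqual (unittest2 / Python 2.7) when available; it prints readable diffs for multi-line strings.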
+ if hasattr(testcase, 'assertMultiLineEqual'):
+ testassert = testcase.assertMultiLineEqual
+ else:
+ testassert = testcase.assertEqual
+
+ testassert(stdout_string, expected_stdout)
+ testassert(stderr_string, expected_stderr)
if expected_logs is not None:
- testcase.assertEqual(logs_string, expected_logs)
+ testassert(logs_string, expected_logs)
# This is a little strange, but I don't know where else to return this information.
return return_value
class OutputCaptureTestCaseBase(unittest.TestCase):
+ maxDiff = None
+
def setUp(self):
unittest.TestCase.setUp(self)
self.output_capture = OutputCapture()
diff --git a/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py b/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py
index da4347c8d..7ef2e247a 100644
--- a/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/outputcapture_unittest.py
@@ -21,7 +21,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
@@ -43,7 +43,7 @@ class OutputCaptureTest(unittest.TestCase):
actual_stdout, actual_stderr, actual_logs = self.output.restore_output()
self.assertEqual('', actual_stdout)
self.assertEqual('', actual_stderr)
- self.assertEqual(expected_logs, actual_logs)
+ self.assertMultiLineEqual(expected_logs, actual_logs)
def test_initial_log_level(self):
self.output.capture_output()
diff --git a/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py b/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py
index 6a509f0c2..8d06916f8 100644
--- a/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/outputtee_unittest.py
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputtee import Tee, OutputTee
diff --git a/Tools/Scripts/webkitpy/common/system/path_unittest.py b/Tools/Scripts/webkitpy/common/system/path_unittest.py
index 7a719584d..118546e68 100644
--- a/Tools/Scripts/webkitpy/common/system/path_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/path_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
import sys
from webkitpy.common.system.systemhost import SystemHost
diff --git a/Tools/Scripts/webkitpy/common/system/platforminfo.py b/Tools/Scripts/webkitpy/common/system/platforminfo.py
index b2451f5f9..582e1996f 100644
--- a/Tools/Scripts/webkitpy/common/system/platforminfo.py
+++ b/Tools/Scripts/webkitpy/common/system/platforminfo.py
@@ -155,7 +155,7 @@ class PlatformInfo(object):
def _win_version_tuple_from_cmd(self):
# Note that this should only ever be called on windows, so this should always work.
- ver_output = self._executive.run_command(['cmd', '/c', 'ver'])
+ ver_output = self._executive.run_command(['cmd', '/c', 'ver'], decode_output=False)
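+        # Localized 'ver' output may not be valid UTF-8, so leave it undecoded and match the regexp against the raw bytes.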
match_object = re.search(r'(?P<major>\d)\.(?P<minor>\d)\.(?P<build>\d+)', ver_output)
assert match_object, 'cmd returned an unexpected version string: ' + ver_output
return tuple(map(int, match_object.groups()))
diff --git a/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py b/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
index 327229eb9..bdb0f8661 100644
--- a/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/platforminfo_unittest.py
@@ -28,7 +28,7 @@
import platform
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
@@ -79,12 +79,12 @@ class TestPlatformInfo(unittest.TestCase):
self.assertNotEquals(info.os_version, '')
self.assertNotEquals(info.display_name(), '')
self.assertTrue(info.is_mac() or info.is_win() or info.is_linux() or info.is_freebsd())
- self.assertNotEquals(info.terminal_width(), None)
+ self.assertIsNotNone(info.terminal_width())
if info.is_mac():
self.assertTrue(info.total_bytes_memory() > 0)
else:
- self.assertEqual(info.total_bytes_memory(), None)
+ self.assertIsNone(info.total_bytes_memory())
def test_os_name_and_wrappers(self):
info = self.make_info(fake_sys('linux2'))
@@ -172,14 +172,10 @@ class TestPlatformInfo(unittest.TestCase):
self.assertEqual(info.total_bytes_memory(), 1234)
info = self.make_info(fake_sys('win32', tuple([6, 1, 7600])))
- self.assertEqual(info.total_bytes_memory(), None)
+ self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('linux2'))
- self.assertEqual(info.total_bytes_memory(), None)
+ self.assertIsNone(info.total_bytes_memory())
info = self.make_info(fake_sys('freebsd9'))
- self.assertEqual(info.total_bytes_memory(), None)
-
-
-if __name__ == '__main__':
- unittest.main()
+ self.assertIsNone(info.total_bytes_memory())
diff --git a/Tools/Scripts/webkitpy/common/system/profiler.py b/Tools/Scripts/webkitpy/common/system/profiler.py
index 264a4e238..0208cf898 100644
--- a/Tools/Scripts/webkitpy/common/system/profiler.py
+++ b/Tools/Scripts/webkitpy/common/system/profiler.py
@@ -28,19 +28,44 @@
import logging
import re
+import itertools
_log = logging.getLogger(__name__)
class ProfilerFactory(object):
@classmethod
- def create_profiler(cls, host, executable_path, output_dir, identifier=None):
- if host.platform.is_mac():
- return Instruments(host, executable_path, output_dir, identifier)
- return GooglePProf(host, executable_path, output_dir, identifier)
+ def create_profiler(cls, host, executable_path, output_dir, profiler_name=None, identifier=None):
+ profilers = cls.profilers_for_platform(host.platform)
+ if not profilers:
+ return None
+ profiler_name = profiler_name or cls.default_profiler_name(host.platform)
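+        # Pick the profiler class whose registered name matches the requested (or default) name.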
+ profiler_class = next(itertools.ifilter(lambda profiler: profiler.name == profiler_name, profilers), None)
+ if not profiler_class:
+ return None
+        return profiler_class(host, executable_path, output_dir, identifier)
+
+ @classmethod
+ def default_profiler_name(cls, platform):
+ profilers = cls.profilers_for_platform(platform)
+ return profilers[0].name if profilers else None
+
+ @classmethod
+ def profilers_for_platform(cls, platform):
+        # GooglePProf requires TCMalloc/google-perftools, but it is available everywhere.
+ profilers_by_os_name = {
+ 'mac': [IProfiler, Sample, GooglePProf],
+ 'linux': [Perf, GooglePProf],
+ # Note: freebsd, win32 have no profilers defined yet, thus --profile will be ignored
+ # by default, but a profiler can be selected with --profiler=PROFILER explicitly.
+ }
+ return profilers_by_os_name.get(platform.os_name, [])
class Profiler(object):
+    # Used by ProfilerFactory to look up a profiler from the --profiler=NAME option.
+ name = None
+
def __init__(self, host, executable_path, output_dir, identifier=None):
self._host = host
self._executable_path = executable_path
@@ -61,10 +86,14 @@ class Profiler(object):
class SingleFileOutputProfiler(Profiler):
def __init__(self, host, executable_path, output_dir, output_suffix, identifier=None):
super(SingleFileOutputProfiler, self).__init__(host, executable_path, output_dir, identifier)
- self._output_path = self._host.workspace.find_unused_filename(self._output_dir, self._identifier, output_suffix)
+        # FIXME: Currently all reports are kept as test.*; until we fix that, search up to 1000 names before giving up.
+ self._output_path = self._host.workspace.find_unused_filename(self._output_dir, self._identifier, output_suffix, search_limit=1000)
+ assert(self._output_path)
class GooglePProf(SingleFileOutputProfiler):
+ name = 'pprof'
+
def __init__(self, host, executable_path, output_dir, identifier=None):
super(GooglePProf, self).__init__(host, executable_path, output_dir, "pprof", identifier)
@@ -76,24 +105,106 @@ class GooglePProf(SingleFileOutputProfiler):
match = re.search("^Total:[^\n]*\n((?:[^\n]*\n){0,10})", pprof_output, re.MULTILINE)
return match.group(1) if match else None
- def profile_after_exit(self):
+ def _pprof_path(self):
# FIXME: We should have code to find the right google-pprof executable, some Googlers have
# google-pprof installed as "pprof" on their machines for them.
- # FIXME: Similarly we should find the right perl!
- pprof_args = ['/usr/bin/perl', '/usr/bin/google-pprof', '--text', self._executable_path, self._output_path]
+ return '/usr/bin/google-pprof'
+
+ def profile_after_exit(self):
+ # google-pprof doesn't check its arguments, so we have to.
+ if not (self._host.filesystem.exists(self._output_path)):
+ print "Failed to gather profile, %s does not exist." % self._output_path
+ return
+
+ pprof_args = [self._pprof_path(), '--text', self._executable_path, self._output_path]
profile_text = self._host.executive.run_command(pprof_args)
+ print "First 10 lines of pprof --text:"
print self._first_ten_lines_of_profile(profile_text)
+ print "http://google-perftools.googlecode.com/svn/trunk/doc/cpuprofile.html documents output."
+ print
+ print "To interact with the the full profile, including produce graphs:"
+ print ' '.join([self._pprof_path(), self._executable_path, self._output_path])
+
+class Perf(SingleFileOutputProfiler):
+ name = 'perf'
-# FIXME: iprofile is a newer commandline interface to replace /usr/bin/instruments.
-class Instruments(SingleFileOutputProfiler):
def __init__(self, host, executable_path, output_dir, identifier=None):
- super(Instruments, self).__init__(host, executable_path, output_dir, "trace", identifier)
+ super(Perf, self).__init__(host, executable_path, output_dir, "data", identifier)
+ self._perf_process = None
+ self._pid_being_profiled = None
- # FIXME: We may need a way to find this tracetemplate on the disk
- _time_profile = "/Applications/Xcode.app/Contents/Applications/Instruments.app/Contents/Resources/templates/Time Profiler.tracetemplate"
+ def _perf_path(self):
+ # FIXME: We may need to support finding the perf binary in other locations.
+ return 'perf'
def attach_to_pid(self, pid):
- cmd = ["instruments", "-t", self._time_profile, "-D", self._output_path, "-p", pid]
- cmd = map(unicode, cmd)
- self._host.executive.popen(cmd)
+ assert(not self._perf_process and not self._pid_being_profiled)
+ self._pid_being_profiled = pid
+ cmd = [self._perf_path(), "record", "--call-graph", "--pid", pid, "--output", self._output_path]
+ self._perf_process = self._host.executive.popen(cmd)
+
+ def _first_ten_lines_of_profile(self, perf_output):
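+        # 'perf report' prefixes header lines with '#'; capture up to ten of the indented sample lines that follow one.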
+ match = re.search("^#[^\n]*\n((?: [^\n]*\n){1,10})", perf_output, re.MULTILINE)
+ return match.group(1) if match else None
+
+ def profile_after_exit(self):
+        # Perf doesn't automatically watch the attached pid for death notifications,
+        # so we have to watch it ourselves and then tell perf it's time to stop sampling. :(
+ self._host.executive.wait_limited(self._pid_being_profiled, limit_in_seconds=10)
+ perf_exitcode = self._perf_process.poll()
+        if perf_exitcode is None: # This should always be the case, unless perf errored out early.
+ self._host.executive.interrupt(self._perf_process.pid)
+
+ perf_exitcode = self._perf_process.wait()
+ if perf_exitcode not in (0, -2): # The exit code should always be -2, as we're always interrupting perf.
+ print "'perf record' failed (exit code: %i), can't process results:" % perf_exitcode
+ return
+
+ perf_args = [self._perf_path(), 'report', '--call-graph', 'none', '--input', self._output_path]
+ print "First 10 lines of 'perf report --call-graph=none':"
+
+ print " ".join(perf_args)
+ perf_output = self._host.executive.run_command(perf_args)
+ print self._first_ten_lines_of_profile(perf_output)
+
+ print "To view the full profile, run:"
+ print ' '.join([self._perf_path(), 'report', '-i', self._output_path])
+ print # An extra line between tests looks nicer.
+
+
+class Sample(SingleFileOutputProfiler):
+ name = 'sample'
+
+ def __init__(self, host, executable_path, output_dir, identifier=None):
+ super(Sample, self).__init__(host, executable_path, output_dir, "txt", identifier)
+ self._profiler_process = None
+
+ def attach_to_pid(self, pid):
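+        # -mayDie lets sample(1) still produce a report if the target process exits before sampling finishes.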
+ cmd = ["sample", pid, "-mayDie", "-file", self._output_path]
+ self._profiler_process = self._host.executive.popen(cmd)
+
+ def profile_after_exit(self):
+ self._profiler_process.wait()
+
+
+class IProfiler(SingleFileOutputProfiler):
+ name = 'iprofiler'
+
+ def __init__(self, host, executable_path, output_dir, identifier=None):
+ super(IProfiler, self).__init__(host, executable_path, output_dir, "dtps", identifier)
+ self._profiler_process = None
+
+ def attach_to_pid(self, pid):
+ # FIXME: iprofiler requires us to pass the directory separately
+ # from the basename of the file, with no control over the extension.
+ fs = self._host.filesystem
+ cmd = ["iprofiler", "-timeprofiler", "-a", pid,
+ "-d", fs.dirname(self._output_path), "-o", fs.splitext(fs.basename(self._output_path))[0]]
+ # FIXME: Consider capturing instead of letting instruments spam to stderr directly.
+ self._profiler_process = self._host.executive.popen(cmd)
+
+ def profile_after_exit(self):
+        # It seems like a nicer user experience to wait on the profiler to exit to prevent
+        # it from spewing to stderr at odd times.
+ self._profiler_process.wait()
diff --git a/Tools/Scripts/webkitpy/common/system/profiler_unittest.py b/Tools/Scripts/webkitpy/common/system/profiler_unittest.py
index 059b7cfa1..22bc2df6e 100644
--- a/Tools/Scripts/webkitpy/common/system/profiler_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/profiler_unittest.py
@@ -26,25 +26,41 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
+from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from .profiler import ProfilerFactory, Instruments, GooglePProf
+from .profiler import ProfilerFactory, GooglePProf
class ProfilerFactoryTest(unittest.TestCase):
- def test_basic(self):
+ def _assert_default_profiler_name(self, os_name, expected_profiler_name):
+ profiler_name = ProfilerFactory.default_profiler_name(MockPlatformInfo(os_name))
+ self.assertEqual(profiler_name, expected_profiler_name)
+
+ def test_default_profilers(self):
+ self._assert_default_profiler_name('mac', 'iprofiler')
+ self._assert_default_profiler_name('linux', 'perf')
+ self._assert_default_profiler_name('win32', None)
+ self._assert_default_profiler_name('freebsd', None)
+
+ def test_default_profiler_output(self):
host = MockSystemHost()
self.assertFalse(host.filesystem.exists("/tmp/output"))
+
+        # Default mocks are Mac, so iprofiler should be the default.
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
self.assertTrue(host.filesystem.exists("/tmp/output"))
- self.assertEquals(profiler._output_path, "/tmp/output/test.trace")
+ self.assertEqual(profiler._output_path, "/tmp/output/test.dtps")
+ # Linux defaults to perf.
host.platform.os_name = 'linux'
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
- self.assertEquals(profiler._output_path, "/tmp/output/test.pprof")
+ self.assertEqual(profiler._output_path, "/tmp/output/test.data")
+
+class GooglePProfTest(unittest.TestCase):
def test_pprof_output_regexp(self):
pprof_output = """
sometimes
@@ -84,4 +100,4 @@ Total: 3770 samples
"""
host = MockSystemHost()
profiler = GooglePProf(host, '/bin/executable', '/tmp/output')
- self.assertEquals(profiler._first_ten_lines_of_profile(pprof_output), expected_first_ten_lines)
+ self.assertEqual(profiler._first_ten_lines_of_profile(pprof_output), expected_first_ten_lines)
diff --git a/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py b/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py
index 625acf2b3..3050adc99 100644
--- a/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/stack_utils_unittest.py
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.common.system import outputcapture
from webkitpy.common.system import stack_utils
@@ -42,11 +42,11 @@ class StackUtilsTest(unittest.TestCase):
def test_find_thread_stack_found(self):
thread_id = current_thread_id()
found_stack = stack_utils._find_thread_stack(thread_id)
- self.assertNotEqual(found_stack, None)
+ self.assertIsNotNone(found_stack)
def test_find_thread_stack_not_found(self):
found_stack = stack_utils._find_thread_stack(0)
- self.assertEqual(found_stack, None)
+ self.assertIsNone(found_stack)
def test_log_thread_state(self):
msgs = []
diff --git a/Tools/Scripts/webkitpy/common/system/user_unittest.py b/Tools/Scripts/webkitpy/common/system/user_unittest.py
index bd86d228f..49810b2e0 100644
--- a/Tools/Scripts/webkitpy/common/system/user_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/user_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.user import User
diff --git a/Tools/Scripts/webkitpy/common/system/workspace.py b/Tools/Scripts/webkitpy/common/system/workspace.py
index 686837619..1d92aca13 100644
--- a/Tools/Scripts/webkitpy/common/system/workspace.py
+++ b/Tools/Scripts/webkitpy/common/system/workspace.py
@@ -67,7 +67,7 @@ class Workspace(object):
try:
self._executive.run_command(['zip', '-9', '-r', zip_path, '.'], cwd=source_path)
except ScriptError, e:
- _log.error("Workspace.create_zip failed:\n%s" % e.message_with_output())
+ _log.error("Workspace.create_zip failed in %s:\n%s" % (source_path, e.message_with_output()))
return None
return zip_class(zip_path)
diff --git a/Tools/Scripts/webkitpy/common/system/workspace_mock.py b/Tools/Scripts/webkitpy/common/system/workspace_mock.py
index 005f86cf3..02a5f4c29 100644
--- a/Tools/Scripts/webkitpy/common/system/workspace_mock.py
+++ b/Tools/Scripts/webkitpy/common/system/workspace_mock.py
@@ -32,4 +32,6 @@ class MockWorkspace(object):
return "%s/%s.%s" % (directory, name, extension)
def create_zip(self, zip_path, source_path):
+ self.zip_path = zip_path
+ self.source_path = source_path
return object() # Something that is not None
diff --git a/Tools/Scripts/webkitpy/common/system/workspace_unittest.py b/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
index eca386ac3..8262f6cf1 100644
--- a/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/workspace_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
@@ -60,7 +60,7 @@ class WorkspaceTest(unittest.TestCase):
def test_create_zip_exception(self):
workspace = Workspace(None, MockExecutive(should_log=True, should_throw=True))
expected_logs = """MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path
-Workspace.create_zip failed:
+Workspace.create_zip failed in /source/path:
MOCK ScriptError
MOCK output of child process
@@ -69,4 +69,4 @@ MOCK output of child process
def __init__(self, path):
self.filename = path
archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs)
- self.assertEqual(archive, None)
+ self.assertIsNone(archive)
diff --git a/Tools/Scripts/webkitpy/common/system/zipfileset_unittest.py b/Tools/Scripts/webkitpy/common/system/zipfileset_unittest.py
index 22ba72082..1a0603c9e 100644
--- a/Tools/Scripts/webkitpy/common/system/zipfileset_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/zipfileset_unittest.py
@@ -23,7 +23,7 @@
import shutil
import tempfile
-import unittest
+import unittest2 as unittest
import zipfile
from webkitpy.common.system.filesystem_mock import MockFileSystem
@@ -92,7 +92,3 @@ class ZipFileSetTest(unittest.TestCase):
def test_namelist(self):
self.assertTrue('some-file' in self._zip.namelist())
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py b/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py
index f731db2b2..1a4677230 100644
--- a/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py
+++ b/Tools/Scripts/webkitpy/common/thread/messagepump_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.thread.messagepump import MessagePump, MessagePumpDelegate
from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
diff --git a/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py
index cb67c1e82..dbb8a2e42 100644
--- a/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py
+++ b/Tools/Scripts/webkitpy/common/thread/threadedmessagequeue_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
diff --git a/Tools/Scripts/webkitpy/common/version_check.py b/Tools/Scripts/webkitpy/common/version_check.py
index 6acc9b439..c0505443f 100644
--- a/Tools/Scripts/webkitpy/common/version_check.py
+++ b/Tools/Scripts/webkitpy/common/version_check.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern_unittest.py
index 7ae45fa3f..b222d3a50 100644
--- a/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern_unittest.py
+++ b/Tools/Scripts/webkitpy/common/watchlist/amountchangedpattern_unittest.py
@@ -31,7 +31,7 @@
import re
-import unittest
+import unittest2 as unittest
from webkitpy.common.watchlist.amountchangedpattern import AmountChangedPattern
diff --git a/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py
index 1f2aeda23..2f5fd68be 100644
--- a/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py
+++ b/Tools/Scripts/webkitpy/common/watchlist/changedlinepattern_unittest.py
@@ -29,7 +29,7 @@
'''Unit tests for changedlinepattern.py.'''
import re
-import unittest
+import unittest2 as unittest
from webkitpy.common.watchlist.changedlinepattern import ChangedLinePattern
diff --git a/Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py
index 0afdf3005..2b51dd68f 100644
--- a/Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py
+++ b/Tools/Scripts/webkitpy/common/watchlist/filenamepattern_unittest.py
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
-import unittest
+import unittest2 as unittest
from webkitpy.common.watchlist.filenamepattern import FilenamePattern
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlist_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/watchlist_unittest.py
index 67ff3b01a..dd7b083ea 100644
--- a/Tools/Scripts/webkitpy/common/watchlist/watchlist_unittest.py
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlist_unittest.py
@@ -28,7 +28,7 @@
'''Unit tests for watchlist.py.'''
-import unittest
+import unittest2 as unittest
from webkitpy.common.checkout.diff_test_data import DIFF_TEST_DATA
from webkitpy.common.watchlist.watchlistparser import WatchListParser
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistloader.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistloader.py
deleted file mode 100644
index aa816e37c..000000000
--- a/Tools/Scripts/webkitpy/common/watchlist/watchlistloader.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (C) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from webkitpy.common.watchlist.watchlistparser import WatchListParser
-
-
-class WatchListLoader(object):
- def __init__(self, filesystem):
- self._filesystem = filesystem
-
- def load(self):
- config_path = self._filesystem.dirname(self._filesystem.path_to_module('webkitpy.common.config'))
- watch_list_full_path = self._filesystem.join(config_path, 'watchlist')
- if not self._filesystem.exists(watch_list_full_path):
- raise Exception('Watch list file (%s) not found.' % watch_list_full_path)
-
- watch_list_contents = self._filesystem.read_text_file(watch_list_full_path)
- return WatchListParser().parse(watch_list_contents)
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistloader_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistloader_unittest.py
deleted file mode 100644
index 8d3fa9806..000000000
--- a/Tools/Scripts/webkitpy/common/watchlist/watchlistloader_unittest.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (C) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-'''Unit tests for watchlistloader.py.'''
-
-from webkitpy.common import webkitunittest
-from webkitpy.common.system import filesystem_mock
-from webkitpy.common.system import filesystem
-from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.common.watchlist.watchlistloader import WatchListLoader
-
-
-class WatchListLoaderTest(webkitunittest.TestCase):
- def test_watch_list_not_found(self):
- loader = WatchListLoader(filesystem_mock.MockFileSystem())
- self.assertRaisesRegexp(Exception, r'Watch list file \(.*/watchlist\) not found\.', loader.load)
-
- def test_watch_list_load(self):
- # Test parsing of the checked-in watch list.
- OutputCapture().assert_outputs(self, WatchListLoader(filesystem.FileSystem()).load, expected_logs="")
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistparser.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistparser.py
index c72eab36e..1d3f581b6 100644
--- a/Tools/Scripts/webkitpy/common/watchlist/watchlistparser.py
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlistparser.py
@@ -152,7 +152,7 @@ class WatchListParser(object):
# modifying a list while iterating through it leads to undefined behavior.
intructions_copy = cc_rule.instructions()[:]
for email in intructions_copy:
- if not accounts.account_by_login(email):
+ if not accounts.contributor_by_email(email):
cc_rule.remove_instruction(email)
self._log_error("The email alias %s which is in the watchlist is not listed as a contributor in committers.py" % email)
continue
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py
index 3bd4dc2d9..d06a72dac 100644
--- a/Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlistparser_unittest.py
@@ -185,6 +185,23 @@ class WatchListParserTest(webkitunittest.TestCase):
expected_logs='The email alias levin+bad+email@chromium.org which is'
+ ' in the watchlist is not listed as a contributor in committers.py\n')
+ def test_cc_rule_with_secondary_email(self):
+ # FIXME: We should provide a mock of CommitterList so that we can test this on fake data.
+ watch_list = (
+ '{'
+ ' "DEFINITIONS": {'
+ ' "WatchList1": {'
+ ' "filename": r".*\\MyFileName\\.cpp",'
+ ' },'
+ ' },'
+ ' "CC_RULES": {'
+ ' "WatchList1": ["ojan.autocc@gmail.com"],'
+ ' },'
+ '}')
+
+ OutputCapture().assert_outputs(self, self._watch_list_parser.parse, args=[watch_list],
+ expected_logs='')
+
def test_empty_message_rule(self):
watch_list = (
'{'
diff --git a/Tools/Scripts/webkitpy/common/watchlist/watchlistrule_unittest.py b/Tools/Scripts/webkitpy/common/watchlist/watchlistrule_unittest.py
index 92aaf34ab..d926887e5 100644
--- a/Tools/Scripts/webkitpy/common/watchlist/watchlistrule_unittest.py
+++ b/Tools/Scripts/webkitpy/common/watchlist/watchlistrule_unittest.py
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.watchlist.watchlistrule import WatchListRule
diff --git a/Tools/Scripts/webkitpy/common/webkit_finder.py b/Tools/Scripts/webkitpy/common/webkit_finder.py
index 3705ef37f..7b9c0145e 100644
--- a/Tools/Scripts/webkitpy/common/webkit_finder.py
+++ b/Tools/Scripts/webkitpy/common/webkit_finder.py
@@ -44,7 +44,7 @@ class WebKitFinder(object):
if not self._webkit_base:
self._webkit_base = self._webkit_base
module_path = self._filesystem.path_to_module(self.__module__)
- tools_index = module_path.find('Tools')
+ tools_index = module_path.rfind('Tools')
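+        # Use rfind so an earlier 'Tools' component in the checkout's path does not throw off the lookup.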
assert tools_index != -1, "could not find location of this checkout from %s" % module_path
self._webkit_base = self._filesystem.normpath(module_path[0:tools_index - 1])
return self._webkit_base
diff --git a/Tools/Scripts/webkitpy/common/webkitunittest.py b/Tools/Scripts/webkitpy/common/webkitunittest.py
index 7b650a1eb..dd6152379 100644
--- a/Tools/Scripts/webkitpy/common/webkitunittest.py
+++ b/Tools/Scripts/webkitpy/common/webkitunittest.py
@@ -29,7 +29,7 @@
'''Basic unit test functionality.'''
import re
-import unittest
+import unittest2 as unittest
class TestCase(unittest.TestCase):
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
index 54ce5e6fe..3fd40e38f 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
@@ -28,12 +28,12 @@
import logging
import math
-import re
import threading
import time
from webkitpy.common import message_pool
from webkitpy.layout_tests.controllers import single_test_runner
+from webkitpy.layout_tests.models.test_run_results import TestRunResults
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
@@ -61,54 +61,47 @@ class TestRunInterruptedException(Exception):
class LayoutTestRunner(object):
- def __init__(self, options, port, printer, results_directory, expectations, test_is_slow_fn):
+ def __init__(self, options, port, printer, results_directory, test_is_slow_fn):
self._options = options
self._port = port
self._printer = printer
self._results_directory = results_directory
- self._expectations = None
self._test_is_slow = test_is_slow_fn
self._sharder = Sharder(self._port.split_test, self._options.max_locked_shards)
+ self._filesystem = self._port.host.filesystem
- self._current_result_summary = None
+ self._expectations = None
+ self._test_inputs = []
self._needs_http = None
self._needs_websockets = None
self._retrying = False
- self._test_files_list = []
- self._all_results = []
- self._group_stats = {}
- self._worker_stats = {}
- self._filesystem = self._port.host.filesystem
- def run_tests(self, test_inputs, expectations, result_summary, num_workers, needs_http, needs_websockets, retrying):
- """Returns a tuple of (interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings):
- interrupted is whether the run was interrupted
- keyboard_interrupted is whether the interruption was because someone typed Ctrl^C
- thread_timings is a list of dicts with the total runtime
- of each thread with 'name', 'num_tests', 'total_time' properties
- test_timings is a list of timings for each sharded subdirectory
- of the form [time, directory_name, num_tests]
- individual_test_timings is a list of run times for each test
- in the form {filename:filename, test_run_time:test_run_time}
- result_summary: summary object to populate with the results
- """
- self._current_result_summary = result_summary
+ self._current_run_results = None
+ self._remaining_locked_shards = []
+ self._has_http_lock = False
+
+ def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying):
self._expectations = expectations
+ self._test_inputs = test_inputs
self._needs_http = needs_http
self._needs_websockets = needs_websockets
self._retrying = retrying
- self._test_files_list = [test_input.test_name for test_input in test_inputs]
- self._printer.num_tests = len(self._test_files_list)
- self._printer.num_completed = 0
- self._all_results = []
- self._group_stats = {}
- self._worker_stats = {}
- self._has_http_lock = False
+ # FIXME: rename all variables to test_run_results or some such ...
+ run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
+ self._current_run_results = run_results
self._remaining_locked_shards = []
+ self._has_http_lock = False
+ self._printer.num_tests = len(test_inputs)
+ self._printer.num_started = 0
+
+ if not retrying:
+ self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type)
- keyboard_interrupted = False
- interrupted = False
+ for test_name in set(tests_to_skip):
+ result = test_results.TestResult(test_name)
+ result.type = test_expectations.SKIP
+ run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))
self._printer.write_update('Sharding tests ...')
locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)
@@ -123,35 +116,34 @@ class LayoutTestRunner(object):
all_shards = locked_shards + unlocked_shards
self._remaining_locked_shards = locked_shards
- if self._port.requires_http_server() or (locked_shards and self._options.http):
+ if locked_shards and self._options.http:
self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))
num_workers = min(num_workers, len(all_shards))
self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))
if self._options.dry_run:
- return (keyboard_interrupted, interrupted, self._worker_stats.values(), self._group_stats, self._all_results)
+ return run_results
self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
try:
with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
+ except TestRunInterruptedException, e:
+ _log.warning(e.reason)
+ run_results.interrupted = True
except KeyboardInterrupt:
self._printer.flush()
self._printer.writeln('Interrupted, exiting ...')
- keyboard_interrupted = True
- except TestRunInterruptedException, e:
- _log.warning(e.reason)
- interrupted = True
+ raise
except Exception, e:
_log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e)))
raise
finally:
self.stop_servers_with_lock()
- # FIXME: should this be a class instead of a tuple?
- return (interrupted, keyboard_interrupted, self._worker_stats.values(), self._group_stats, self._all_results)
+ return run_results
def _worker_factory(self, worker_connection):
results_directory = self._results_directory
@@ -160,37 +152,37 @@ class LayoutTestRunner(object):
results_directory = self._filesystem.join(self._results_directory, 'retries')
return Worker(worker_connection, results_directory, self._options)
- def _mark_interrupted_tests_as_skipped(self, result_summary):
- for test_name in self._test_files_list:
- if test_name not in result_summary.results:
- result = test_results.TestResult(test_name, [test_failures.FailureEarlyExit()])
+ def _mark_interrupted_tests_as_skipped(self, run_results):
+ for test_input in self._test_inputs:
+ if test_input.test_name not in run_results.results_by_name:
+ result = test_results.TestResult(test_input.test_name, [test_failures.FailureEarlyExit()])
# FIXME: We probably need to loop here if there are multiple iterations.
# FIXME: Also, these results are really neither expected nor unexpected. We probably
# need a third type of result.
- result_summary.add(result, expected=False, test_is_slow=self._test_is_slow(test_name))
+ run_results.add(result, expected=False, test_is_slow=self._test_is_slow(test_input.test_name))
- def _interrupt_if_at_failure_limits(self, result_summary):
+ def _interrupt_if_at_failure_limits(self, run_results):
# Note: The messages in this method are constructed to match old-run-webkit-tests
# so that existing buildbot grep rules work.
- def interrupt_if_at_failure_limit(limit, failure_count, result_summary, message):
+ def interrupt_if_at_failure_limit(limit, failure_count, run_results, message):
if limit and failure_count >= limit:
- message += " %d tests run." % (result_summary.expected + result_summary.unexpected)
- self._mark_interrupted_tests_as_skipped(result_summary)
+ message += " %d tests run." % (run_results.expected + run_results.unexpected)
+ self._mark_interrupted_tests_as_skipped(run_results)
raise TestRunInterruptedException(message)
interrupt_if_at_failure_limit(
self._options.exit_after_n_failures,
- result_summary.unexpected_failures,
- result_summary,
- "Exiting early after %d failures." % result_summary.unexpected_failures)
+ run_results.unexpected_failures,
+ run_results,
+ "Exiting early after %d failures." % run_results.unexpected_failures)
interrupt_if_at_failure_limit(
self._options.exit_after_n_crashes_or_timeouts,
- result_summary.unexpected_crashes + result_summary.unexpected_timeouts,
- result_summary,
+ run_results.unexpected_crashes + run_results.unexpected_timeouts,
+ run_results,
# This differs from ORWT because it does not include WebProcess crashes.
- "Exiting early after %d crashes and %d timeouts." % (result_summary.unexpected_crashes, result_summary.unexpected_timeouts))
+ "Exiting early after %d crashes and %d timeouts." % (run_results.unexpected_crashes, run_results.unexpected_timeouts))
- def _update_summary_with_result(self, result_summary, result):
+ def _update_summary_with_result(self, run_results, result):
if result.type == test_expectations.SKIP:
exp_str = got_str = 'SKIP'
expected = True
@@ -199,11 +191,11 @@ class LayoutTestRunner(object):
exp_str = self._expectations.get_expectations_string(result.test_name)
got_str = self._expectations.expectation_to_string(result.type)
- result_summary.add(result, expected, self._test_is_slow(result.test_name))
+ run_results.add(result, expected, self._test_is_slow(result.test_name))
self._printer.print_finished_test(result, expected, exp_str, got_str)
- self._interrupt_if_at_failure_limits(result_summary)
+ self._interrupt_if_at_failure_limits(run_results)
def start_servers_with_lock(self, number_of_servers):
self._printer.write_update('Acquiring http lock ...')
@@ -237,9 +229,7 @@ class LayoutTestRunner(object):
def _handle_started_test(self, worker_name, test_input, test_timeout_sec):
self._printer.print_started_test(test_input.test_name)
- def _handle_finished_test_list(self, worker_name, list_name, num_tests, elapsed_time):
- self._group_stats[list_name] = (num_tests, elapsed_time)
-
+ def _handle_finished_test_list(self, worker_name, list_name):
def find(name, test_lists):
for i in range(len(test_lists)):
if test_lists[i].name == name:
@@ -249,15 +239,11 @@ class LayoutTestRunner(object):
index = find(list_name, self._remaining_locked_shards)
if index >= 0:
self._remaining_locked_shards.pop(index)
- if not self._remaining_locked_shards and not self._port.requires_http_server():
+ if not self._remaining_locked_shards:
self.stop_servers_with_lock()
- def _handle_finished_test(self, worker_name, result, elapsed_time, log_messages=[]):
- self._worker_stats.setdefault(worker_name, {'name': worker_name, 'num_tests': 0, 'total_time': 0})
- self._worker_stats[worker_name]['total_time'] += elapsed_time
- self._worker_stats[worker_name]['num_tests'] += 1
- self._all_results.append(result)
- self._update_summary_with_result(self._current_result_summary, result)
+ def _handle_finished_test(self, worker_name, result, log_messages=[]):
+ self._update_summary_with_result(self._current_run_results, result)
class Worker(object):
@@ -275,8 +261,7 @@ class Worker(object):
self._batch_count = None
self._filesystem = None
self._driver = None
- self._tests_run_file = None
- self._tests_run_filename = None
+ self._num_tests = 0
def __del__(self):
self.stop()
@@ -291,16 +276,12 @@ class Worker(object):
self._batch_count = 0
self._batch_size = self._options.batch_size or 0
- tests_run_filename = self._filesystem.join(self._results_directory, "tests_run%d.txt" % self._worker_number)
- self._tests_run_file = self._filesystem.open_text_file_for_writing(tests_run_filename)
def handle(self, name, source, test_list_name, test_inputs):
assert name == 'test_list'
- start_time = time.time()
for test_input in test_inputs:
- self._run_test(test_input)
- elapsed_time = time.time() - start_time
- self._caller.post('finished_test_list', test_list_name, len(test_inputs), elapsed_time)
+ self._run_test(test_input, test_list_name)
+ self._caller.post('finished_test_list', test_list_name)
def _update_test_input(self, test_input):
if test_input.reference_files is None:
@@ -311,7 +292,7 @@ class Worker(object):
else:
test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
- def _run_test(self, test_input):
+ def _run_test(self, test_input, shard_name):
self._batch_count += 1
stop_when_done = False
@@ -325,18 +306,19 @@ class Worker(object):
self._caller.post('started_test', test_input, test_timeout_sec)
result = self._run_test_with_timeout(test_input, test_timeout_sec, stop_when_done)
+ result.shard_name = shard_name
+ result.worker_name = self._name
+ result.total_run_time = time.time() - start
+ result.test_number = self._num_tests
+ self._num_tests += 1
- elapsed_time = time.time() - start
- self._caller.post('finished_test', result, elapsed_time)
+ self._caller.post('finished_test', result)
self._clean_up_after_test(test_input, result)
def stop(self):
_log.debug("%s cleaning up" % self._name)
self._kill_driver()
- if self._tests_run_file:
- self._tests_run_file.close()
- self._tests_run_file = None
def _timeout(self, test_input):
"""Compute the appropriate timeout value for a test."""
@@ -370,7 +352,6 @@ class Worker(object):
def _clean_up_after_test(self, test_input, result):
test_name = test_input.test_name
- self._tests_run_file.write(test_name + "\n")
if result.failures:
# Check and kill DumpRenderTree if we need to.
@@ -451,8 +432,8 @@ class Worker(object):
return self._run_single_test(self._driver, test_input, stop_when_done)
def _run_single_test(self, driver, test_input, stop_when_done):
- return single_test_runner.run_single_test(self._port, self._options,
- test_input, driver, self._name, stop_when_done)
+ return single_test_runner.run_single_test(self._port, self._options, self._results_directory,
+ self._name, driver, test_input, stop_when_done)
class TestShard(object):
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
index 13841c1e2..406870a96 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2012 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
@@ -28,7 +27,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
@@ -36,19 +35,22 @@ from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, Sharder, TestRunInterruptedException
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
-from webkitpy.layout_tests.models.result_summary import ResultSummary
+from webkitpy.layout_tests.models.test_run_results import TestRunResults
from webkitpy.layout_tests.models.test_input import TestInput
from webkitpy.layout_tests.models.test_results import TestResult
-from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.port.test import TestPort
TestExpectations = test_expectations.TestExpectations
class FakePrinter(object):
- num_completed = 0
+ num_started = 0
num_tests = 0
+ def print_expected(self, run_results, get_tests_with_result_type):
+ pass
+
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
pass
@@ -70,7 +72,7 @@ class FakePrinter(object):
class LockCheckingRunner(LayoutTestRunner):
def __init__(self, port, options, printer, tester, http_lock):
- super(LockCheckingRunner, self).__init__(options, port, printer, port.results_directory(), TestExpectations(port, []), lambda test_name: False)
+ super(LockCheckingRunner, self).__init__(options, port, printer, port.results_directory(), lambda test_name: False)
self._finished_list_called = False
self._tester = tester
self._should_have_http_lock = http_lock
@@ -99,13 +101,10 @@ class LayoutTestRunnerTests(unittest.TestCase):
port = port or host.port_factory.get(options.platform, options=options)
return LockCheckingRunner(port, options, FakePrinter(), self, True)
- def _result_summary(self, runner, tests):
- return ResultSummary(TestExpectations(runner._port, tests), tests, 1, set())
-
def _run_tests(self, runner, tests):
test_inputs = [TestInput(test, 6000) for test in tests]
expectations = TestExpectations(runner._port, tests)
- runner.run_tests(test_inputs, expectations, self._result_summary(runner, tests),
+ runner.run_tests(expectations, test_inputs, set(),
num_workers=1, needs_http=any('http' in test for test in tests), needs_websockets=any(['websocket' in test for test in tests]), retrying=False)
def test_http_locking(self):
@@ -121,29 +120,29 @@ class LayoutTestRunnerTests(unittest.TestCase):
runner._options.exit_after_n_failures = None
runner._options.exit_after_n_crashes_or_times = None
test_names = ['passes/text.html', 'passes/image.html']
- runner._test_files_list = test_names
+ runner._test_inputs = [TestInput(test_name, 6000) for test_name in test_names]
- result_summary = self._result_summary(runner, test_names)
- result_summary.unexpected_failures = 100
- result_summary.unexpected_crashes = 50
- result_summary.unexpected_timeouts = 50
+ run_results = TestRunResults(TestExpectations(runner._port, test_names), len(test_names))
+ run_results.unexpected_failures = 100
+ run_results.unexpected_crashes = 50
+ run_results.unexpected_timeouts = 50
# No exception when the exit_after* options are None.
- runner._interrupt_if_at_failure_limits(result_summary)
+ runner._interrupt_if_at_failure_limits(run_results)
# No exception when we haven't hit the limit yet.
runner._options.exit_after_n_failures = 101
runner._options.exit_after_n_crashes_or_timeouts = 101
- runner._interrupt_if_at_failure_limits(result_summary)
+ runner._interrupt_if_at_failure_limits(run_results)
# Interrupt if we've exceeded either limit:
runner._options.exit_after_n_crashes_or_timeouts = 10
- self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, result_summary)
- self.assertEqual(result_summary.results['passes/text.html'].type, test_expectations.SKIP)
- self.assertEqual(result_summary.results['passes/image.html'].type, test_expectations.SKIP)
+ self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
+ self.assertEqual(run_results.results_by_name['passes/text.html'].type, test_expectations.SKIP)
+ self.assertEqual(run_results.results_by_name['passes/image.html'].type, test_expectations.SKIP)
runner._options.exit_after_n_crashes_or_timeouts = None
runner._options.exit_after_n_failures = 10
- exception = self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, result_summary)
+ exception = self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, run_results)
def test_update_summary_with_result(self):
# Reftests expected to be image mismatch should be respected when pixel_tests=False.
@@ -153,17 +152,17 @@ class LayoutTestRunnerTests(unittest.TestCase):
expectations = TestExpectations(runner._port, tests=[test])
runner._expectations = expectations
- result_summary = ResultSummary(expectations, [test], 1, set())
+ run_results = TestRunResults(expectations, 1)
result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()], reftest_type=['!='])
- runner._update_summary_with_result(result_summary, result)
- self.assertEqual(1, result_summary.expected)
- self.assertEqual(0, result_summary.unexpected)
+ runner._update_summary_with_result(run_results, result)
+ self.assertEqual(1, run_results.expected)
+ self.assertEqual(0, run_results.unexpected)
- result_summary = ResultSummary(expectations, [test], 1, set())
+ run_results = TestRunResults(expectations, 1)
result = TestResult(test_name=test, failures=[], reftest_type=['=='])
- runner._update_summary_with_result(result_summary, result)
- self.assertEqual(0, result_summary.expected)
- self.assertEqual(1, result_summary.unexpected)
+ runner._update_summary_with_result(run_results, result)
+ self.assertEqual(0, run_results.expected)
+ self.assertEqual(1, run_results.unexpected)
def test_servers_started(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 5986d5396..28a8930d2 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
@@ -35,29 +34,21 @@ objects to the Manager. The Manager then aggregates the TestFailures to
create a final report.
"""
-import errno
+import json
import logging
-import math
-import Queue
import random
-import re
import sys
import time
-from webkitpy.common import message_pool
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
-from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, TestRunInterruptedException, WorkerException
+from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
-from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput
-from webkitpy.layout_tests.models.result_summary import ResultSummary
-from webkitpy.layout_tests.views import printing
-
-from webkitpy.tool import grammar
_log = logging.getLogger(__name__)
@@ -67,192 +58,6 @@ BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"
TestExpectations = test_expectations.TestExpectations
-def interpret_test_failures(port, test_name, failures):
- """Interpret test failures and returns a test result as dict.
-
- Args:
- port: interface to port-specific hooks
- test_name: test name relative to layout_tests directory
- failures: list of test failures
- Returns:
- A dictionary like {'is_missing_text': True, ...}
- """
- test_dict = {}
- failure_types = [type(failure) for failure in failures]
- # FIXME: get rid of all this is_* values once there is a 1:1 map between
- # TestFailure type and test_expectations.EXPECTATION.
- if test_failures.FailureMissingAudio in failure_types:
- test_dict['is_missing_audio'] = True
-
- if test_failures.FailureMissingResult in failure_types:
- test_dict['is_missing_text'] = True
-
- if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
- test_dict['is_missing_image'] = True
-
- for failure in failures:
- if isinstance(failure, test_failures.FailureImageHashMismatch) or isinstance(failure, test_failures.FailureReftestMismatch):
- test_dict['image_diff_percent'] = failure.diff_percent
-
- return test_dict
-
-
-def use_trac_links_in_results_html(port_obj):
- # We only use trac links on the buildbots.
- # Use existence of builder_name as a proxy for knowing we're on a bot.
- return port_obj.get_option("builder_name")
-
-
-# FIXME: This should be on the Manager class (since that's the only caller)
-# or split off from Manager onto another helper class, but should not be a free function.
-# Most likely this should be made into its own class, and this super-long function
-# split into many helper functions.
-def summarize_results(port_obj, expectations, result_summary, retry_summary, test_timings, only_unexpected, interrupted):
- """Summarize failing results as a dict.
-
- FIXME: split this data structure into a separate class?
-
- Args:
- port_obj: interface to port-specific hooks
- expectations: test_expectations.TestExpectations object
- result_summary: summary object from initial test runs
- retry_summary: summary object from final test run of retried tests
- test_timings: a list of TestResult objects which contain test runtimes in seconds
- only_unexpected: whether to return a summary only for the unexpected results
- Returns:
- A dictionary containing a summary of the unexpected results from the
- run, with the following fields:
- 'version': a version indicator
- 'fixable': The number of fixable tests (NOW - PASS)
- 'skipped': The number of skipped tests (NOW & SKIPPED)
- 'num_regressions': The number of non-flaky failures
- 'num_flaky': The number of flaky failures
- 'num_missing': The number of tests with missing results
- 'num_passes': The number of unexpected passes
- 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
- """
- results = {}
- results['version'] = 3
-
- tbe = result_summary.tests_by_expectation
- tbt = result_summary.tests_by_timeline
- results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
- results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
-
- num_passes = 0
- num_flaky = 0
- num_missing = 0
- num_regressions = 0
- keywords = {}
- for expecation_string, expectation_enum in TestExpectations.EXPECTATIONS.iteritems():
- keywords[expectation_enum] = expecation_string.upper()
-
- for modifier_string, modifier_enum in TestExpectations.MODIFIERS.iteritems():
- keywords[modifier_enum] = modifier_string.upper()
-
- tests = {}
- original_results = result_summary.unexpected_results if only_unexpected else result_summary.results
-
- for test_name, result in original_results.iteritems():
- # Note that if a test crashed in the original run, we ignore
- # whether or not it crashed when we retried it (if we retried it),
- # and always consider the result not flaky.
- expected = expectations.get_expectations_string(test_name)
- result_type = result.type
- actual = [keywords[result_type]]
-
- if result_type == test_expectations.SKIP:
- continue
-
- test_dict = {}
- if result.has_stderr:
- test_dict['has_stderr'] = True
-
- if result.reftest_type:
- test_dict.update(reftest_type=list(result.reftest_type))
-
- if expectations.has_modifier(test_name, test_expectations.WONTFIX):
- test_dict['wontfix'] = True
-
- if result_type == test_expectations.PASS:
- num_passes += 1
- # FIXME: include passing tests that have stderr output.
- if expected == 'PASS':
- continue
- elif result_type == test_expectations.CRASH:
- num_regressions += 1
- elif result_type == test_expectations.MISSING:
- if test_name in result_summary.unexpected_results:
- num_missing += 1
- elif test_name in result_summary.unexpected_results:
- if test_name not in retry_summary.unexpected_results:
- actual.extend(expectations.get_expectations_string(test_name).split(" "))
- num_flaky += 1
- else:
- retry_result_type = retry_summary.unexpected_results[test_name].type
- if result_type != retry_result_type:
- actual.append(keywords[retry_result_type])
- num_flaky += 1
- else:
- num_regressions += 1
-
- test_dict['expected'] = expected
- test_dict['actual'] = " ".join(actual)
- # FIXME: Set this correctly once https://webkit.org/b/37739 is fixed
- # and only set it if there actually is stderr data.
-
- test_dict.update(interpret_test_failures(port_obj, test_name, result.failures))
-
- # Store test hierarchically by directory. e.g.
- # foo/bar/baz.html: test_dict
- # foo/bar/baz1.html: test_dict
- #
- # becomes
- # foo: {
- # bar: {
- # baz.html: test_dict,
- # baz1.html: test_dict
- # }
- # }
- parts = test_name.split('/')
- current_map = tests
- for i, part in enumerate(parts):
- if i == (len(parts) - 1):
- current_map[part] = test_dict
- break
- if part not in current_map:
- current_map[part] = {}
- current_map = current_map[part]
-
- results['tests'] = tests
- results['num_passes'] = num_passes
- results['num_flaky'] = num_flaky
- results['num_missing'] = num_missing
- results['num_regressions'] = num_regressions
- results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
- results['interrupted'] = interrupted # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
- results['layout_tests_dir'] = port_obj.layout_tests_dir()
- results['has_wdiff'] = port_obj.wdiff_available()
- results['has_pretty_patch'] = port_obj.pretty_patch_available()
- results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
-
- try:
- # We only use the svn revision for using trac links in the results.html file,
- # Don't do this by default since it takes >100ms.
- # FIXME: Do we really need to populate this both here and in the json_results_generator?
- if use_trac_links_in_results_html(port_obj):
- port_obj.host.initialize_scm()
- results['revision'] = port_obj.host.scm().head_svn_revision()
- except Exception, e:
- _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
- # Handle cases where we're running outside of version control.
- import traceback
- _log.debug('Failed to learn head svn revision:')
- _log.debug(traceback.format_exc())
- results['revision'] = ""
-
- return results
-
class Manager(object):
"""A class for managing running a series of tests on a series of layout
@@ -281,12 +86,9 @@ class Manager(object):
# self._websocket_secure_server = websocket_server.PyWebSocket(
# options.results_directory, use_tls=True, port=9323)
- self._paths = set()
- self._test_names = None
- self._retrying = False
self._results_directory = self._port.results_directory()
self._finder = LayoutTestFinder(self._port, self._options)
- self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._expectations, self._test_is_slow)
+ self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)
def _collect_tests(self, args):
return self._finder.find_tests(self._options, args)
@@ -297,38 +99,28 @@ class Manager(object):
def _is_websocket_test(self, test):
return self.WEBSOCKET_SUBDIR in test
- def _http_tests(self):
- return set(test for test in self._test_names if self._is_http_test(test))
+ def _http_tests(self, test_names):
+ return set(test for test in test_names if self._is_http_test(test))
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
- def _prepare_lists(self):
- tests_to_skip = self._finder.skip_tests(self._paths, self._test_names, self._expectations, self._http_tests())
- self._test_names = [test for test in self._test_names if test not in tests_to_skip]
+ def _prepare_lists(self, paths, test_names):
+ tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
+ tests_to_run = [test for test in test_names if test not in tests_to_skip]
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
if self._options.order == 'natural':
- self._test_names.sort(key=self._port.test_key)
+ tests_to_run.sort(key=self._port.test_key)
elif self._options.order == 'random':
- random.shuffle(self._test_names)
+ random.shuffle(tests_to_run)
- self._test_names, tests_in_other_chunks = self._finder.split_into_chunks(self._test_names)
+ tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
self._expectations.add_skipped_tests(tests_in_other_chunks)
tests_to_skip.update(tests_in_other_chunks)
- if self._options.repeat_each > 1:
- list_with_repetitions = []
- for test in self._test_names:
- list_with_repetitions += ([test] * self._options.repeat_each)
- self._test_names = list_with_repetitions
-
- if self._options.iterations > 1:
- self._test_names = self._test_names * self._options.iterations
-
- iterations = self._options.repeat_each * self._options.iterations
- return ResultSummary(self._expectations, set(self._test_names), iterations, tests_to_skip)
+ return tests_to_run, tests_to_skip
def _test_input_for_file(self, test_file):
return TestInput(test_file,
@@ -345,12 +137,12 @@ class Manager(object):
def _test_is_slow(self, test_file):
return self._expectations.has_modifier(test_file, test_expectations.SLOW)
- def needs_servers(self):
- return any(self._test_requires_lock(test_name) for test_name in self._test_names) and self._options.http
+ def needs_servers(self, test_names):
+ return any(self._test_requires_lock(test_name) for test_name in test_names) and self._options.http
- def _set_up_run(self):
+ def _set_up_run(self, test_names):
self._printer.write_update("Checking build ...")
- if not self._port.check_build(self.needs_servers()):
+ if not self._port.check_build(self.needs_servers(test_names)):
_log.error("Build check failed")
return False
@@ -363,7 +155,7 @@ class Manager(object):
# Check that the system dependencies (themes, fonts, ...) are correct.
if not self._options.nocheck_sys_deps:
self._printer.write_update("Checking system dependencies ...")
- if not self._port.check_sys_deps(self.needs_servers()):
+ if not self._port.check_sys_deps(self.needs_servers(test_names)):
self._port.stop_helper()
return False
@@ -377,110 +169,119 @@ class Manager(object):
return True
def run(self, args):
- """Run all our tests on all our test files and return the number of unexpected results (0 == success)."""
+ """Run the tests and return a RunDetails object with the results."""
self._printer.write_update("Collecting tests ...")
try:
- self._paths, self._test_names = self._collect_tests(args)
- except IOError as exception:
+ paths, test_names = self._collect_tests(args)
+ except IOError:
# This is raised if --test-list doesn't exist
- return -1
+ return test_run_results.RunDetails(exit_code=-1)
self._printer.write_update("Parsing expectations ...")
- self._expectations = test_expectations.TestExpectations(self._port, self._test_names)
+ self._expectations = test_expectations.TestExpectations(self._port, test_names)
- num_all_test_files_found = len(self._test_names)
- result_summary = self._prepare_lists()
+ tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
+ self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)
# Check to make sure we're not skipping every test.
- if not self._test_names:
+ if not tests_to_run:
_log.critical('No tests to run.')
- return -1
-
- self._printer.print_found(num_all_test_files_found, len(self._test_names), self._options.repeat_each, self._options.iterations)
- self._printer.print_expected(result_summary, self._expectations.get_tests_with_result_type)
+ return test_run_results.RunDetails(exit_code=-1)
- if not self._set_up_run():
- return -1
+ if not self._set_up_run(tests_to_run):
+ return test_run_results.RunDetails(exit_code=-1)
start_time = time.time()
+ enabled_pixel_tests_in_retry = False
+ try:
+ initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
+ int(self._options.child_processes), retrying=False)
- interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = \
- self._run_tests(self._test_names, result_summary, int(self._options.child_processes))
+ tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
+ if self._options.retry_failures and tests_to_retry and not initial_results.interrupted:
+ enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
- # We exclude the crashes from the list of results to retry, because
- # we want to treat even a potentially flaky crash as an error.
+ _log.info('')
+ _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
+ _log.info('')
+ retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
+ num_workers=1, retrying=True)
- failures = self._get_failures(result_summary, include_crashes=self._port.should_retry_crashes(), include_missing=False)
- retry_summary = result_summary
- while (len(failures) and self._options.retry_failures and not self._retrying and not interrupted and not keyboard_interrupted):
- _log.info('')
- _log.info("Retrying %d unexpected failure(s) ..." % len(failures))
- _log.info('')
- self._retrying = True
- retry_summary = ResultSummary(self._expectations, failures.keys(), 1, set())
- # Note that we intentionally ignore the return value here.
- self._run_tests(failures.keys(), retry_summary, 1)
- failures = self._get_failures(retry_summary, include_crashes=True, include_missing=True)
+ if enabled_pixel_tests_in_retry:
+ self._options.pixel_tests = False
+ else:
+ retry_results = None
+ finally:
+ self._clean_up_run()
end_time = time.time()
# Some crash logs can take a long time to be written out so look
# for new logs after the test run finishes.
- self._look_for_new_crash_logs(result_summary, start_time)
- self._look_for_new_crash_logs(retry_summary, start_time)
- self._clean_up_run()
+ _log.debug("looking for new crash logs")
+ self._look_for_new_crash_logs(initial_results, start_time)
+ if retry_results:
+ self._look_for_new_crash_logs(retry_results, start_time)
- unexpected_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=True, interrupted=interrupted)
+ _log.debug("summarizing results")
+ summarized_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
+ self._printer.print_results(end_time - start_time, initial_results, summarized_results)
- self._printer.print_results(end_time - start_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results)
-
- # Re-raise a KeyboardInterrupt if necessary so the caller can handle it.
- if keyboard_interrupted:
- raise KeyboardInterrupt
-
- # FIXME: remove record_results. It's just used for testing. There's no need
- # for it to be a commandline argument.
- if (self._options.record_results and not self._options.dry_run and not keyboard_interrupted):
+ if not self._options.dry_run:
self._port.print_leaks_summary()
- # Write the same data to log files and upload generated JSON files to appengine server.
- summarized_results = summarize_results(self._port, self._expectations, result_summary, retry_summary, individual_test_timings, only_unexpected=False, interrupted=interrupted)
- self._upload_json_files(summarized_results, result_summary, individual_test_timings)
+ self._upload_json_files(summarized_results, initial_results)
- # Write the summary to disk (results.html) and display it if requested.
- if not self._options.dry_run:
- self._copy_results_html_file()
- if self._options.show_results:
- self._show_results_html_file(result_summary)
+ results_path = self._filesystem.join(self._results_directory, "results.html")
+ self._copy_results_html_file(results_path)
+ if self._options.show_results and (initial_results.unexpected_results_by_name or
+ (self._options.full_results_html and initial_results.total_failures)):
+ self._port.show_results_html_file(results_path)
+
+ return test_run_results.RunDetails(self._port.exit_code_from_summarized_results(summarized_results),
+ summarized_results, initial_results, retry_results, enabled_pixel_tests_in_retry)
- return self._port.exit_code_from_summarized_results(unexpected_results)
+ def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
+ needs_http = any(self._is_http_test(test) for test in tests_to_run)
+ needs_websockets = any(self._is_websocket_test(test) for test in tests_to_run)
- def _run_tests(self, tests, result_summary, num_workers):
- test_inputs = [self._test_input_for_file(test) for test in tests]
- needs_http = self._port.requires_http_server() or any(self._is_http_test(test) for test in tests)
- needs_websockets = any(self._is_websocket_test(test) for test in tests)
- return self._runner.run_tests(test_inputs, self._expectations, result_summary, num_workers, needs_http, needs_websockets, self._retrying)
+ test_inputs = []
+ for _ in xrange(iterations):
+ for test in tests_to_run:
+ for _ in xrange(repeat_each):
+ test_inputs.append(self._test_input_for_file(test))
+ return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying)
def _clean_up_run(self):
- """Restores the system after we're done running tests."""
- _log.debug("flushing stdout")
+ _log.debug("Flushing stdout")
sys.stdout.flush()
- _log.debug("flushing stderr")
+ _log.debug("Flushing stderr")
sys.stderr.flush()
- _log.debug("stopping helper")
+ _log.debug("Stopping helper")
self._port.stop_helper()
- _log.debug("cleaning up port")
+ _log.debug("Cleaning up port")
self._port.clean_up_test_run()
- def _look_for_new_crash_logs(self, result_summary, start_time):
+ def _force_pixel_tests_if_needed(self):
+ if self._options.pixel_tests:
+ return False
+
+ _log.debug("Restarting helper")
+ self._port.stop_helper()
+ self._options.pixel_tests = True
+ self._port.start_helper()
+
+ return True
+
+ def _look_for_new_crash_logs(self, run_results, start_time):
"""Since crash logs can take a long time to be written out if the system is
under stress, do a second pass at the end of the test run.
- result_summary: the results of the test run
+ run_results: the results of the test run
start_time: time the tests started at. We're looking for crash
logs after that time.
"""
crashed_processes = []
- for test, result in result_summary.unexpected_results.iteritems():
+ for test, result in run_results.unexpected_results_by_name.iteritems():
if (result.type != test_expectations.CRASH):
continue
for failure in result.failures:
@@ -488,6 +289,12 @@ class Manager(object):
continue
crashed_processes.append([test, failure.process_name, failure.pid])
+ sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
+ if sample_files:
+ for test, sample_file in sample_files.iteritems():
+ writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
+ writer.copy_sample_file(sample_file)
+
crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
if crash_logs:
for test, crash_log in crash_logs.iteritems():
@@ -506,53 +313,31 @@ class Manager(object):
if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))
- def _get_failures(self, result_summary, include_crashes, include_missing):
- """Filters a dict of results and returns only the failures.
-
- Args:
- result_summary: the results of the test run
- include_crashes: whether crashes are included in the output.
- We use False when finding the list of failures to retry
- to see if the results were flaky. Although the crashes may also be
- flaky, we treat them as if they aren't so that they're not ignored.
- Returns:
- a dict of files -> results
- """
- failed_results = {}
- for test, result in result_summary.unexpected_results.iteritems():
- if (result.type == test_expectations.PASS or
- (result.type == test_expectations.CRASH and not include_crashes) or
- (result.type == test_expectations.MISSING and not include_missing)):
- continue
- failed_results[test] = result.type
+ def _tests_to_retry(self, run_results, include_crashes):
+ return [result.test_name for result in run_results.unexpected_results_by_name.values() if
+ ((result.type != test_expectations.PASS) and
+ (result.type != test_expectations.MISSING) and
+ (result.type != test_expectations.CRASH or include_crashes))]
- return failed_results
-
- def _char_for_result(self, result):
- result = result.lower()
- if result in TestExpectations.EXPECTATIONS:
- result_enum_value = TestExpectations.EXPECTATIONS[result]
- else:
- result_enum_value = TestExpectations.MODIFIERS[result]
- return json_layout_results_generator.JSONLayoutResultsGenerator.FAILURE_TO_CHAR[result_enum_value]
-
- def _upload_json_files(self, summarized_results, result_summary, individual_test_timings):
+ def _upload_json_files(self, summarized_results, initial_results):
"""Writes the results of the test run as JSON files into the results
dir and uploads the files to the appengine server.
Args:
- unexpected_results: dict of unexpected results
summarized_results: dict of results
- result_summary: full summary object
- individual_test_timings: list of test times (used by the flakiness
- dashboard).
+ initial_results: full summary object
"""
_log.debug("Writing JSON files in %s." % self._results_directory)
- times_trie = json_results_generator.test_timings_trie(self._port, individual_test_timings)
+ # FIXME: Upload stats.json to the server and delete times_ms.
+ times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
json_results_generator.write_json(self._filesystem, times_trie, times_json_path)
+ stats_trie = self._stats_trie(initial_results)
+ stats_path = self._filesystem.join(self._results_directory, "stats.json")
+ self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))
+
full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
# We write full_results.json out as jsonp because we need to load it from a file url and Chromium doesn't allow that.
json_results_generator.write_json(self._filesystem, summarized_results, full_results_path, callback="ADD_RESULTS")
@@ -560,14 +345,15 @@ class Manager(object):
generator = json_layout_results_generator.JSONLayoutResultsGenerator(
self._port, self._options.builder_name, self._options.build_name,
self._options.build_number, self._results_directory,
- BUILDER_BASE_URL, individual_test_timings,
- self._expectations, result_summary, self._test_names,
+ BUILDER_BASE_URL,
+ self._expectations, initial_results,
self._options.test_results_server,
"layout-tests",
self._options.master_name)
_log.debug("Finished writing JSON files.")
+
json_files = ["incremental_results.json", "full_results.json", "times_ms.json"]
generator.upload_json_files(json_files)
@@ -579,31 +365,23 @@ class Manager(object):
self._filesystem.remove(times_json_path)
self._filesystem.remove(incremental_results_path)
- def _num_digits(self, num):
- """Returns the number of digits needed to represent the length of a
- sequence."""
- ndigits = 1
- if len(num):
- ndigits = int(math.log10(len(num))) + 1
- return ndigits
-
- def _copy_results_html_file(self):
+ def _copy_results_html_file(self, destination_path):
base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
results_file = self._filesystem.join(base_dir, 'results.html')
- # FIXME: What should we do if this doesn't exist (e.g., in unit tests)?
+ # Note that the results.html template file won't exist when we're using a MockFileSystem during unit tests,
+ # so make sure it exists before we try to copy it.
if self._filesystem.exists(results_file):
- self._filesystem.copyfile(results_file, self._filesystem.join(self._results_directory, "results.html"))
-
- def _show_results_html_file(self, result_summary):
- """Shows the results.html page."""
- if self._options.full_results_html:
- test_files = result_summary.failures.keys()
- else:
- unexpected_failures = self._get_failures(result_summary, include_crashes=True, include_missing=True)
- test_files = unexpected_failures.keys()
-
- if not len(test_files):
- return
-
- results_filename = self._filesystem.join(self._results_directory, "results.html")
- self._port.show_results_html_file(results_filename)
+ self._filesystem.copyfile(results_file, destination_path)
+
+ def _stats_trie(self, initial_results):
+ def _worker_number(worker_name):
+ return int(worker_name.split('/')[1]) if worker_name else -1
+
+ stats = {}
+ for result in initial_results.results_by_name.values():
+ if result.type != test_expectations.SKIP:
+ stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}
+ stats_trie = {}
+ for name, value in stats.iteritems():
+ json_results_generator.add_path_to_trie(name, value, stats_trie)
+ return stats_trie
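For orientation, the new _stats_trie helper above nests per-test stats by directory path via json_results_generator.add_path_to_trie. A minimal standalone sketch of that nesting, assuming add_path_to_trie simply splits the test name on '/' and builds nested dicts (the helper itself is not shown in this patch, and the example test names and numbers are made up):

def add_path_to_trie(path, value, trie):
    # Nest a dict level for each directory component of "foo/bar/baz.html".
    if "/" not in path:
        trie[path] = value
        return
    directory, rest = path.split("/", 1)
    trie.setdefault(directory, {})
    add_path_to_trie(rest, value, trie[directory])

stats_trie = {}
add_path_to_trie("passes/text.html", {"results": (0, 1, 1234, 12, 15)}, stats_trie)
add_path_to_trie("passes/image.html", {"results": (0, 2, 1234, 30, 33)}, stats_trie)
# stats_trie == {"passes": {"text.html": {...}, "image.html": {...}}}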
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
index e94d1332b..4a8154878 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
@@ -32,173 +31,60 @@
import sys
import time
-import unittest
+import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
-from webkitpy.layout_tests.controllers.manager import Manager, interpret_test_failures, summarize_results
+from webkitpy.layout_tests.controllers.manager import Manager
from webkitpy.layout_tests.models import test_expectations
-from webkitpy.layout_tests.models import test_failures
-from webkitpy.layout_tests.models import test_results
-from webkitpy.layout_tests.models.result_summary import ResultSummary
+from webkitpy.layout_tests.models.test_run_results import TestRunResults
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
class ManagerTest(unittest.TestCase):
def test_needs_servers(self):
- def get_manager_with_tests(test_names):
+ def get_manager():
port = Mock() # FIXME: Use a tighter mock.
port.TEST_PATH_SEPARATOR = '/'
manager = Manager(port, options=MockOptions(http=True, max_locked_shards=1), printer=Mock())
- manager._test_names = test_names
return manager
- manager = get_manager_with_tests(['fast/html'])
- self.assertFalse(manager.needs_servers())
+ manager = get_manager()
+ self.assertFalse(manager.needs_servers(['fast/html']))
- manager = get_manager_with_tests(['http/tests/misc'])
- self.assertTrue(manager.needs_servers())
+ manager = get_manager()
+ self.assertTrue(manager.needs_servers(['http/tests/misc']))
def integration_test_needs_servers(self):
- def get_manager_with_tests(test_names):
+ def get_manager():
host = MockHost()
port = host.port_factory.get()
manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
- manager._collect_tests(test_names)
return manager
- manager = get_manager_with_tests(['fast/html'])
- self.assertFalse(manager.needs_servers())
+ manager = get_manager()
+ self.assertFalse(manager.needs_servers(['fast/html']))
- manager = get_manager_with_tests(['http/tests/mime'])
- self.assertTrue(manager.needs_servers())
+ manager = get_manager()
+ self.assertTrue(manager.needs_servers(['http/tests/mime']))
if sys.platform == 'win32':
- manager = get_manager_with_tests(['fast\\html'])
- self.assertFalse(manager.needs_servers())
+ manager = get_manager()
+ self.assertFalse(manager.needs_servers(['fast\\html']))
- manager = get_manager_with_tests(['http\\tests\\mime'])
- self.assertTrue(manager.needs_servers())
+ manager = get_manager()
+ self.assertTrue(manager.needs_servers(['http\\tests\\mime']))
def test_look_for_new_crash_logs(self):
- def get_manager_with_tests(test_names):
+ def get_manager():
host = MockHost()
port = host.port_factory.get('test-mac-leopard')
manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
- manager._collect_tests(test_names)
return manager
host = MockHost()
port = host.port_factory.get('test-mac-leopard')
tests = ['failures/expected/crash.html']
expectations = test_expectations.TestExpectations(port, tests)
- rs = ResultSummary(expectations, tests, 1, set())
- manager = get_manager_with_tests(tests)
- manager._look_for_new_crash_logs(rs, time.time())
-
-
-class ResultSummaryTest(unittest.TestCase):
-
- def setUp(self):
- host = MockHost()
- self.port = host.port_factory.get(port_name='test')
-
- def test_interpret_test_failures(self):
- test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
- [test_failures.FailureImageHashMismatch(diff_percent=0.42)])
- self.assertEqual(test_dict['image_diff_percent'], 0.42)
-
- test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
- [test_failures.FailureReftestMismatch(self.port.abspath_for_test('foo/reftest-expected.html'))])
- self.assertTrue('image_diff_percent' in test_dict)
-
- test_dict = interpret_test_failures(self.port, 'foo/reftest.html',
- [test_failures.FailureReftestMismatchDidNotOccur(self.port.abspath_for_test('foo/reftest-expected-mismatch.html'))])
- self.assertEqual(len(test_dict), 0)
-
- test_dict = interpret_test_failures(self.port, 'foo/audio-test.html',
- [test_failures.FailureMissingAudio()])
- self.assertTrue('is_missing_audio' in test_dict)
-
- test_dict = interpret_test_failures(self.port, 'foo/text-test.html',
- [test_failures.FailureMissingResult()])
- self.assertTrue('is_missing_text' in test_dict)
-
- test_dict = interpret_test_failures(self.port, 'foo/pixel-test.html',
- [test_failures.FailureMissingImage()])
- self.assertTrue('is_missing_image' in test_dict)
-
- test_dict = interpret_test_failures(self.port, 'foo/pixel-test.html',
- [test_failures.FailureMissingImageHash()])
- self.assertTrue('is_missing_image' in test_dict)
-
- def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
- failures = []
- if result_type == test_expectations.TIMEOUT:
- failures = [test_failures.FailureTimeout()]
- elif result_type == test_expectations.CRASH:
- failures = [test_failures.FailureCrash()]
- return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
-
- def get_result_summary(self, port, test_names, expectations_str):
- port.expectations_dict = lambda: {'': expectations_str}
- expectations = test_expectations.TestExpectations(port, test_names)
- return test_names, ResultSummary(expectations, test_names, 1, set()), expectations
-
- # FIXME: Use this to test more of summarize_results. This was moved from printing_unittest.py.
- def summarized_results(self, port, expected, passing, flaky, extra_tests=[], extra_expectations=None):
- tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html', 'failures/expected/wontfix.html']
- if extra_tests:
- tests.extend(extra_tests)
-
- expectations = ''
- if extra_expectations:
- expectations += extra_expectations
-
- test_is_slow = False
- paths, rs, exp = self.get_result_summary(port, tests, expectations)
- if expected:
- rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
- elif passing:
- rs.add(self.get_result('passes/text.html'), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/timeout.html'), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/crash.html'), expected, test_is_slow)
- else:
- rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
-
- for test in extra_tests:
- rs.add(self.get_result(test, test_expectations.CRASH), expected, test_is_slow)
-
- retry = rs
- if flaky:
- paths, retry, exp = self.get_result_summary(port, tests, expectations)
- retry.add(self.get_result('passes/text.html'), True, test_is_slow)
- retry.add(self.get_result('failures/expected/timeout.html'), True, test_is_slow)
- retry.add(self.get_result('failures/expected/crash.html'), True, test_is_slow)
- unexpected_results = summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
- expected_results = summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=False, interrupted=False)
- return expected_results, unexpected_results
-
- def test_no_svn_revision(self):
- host = MockHost(initialize_scm_by_default=False)
- port = host.port_factory.get('test')
- expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False)
- self.assertTrue('revision' not in unexpected_results)
-
- def test_svn_revision(self):
- host = MockHost(initialize_scm_by_default=False)
- port = host.port_factory.get('test')
- port._options.builder_name = 'dummy builder'
- expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False)
- self.assertNotEquals(unexpected_results['revision'], '')
-
- def test_summarized_results_wontfix(self):
- host = MockHost()
- port = host.port_factory.get('test')
- port._options.builder_name = 'dummy builder'
- port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), "failures/expected/wontfix.html"), "Dummy test contents")
- expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False, extra_tests=['failures/expected/wontfix.html'], extra_expectations='Bug(x) failures/expected/wontfix.html [ WontFix ]\n')
- self.assertTrue(expected_results['tests']['failures']['expected']['wontfix.html']['wontfix'])
+ run_results = TestRunResults(expectations, len(tests))
+ manager = get_manager()
+ manager._look_for_new_crash_logs(run_results, time.time())
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index 28e9d63f0..53f25ce3b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -32,7 +32,7 @@ import re
import time
from webkitpy.layout_tests.controllers import test_result_writer
-from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
+from webkitpy.port.driver import DriverInput, DriverOutput
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_results import TestResult
@@ -41,18 +41,19 @@ from webkitpy.layout_tests.models.test_results import TestResult
_log = logging.getLogger(__name__)
-def run_single_test(port, options, test_input, driver, worker_name, stop_when_done):
- runner = SingleTestRunner(options, port, driver, test_input, worker_name, stop_when_done)
+def run_single_test(port, options, results_directory, worker_name, driver, test_input, stop_when_done):
+ runner = SingleTestRunner(port, options, results_directory, worker_name, driver, test_input, stop_when_done)
return runner.run()
class SingleTestRunner(object):
(ALONGSIDE_TEST, PLATFORM_DIR, VERSION_DIR, UPDATE) = ('alongside', 'platform', 'version', 'update')
- def __init__(self, options, port, driver, test_input, worker_name, stop_when_done):
- self._options = options
+ def __init__(self, port, options, results_directory, worker_name, driver, test_input, stop_when_done):
self._port = port
self._filesystem = port.host.filesystem
+ self._options = options
+ self._results_directory = results_directory
self._driver = driver
self._timeout = test_input.timeout
self._worker_name = worker_name
@@ -114,17 +115,17 @@ class SingleTestRunner(object):
test_result = self._compare_output(expected_driver_output, driver_output)
if self._options.new_test_results:
self._add_missing_baselines(test_result, driver_output)
- test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, expected_driver_output, test_result.failures)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, expected_driver_output, test_result.failures)
return test_result
def _run_rebaseline(self):
driver_output = self._driver.run_test(self._driver_input(), self._stop_when_done)
failures = self._handle_error(driver_output)
- test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, driver_output, None, failures)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, driver_output, None, failures)
# FIXME: If the test crashed or timed out, it might be better to avoid
# writing new baselines.
self._overwrite_baselines(driver_output)
- return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
+ return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
_render_tree_dump_pattern = re.compile(r"^layer at \(\d+,\d+\) size \d+x\d+\n")
@@ -217,13 +218,13 @@ class SingleTestRunner(object):
if driver_output.crash:
# Don't continue any more if we already have a crash.
# In case of timeouts, we continue since we still want to see the text and image output.
- return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
+ return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
failures.extend(self._compare_text(expected_driver_output.text, driver_output.text))
failures.extend(self._compare_audio(expected_driver_output.audio, driver_output.audio))
if self._should_run_pixel_test:
failures.extend(self._compare_image(expected_driver_output, driver_output))
- return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr())
+ return TestResult(self._test_name, failures, driver_output.test_time, driver_output.has_stderr(), pid=driver_output.pid)
def _compare_text(self, expected_text, actual_text):
failures = []
@@ -293,8 +294,10 @@ class SingleTestRunner(object):
# Note that sorting by the expectation sorts "!=" before "==" so this is easy to do.
putAllMismatchBeforeMatch = sorted
+ reference_test_names = []
for expectation, reference_filename in putAllMismatchBeforeMatch(self._reference_files):
reference_test_name = self._port.relative_test_filename(reference_filename)
+ reference_test_names.append(reference_test_name)
reference_output = self._driver.run_test(DriverInput(reference_test_name, self._timeout, None, should_run_pixel_test=True), self._stop_when_done)
test_result = self._compare_output_with_reference(reference_output, test_output, reference_filename, expectation == '!=')
@@ -303,9 +306,9 @@ class SingleTestRunner(object):
total_test_time += test_result.test_run_time
assert(reference_output)
- test_result_writer.write_test_result(self._filesystem, self._port, self._test_name, test_output, reference_output, test_result.failures)
+ test_result_writer.write_test_result(self._filesystem, self._port, self._results_directory, self._test_name, test_output, reference_output, test_result.failures)
reftest_type = set([reference_file[0] for reference_file in self._reference_files])
- return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type)
+ return TestResult(self._test_name, test_result.failures, total_test_time + test_result.test_run_time, test_result.has_stderr, reftest_type=reftest_type, pid=test_result.pid, references=reference_test_names)
def _compare_output_with_reference(self, reference_driver_output, actual_driver_output, reference_filename, mismatch):
total_test_time = reference_driver_output.test_time + actual_driver_output.test_time
@@ -317,7 +320,7 @@ class SingleTestRunner(object):
return TestResult(self._test_name, failures, total_test_time, has_stderr)
failures.extend(self._handle_error(reference_driver_output, reference_filename=reference_filename))
if failures:
- return TestResult(self._test_name, failures, total_test_time, has_stderr)
+ return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
if not reference_driver_output.image_hash and not actual_driver_output.image_hash:
failures.append(test_failures.FailureReftestNoImagesGenerated(reference_filename))
@@ -336,4 +339,4 @@ class SingleTestRunner(object):
else:
_log.warning(" %s -> ref test hashes didn't match but diff passed" % self._test_name)
- return TestResult(self._test_name, failures, total_test_time, has_stderr)
+ return TestResult(self._test_name, failures, total_test_time, has_stderr, pid=actual_driver_output.pid)
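As an aside on the reftest loop above: the "putAllMismatchBeforeMatch = sorted" trick works because "!" precedes "=" in ASCII, so plain lexicographic sorting of (expectation, filename) pairs places "!=" references ahead of "==" ones. A minimal illustration, not part of the patch and using made-up file names:

reference_files = [("==", "/tests/foo-expected.html"),
                   ("!=", "/tests/foo-expected-mismatch.html")]
print(sorted(reference_files))
# [('!=', '/tests/foo-expected-mismatch.html'), ('==', '/tests/foo-expected.html')]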
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
index be178ab32..23e44d50b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
@@ -35,10 +35,10 @@ from webkitpy.layout_tests.models import test_failures
_log = logging.getLogger(__name__)
-def write_test_result(filesystem, port, test_name, driver_output,
+def write_test_result(filesystem, port, results_directory, test_name, driver_output,
expected_driver_output, failures):
"""Write the test result to the result output directory."""
- root_output_dir = port.results_directory()
+ root_output_dir = results_directory
writer = TestResultWriter(filesystem, port, root_output_dir, test_name)
if driver_output.error:
@@ -92,6 +92,7 @@ class TestResultWriter(object):
FILENAME_SUFFIX_DIFF = "-diff"
FILENAME_SUFFIX_STDERR = "-stderr"
FILENAME_SUFFIX_CRASH_LOG = "-crash-log"
+ FILENAME_SUFFIX_SAMPLE = "-sample"
FILENAME_SUFFIX_WDIFF = "-wdiff.html"
FILENAME_SUFFIX_PRETTY_PATCH = "-pretty-diff.html"
FILENAME_SUFFIX_IMAGE_DIFF = "-diff.png"
@@ -166,6 +167,10 @@ class TestResultWriter(object):
filename = self.output_filename(self.FILENAME_SUFFIX_CRASH_LOG + ".txt")
self._write_text_file(filename, crash_log)
+ def copy_sample_file(self, sample_file):
+ filename = self.output_filename(self.FILENAME_SUFFIX_SAMPLE + ".txt")
+ self._filesystem.copyfile(sample_file, filename)
+
def write_text_files(self, actual_text, expected_text):
self.write_output_files(".txt", actual_text, expected_text)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
index dfd604187..f484da55e 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -24,13 +23,13 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.controllers import test_result_writer
from webkitpy.layout_tests.models import test_failures
-from webkitpy.layout_tests.port.driver import DriverOutput
-from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.port.driver import DriverOutput
+from webkitpy.port.test import TestPort
class TestResultWriterTest(unittest.TestCase):
@@ -51,10 +50,6 @@ class TestResultWriterTest(unittest.TestCase):
driver_output1 = DriverOutput('text1', 'image1', 'imagehash1', 'audio1')
driver_output2 = DriverOutput('text2', 'image2', 'imagehash2', 'audio2')
failures = [test_failures.FailureReftestMismatch(test_reference_file)]
- test_result_writer.write_test_result(host.filesystem, ImageDiffTestPort(host), test_name,
+ test_result_writer.write_test_result(host.filesystem, ImageDiffTestPort(host), port.results_directory(), test_name,
driver_output1, driver_output2, failures)
self.assertEqual([0], used_tolerance_values)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
index f277c93de..715497de1 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_layout_results_generator.py
@@ -32,7 +32,8 @@ from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
-class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase):
+
+class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGenerator):
"""A JSON results generator for layout tests."""
LAYOUT_TESTS_PATH = "LayoutTests"
@@ -40,8 +41,8 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
# Additional JSON fields.
WONTFIX = "wontfixCounts"
- FAILURE_TO_CHAR = {test_expectations.PASS: json_results_generator.JSONResultsGeneratorBase.PASS_RESULT,
- test_expectations.SKIP: json_results_generator.JSONResultsGeneratorBase.SKIP_RESULT,
+ FAILURE_TO_CHAR = {test_expectations.PASS: json_results_generator.JSONResultsGenerator.PASS_RESULT,
+ test_expectations.SKIP: json_results_generator.JSONResultsGenerator.SKIP_RESULT,
test_expectations.CRASH: "C",
test_expectations.TIMEOUT: "T",
test_expectations.IMAGE: "I",
@@ -52,14 +53,13 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
- test_timings, expectations, result_summary, all_tests,
+ expectations, run_results,
test_results_server=None, test_type="", master_name=""):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
Args:
- result_summary: ResultsSummary object storing the summary of the test
- results.
+ run_results: TestRunResults object storing the details of the test run.
"""
super(JSONLayoutResultsGenerator, self).__init__(
port, builder_name, build_name, build_number, results_file_base_path,
@@ -68,10 +68,9 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
self._expectations = expectations
- self._result_summary = result_summary
- self._failures = dict((test_name, result_summary.results[test_name].type) for test_name in result_summary.failures)
- self._all_tests = all_tests
- self._test_timings = dict((test_tuple.test_name, test_tuple.test_run_time) for test_tuple in test_timings)
+ self._run_results = run_results
+ self._failures = dict((test_name, run_results.results_by_name[test_name].type) for test_name in run_results.failures_by_name)
+ self._test_timings = run_results.results_by_name
self.generate_json_output()
@@ -99,7 +98,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
def _get_test_timing(self, test_name):
if test_name in self._test_timings:
# Floor for now to get time in seconds.
- return int(self._test_timings[test_name])
+ return int(self._test_timings[test_name].test_run_time)
return 0
# override
@@ -108,7 +107,7 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
# override
def _get_modifier_char(self, test_name):
- if test_name not in self._all_tests:
+ if test_name not in self._run_results.results_by_name:
return self.NO_DATA_RESULT
if test_name in self._failures:
@@ -122,12 +121,12 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
# override
def _insert_failure_summaries(self, results_for_builder):
- summary = self._result_summary
+ run_results = self._run_results
self._insert_item_into_raw_list(results_for_builder,
- len((set(summary.failures.keys()) |
- summary.tests_by_expectation[test_expectations.SKIP]) &
- summary.tests_by_timeline[test_expectations.NOW]),
+ len((set(run_results.failures_by_name.keys()) |
+ run_results.tests_by_expectation[test_expectations.SKIP]) &
+ run_results.tests_by_timeline[test_expectations.NOW]),
self.FIXABLE_COUNT)
self._insert_item_into_raw_list(results_for_builder,
self._get_failure_summary_entry(test_expectations.NOW),
@@ -154,23 +153,22 @@ class JSONLayoutResultsGenerator(json_results_generator.JSONResultsGeneratorBase
"""Creates a summary object to insert into the JSON.
Args:
- summary ResultSummary object with test results
timeline current test_expectations timeline to build entry for
(e.g., test_expectations.NOW, etc.)
"""
entry = {}
- summary = self._result_summary
- timeline_tests = summary.tests_by_timeline[timeline]
+ run_results = self._run_results
+ timeline_tests = run_results.tests_by_timeline[timeline]
entry[self.SKIP_RESULT] = len(
- summary.tests_by_expectation[test_expectations.SKIP] &
+ run_results.tests_by_expectation[test_expectations.SKIP] &
timeline_tests)
entry[self.PASS_RESULT] = len(
- summary.tests_by_expectation[test_expectations.PASS] &
+ run_results.tests_by_expectation[test_expectations.PASS] &
timeline_tests)
- for failure_type in summary.tests_by_expectation.keys():
+ for failure_type in run_results.tests_by_expectation.keys():
if failure_type not in self.FAILURE_TO_CHAR:
continue
- count = len(summary.tests_by_expectation[failure_type] &
+ count = len(run_results.tests_by_expectation[failure_type] &
timeline_tests)
entry[self.FAILURE_TO_CHAR[failure_type]] = count
return entry
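
For illustration, a minimal standalone sketch (not part of the patch) of the new timing lookup in this generator: _test_timings now holds whole TestResult objects keyed by test name, and the time is floored to whole seconds. The FakeTestResult class and the sample data are hypothetical.

    class FakeTestResult(object):
        # Stands in for webkitpy's TestResult; only test_run_time matters here.
        def __init__(self, test_run_time):
            self.test_run_time = test_run_time

    def get_test_timing(results_by_name, test_name):
        if test_name in results_by_name:
            # Floor for now to get time in seconds, as the generator does.
            return int(results_by_name[test_name].test_run_time)
        return 0

    results_by_name = {'fast/dom/example.html': FakeTestResult(1.7)}
    print get_test_timing(results_by_name, 'fast/dom/example.html')  # 1
    print get_test_timing(results_by_name, 'no-such-test.html')      # 0
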
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
index 73834f0ad..a2c2e0b63 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator.py
@@ -150,7 +150,7 @@ class TestResult(object):
return self.failed or self.modifier == self.DISABLED
-class JSONResultsGeneratorBase(object):
+class JSONResultsGenerator(object):
"""A JSON results generator for generic tests."""
MAX_NUMBER_OF_BUILD_RESULTS_TO_LOG = 750
@@ -188,16 +188,12 @@ class JSONResultsGeneratorBase(object):
URL_FOR_TEST_LIST_JSON = "http://%s/testfile?builder=%s&name=%s&testlistjson=1&testtype=%s&master=%s"
- # FIXME: Remove generate_incremental_results once the reference to it in
- # http://src.chromium.org/viewvc/chrome/trunk/tools/build/scripts/slave/gtest_slave_utils.py
- # has been removed.
def __init__(self, port, builder_name, build_name, build_number,
results_file_base_path, builder_base_url,
test_results_map, svn_repositories=None,
test_results_server=None,
test_type="",
- master_name="",
- generate_incremental_results=None):
+ master_name=""):
"""Modifies the results.json file. Grabs it off the archive directory
if it is not found locally.
@@ -524,17 +520,9 @@ class JSONResultsGeneratorBase(object):
# Include SVN revisions for the given repositories.
for (name, path) in self._svn_repositories:
- # Note: for JSON file's backward-compatibility we use 'chrome' rather
- # than 'chromium' here.
- if name == 'chromium':
- name = 'chrome'
- self._insert_item_into_raw_list(results_for_builder,
- self._get_svn_revision(path),
- name + 'Revision')
+ self._insert_item_into_raw_list(results_for_builder, self._get_svn_revision(path), name.lower() + 'Revision')
- self._insert_item_into_raw_list(results_for_builder,
- int(time.time()),
- self.TIME)
+ self._insert_item_into_raw_list(results_for_builder, int(time.time()), self.TIME)
def _insert_test_time_and_result(self, test_name, tests):
""" Insert a test item with its results to the given tests dictionary.
@@ -653,8 +641,3 @@ class JSONResultsGeneratorBase(object):
"""Returns whether all the results are of the given type
(e.g. all passes)."""
return len(results) == 1 and results[0][1] == type
-
-
-# Left here not to break anything.
-class JSONResultsGenerator(JSONResultsGeneratorBase):
- pass
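
The revision bookkeeping above is now uniform across repositories: the key is the lower-cased repository name plus 'Revision', with no special-casing of 'chromium' as 'chrome'. A small sketch with hypothetical repository data:

    import time

    def revision_key(name):
        # 'WebKit' -> 'webkitRevision'
        return name.lower() + 'Revision'

    svn_repositories = [('WebKit', '/path/to/WebKit')]  # example data
    for name, path in svn_repositories:
        print revision_key(name)   # webkitRevision
    print int(time.time())         # the value stored under the TIME key
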
diff --git a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
index f04300f83..c952e33bc 100644
--- a/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
import json
import optparse
import random
@@ -34,7 +34,7 @@ import random
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
-from webkitpy.layout_tests.port import test
+from webkitpy.port import test
from webkitpy.thirdparty.mock import Mock
@@ -91,7 +91,7 @@ class JSONGeneratorTest(unittest.TestCase):
host = MockHost()
port = Mock()
port._filesystem = host.filesystem
- generator = json_results_generator.JSONResultsGeneratorBase(port,
+ generator = json_results_generator.JSONResultsGenerator(port,
self.builder_name, self.build_name, self.build_number,
'',
None, # don't fetch past json results archive
@@ -121,14 +121,14 @@ class JSONGeneratorTest(unittest.TestCase):
fixable_count,
json, num_runs):
# Aliasing to a short name for better access to its constants.
- JRG = json_results_generator.JSONResultsGeneratorBase
+ JRG = json_results_generator.JSONResultsGenerator
- self.assertTrue(JRG.VERSION_KEY in json)
- self.assertTrue(self.builder_name in json)
+ self.assertIn(JRG.VERSION_KEY, json)
+ self.assertIn(self.builder_name, json)
buildinfo = json[self.builder_name]
- self.assertTrue(JRG.FIXABLE in buildinfo)
- self.assertTrue(JRG.TESTS in buildinfo)
+ self.assertIn(JRG.FIXABLE, buildinfo)
+ self.assertIn(JRG.TESTS, buildinfo)
self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)
@@ -181,7 +181,7 @@ class JSONGeneratorTest(unittest.TestCase):
nodes = path.split("/")
sub_trie = trie
for node in nodes:
- self.assertTrue(node in sub_trie)
+ self.assertIn(node, sub_trie)
sub_trie = sub_trie[node]
return sub_trie
@@ -229,7 +229,3 @@ class JSONGeneratorTest(unittest.TestCase):
}
self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py
new file mode 100644
index 000000000..90e4cbb51
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import optparse
+import signal
+import traceback
+
+from webkitpy.common.host import Host
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.port import platform_options
+
+
+# This mirrors what the shell normally does.
+INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
+
+# This is a randomly chosen exit code that can be tested against to
+# indicate that an unexpected exception occurred.
+EXCEPTIONAL_EXIT_STATUS = 254
+
+_log = logging.getLogger(__name__)
+
+
+def lint(host, options, logging_stream):
+ logger = logging.getLogger()
+ logger.setLevel(logging.INFO)
+ handler = logging.StreamHandler(logging_stream)
+ logger.addHandler(handler)
+
+ try:
+ ports_to_lint = [host.port_factory.get(name) for name in host.port_factory.all_port_names(options.platform)]
+ files_linted = set()
+ lint_failed = False
+
+ for port_to_lint in ports_to_lint:
+ expectations_dict = port_to_lint.expectations_dict()
+
+ # FIXME: This won't work if multiple ports share a TestExpectations file but support different modifiers in the file.
+ for expectations_file in expectations_dict.keys():
+ if expectations_file in files_linted:
+ continue
+
+ try:
+ test_expectations.TestExpectations(port_to_lint,
+ expectations_to_lint={expectations_file: expectations_dict[expectations_file]})
+ except test_expectations.ParseError as e:
+ lint_failed = True
+ _log.error('')
+ for warning in e.warnings:
+ _log.error(warning)
+ _log.error('')
+ files_linted.add(expectations_file)
+
+ if lint_failed:
+ _log.error('Lint failed.')
+ return -1
+
+ _log.info('Lint succeeded.')
+ return 0
+ finally:
+ logger.removeHandler(handler)
+
+
+def main(argv, _, stderr):
+ parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
+ options, _ = parser.parse_args(argv)
+
+ if options.platform and 'test' in options.platform:
+ # It's a bit lame to import mocks into real code, but this allows the user
+ # to run tests against the test platform interactively, which is useful for
+ # debugging test failures.
+ from webkitpy.common.host_mock import MockHost
+ host = MockHost()
+ else:
+ host = Host()
+
+ try:
+ exit_status = lint(host, options, stderr)
+ except KeyboardInterrupt:
+ exit_status = INTERRUPTED_EXIT_STATUS
+ except Exception as e:
+ print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
+ traceback.print_exc(file=stderr)
+ exit_status = EXCEPTIONAL_EXIT_STATUS
+
+ return exit_status
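
A hypothetical invocation of the new linter module, assuming it is driven directly from Python; the platform glob is example data:

    import sys
    from webkitpy.layout_tests import lint_test_expectations

    # Lint the TestExpectations files of every port whose name matches the glob.
    exit_status = lint_test_expectations.main(['--platform', 'qt*'], sys.stdout, sys.stderr)
    sys.exit(exit_status)
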
diff --git a/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py
new file mode 100644
index 000000000..47280292a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py
@@ -0,0 +1,157 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import optparse
+import StringIO
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests import lint_test_expectations
+
+
+class FakePort(object):
+ def __init__(self, host, name, path):
+ self.host = host
+ self.name = name
+ self.path = path
+
+ def test_configuration(self):
+ return None
+
+ def expectations_dict(self):
+ self.host.ports_parsed.append(self.name)
+ return {self.path: ''}
+
+ def skipped_layout_tests(self, _):
+ return set([])
+
+ def all_test_configurations(self):
+ return []
+
+ def configuration_specifier_macros(self):
+ return []
+
+ def get_option(self, _, val):
+ return val
+
+ def path_to_generic_test_expectations_file(self):
+ return ''
+
+class FakeFactory(object):
+ def __init__(self, host, ports):
+ self.host = host
+ self.ports = {}
+ for port in ports:
+ self.ports[port.name] = port
+
+ def get(self, port_name, *args, **kwargs): # pylint: disable=W0613,E0202
+ return self.ports[port_name]
+
+ def all_port_names(self, platform=None): # pylint: disable=W0613,E0202
+ return sorted(self.ports.keys())
+
+
+class LintTest(unittest.TestCase):
+ def test_all_configurations(self):
+ host = MockHost()
+ host.ports_parsed = []
+ host.port_factory = FakeFactory(host, (FakePort(host, 'a', 'path-to-a'),
+ FakePort(host, 'b', 'path-to-b'),
+ FakePort(host, 'b-win', 'path-to-b')))
+
+ logging_stream = StringIO.StringIO()
+ options = optparse.Values({'platform': None})
+ res = lint_test_expectations.lint(host, options, logging_stream)
+ self.assertEqual(res, 0)
+ self.assertEqual(host.ports_parsed, ['a', 'b', 'b-win'])
+
+ def test_lint_test_files(self):
+ logging_stream = StringIO.StringIO()
+ options = optparse.Values({'platform': 'test-mac-leopard'})
+ host = MockHost()
+
+ # pylint appears to complain incorrectly about the method overrides pylint: disable=E0202,C0322
+ # FIXME: incorrect complaints about spacing pylint: disable=C0322
+ host.port_factory.all_port_names = lambda platform=None: [platform]
+
+ res = lint_test_expectations.lint(host, options, logging_stream)
+
+ self.assertEqual(res, 0)
+ self.assertIn('Lint succeeded', logging_stream.getvalue())
+
+ def test_lint_test_files__errors(self):
+ options = optparse.Values({'platform': 'test', 'debug_rwt_logging': False})
+ host = MockHost()
+
+ # FIXME: incorrect complaints about spacing pylint: disable=C0322
+ port = host.port_factory.get(options.platform, options=options)
+ port.expectations_dict = lambda: {'foo': '-- syntax error1', 'bar': '-- syntax error2'}
+
+ host.port_factory.get = lambda platform, options=None: port
+ host.port_factory.all_port_names = lambda platform=None: [port.name()]
+
+ logging_stream = StringIO.StringIO()
+
+ res = lint_test_expectations.lint(host, options, logging_stream)
+
+ self.assertEqual(res, -1)
+ self.assertIn('Lint failed', logging_stream.getvalue())
+ self.assertIn('foo:1', logging_stream.getvalue())
+ self.assertIn('bar:1', logging_stream.getvalue())
+
+
+class MainTest(unittest.TestCase):
+ def test_success(self):
+ orig_lint_fn = lint_test_expectations.lint
+
+ # unused args pylint: disable=W0613
+ def interrupting_lint(host, options, logging_stream):
+ raise KeyboardInterrupt
+
+ def successful_lint(host, options, logging_stream):
+ return 0
+
+ def exception_raising_lint(host, options, logging_stream):
+ assert False
+
+ stdout = StringIO.StringIO()
+ stderr = StringIO.StringIO()
+ try:
+ lint_test_expectations.lint = interrupting_lint
+ res = lint_test_expectations.main([], stdout, stderr)
+ self.assertEqual(res, lint_test_expectations.INTERRUPTED_EXIT_STATUS)
+
+ lint_test_expectations.lint = successful_lint
+ res = lint_test_expectations.main(['--platform', 'test'], stdout, stderr)
+ self.assertEqual(res, 0)
+
+ lint_test_expectations.lint = exception_raising_lint
+ res = lint_test_expectations.main([], stdout, stderr)
+ self.assertEqual(res, lint_test_expectations.EXCEPTIONAL_EXIT_STATUS)
+ finally:
+ lint_test_expectations.lint = orig_lint_fn
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py b/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
deleted file mode 100644
index 5bb501061..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from webkitpy.layout_tests.models.test_expectations import TestExpectations, SKIP, CRASH, TIMEOUT
-
-
-class ResultSummary(object):
- def __init__(self, expectations, test_files, iterations, expected_skips):
- self.total = len(test_files) * iterations
- self.remaining = self.total
- self.expectations = expectations
- self.expected = 0
- self.unexpected = 0
- self.unexpected_failures = 0
- self.unexpected_crashes = 0
- self.unexpected_timeouts = 0
- self.total_tests_by_expectation = {}
- self.tests_by_expectation = {}
- self.tests_by_timeline = {}
- self.results = {}
- self.unexpected_results = {}
- self.failures = {}
- self.total_failures = 0
- self.expected_skips = 0
- self.total_tests_by_expectation[SKIP] = len(expected_skips)
- self.tests_by_expectation[SKIP] = expected_skips
- for expectation in TestExpectations.EXPECTATIONS.values():
- self.tests_by_expectation[expectation] = set()
- self.total_tests_by_expectation[expectation] = 0
- for timeline in TestExpectations.TIMELINES.values():
- self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
- self.slow_tests = set()
-
- def add(self, test_result, expected, test_is_slow):
- self.total_tests_by_expectation[test_result.type] += 1
- self.tests_by_expectation[test_result.type].add(test_result.test_name)
- self.results[test_result.test_name] = test_result
- self.remaining -= 1
- if len(test_result.failures):
- self.total_failures += 1
- self.failures[test_result.test_name] = test_result.failures
- if expected:
- self.expected += 1
- if test_result.type == SKIP:
- self.expected_skips += 1
- else:
- self.unexpected_results[test_result.test_name] = test_result
- self.unexpected += 1
- if len(test_result.failures):
- self.unexpected_failures += 1
- if test_result.type == CRASH:
- self.unexpected_crashes += 1
- elif test_result.type == TIMEOUT:
- self.unexpected_timeouts += 1
- if test_is_slow:
- self.slow_tests.add(test_result.test_name)
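
The deleted ResultSummary is superseded by the TestRunResults class added in models/test_run_results.py later in this patch. A sketch of equivalent construction, following the pattern the new unit tests use (MockHost and the 'test' port); the test list is example data:

    from webkitpy.common.host_mock import MockHost
    from webkitpy.layout_tests.models import test_expectations, test_run_results

    host = MockHost()
    port = host.port_factory.get(port_name='test')
    tests = ['passes/text.html', 'failures/expected/crash.html']
    expectations = test_expectations.TestExpectations(port, tests)
    run_results = test_run_results.TestRunResults(expectations, len(tests))
    print run_results.remaining   # 2
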
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
index ec99cf6b4..1a7d375f2 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.layout_tests.models.test_configuration import *
@@ -71,15 +71,15 @@ class TestConfigurationTest(unittest.TestCase):
def test_hash(self):
config_dict = {}
config_dict[TestConfiguration('xp', 'x86', 'release')] = True
- self.assertTrue(TestConfiguration('xp', 'x86', 'release') in config_dict)
+ self.assertIn(TestConfiguration('xp', 'x86', 'release'), config_dict)
self.assertTrue(config_dict[TestConfiguration('xp', 'x86', 'release')])
def query_unknown_key():
return config_dict[TestConfiguration('xp', 'x86', 'debug')]
self.assertRaises(KeyError, query_unknown_key)
- self.assertTrue(TestConfiguration('xp', 'x86', 'release') in config_dict)
- self.assertFalse(TestConfiguration('xp', 'x86', 'debug') in config_dict)
+ self.assertIn(TestConfiguration('xp', 'x86', 'release'), config_dict)
+ self.assertNotIn(TestConfiguration('xp', 'x86', 'debug'), config_dict)
configs_list = [TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'), TestConfiguration('xp', 'x86', 'debug')]
self.assertEqual(len(configs_list), 3)
self.assertEqual(len(set(configs_list)), 2)
@@ -103,7 +103,7 @@ class SpecifierSorterTest(unittest.TestCase):
def test_init(self):
sorter = SpecifierSorter()
- self.assertEqual(sorter.category_for_specifier('control'), None)
+ self.assertIsNone(sorter.category_for_specifier('control'))
sorter = SpecifierSorter(self._all_test_configurations)
self.assertEqual(sorter.category_for_specifier('xp'), 'version')
sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
@@ -111,7 +111,7 @@ class SpecifierSorterTest(unittest.TestCase):
def test_add_specifier(self):
sorter = SpecifierSorter()
- self.assertEqual(sorter.category_for_specifier('control'), None)
+ self.assertIsNone(sorter.category_for_specifier('control'))
sorter.add_specifier('version', 'control')
self.assertEqual(sorter.category_for_specifier('control'), 'version')
sorter.add_specifier('version', 'one')
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
index 234259657..f270aa41d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -277,25 +276,16 @@ class TestExpectationParser(object):
warnings = []
WEBKIT_BUG_PREFIX = 'webkit.org/b/'
- CHROMIUM_BUG_PREFIX = 'crbug.com/'
- V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
tokens = remaining_string.split()
state = 'start'
for token in tokens:
- if (token.startswith(WEBKIT_BUG_PREFIX) or
- token.startswith(CHROMIUM_BUG_PREFIX) or
- token.startswith(V8_BUG_PREFIX) or
- token.startswith('Bug(')):
+ if token.startswith(WEBKIT_BUG_PREFIX) or token.startswith('Bug('):
if state != 'start':
warnings.append('"%s" is not at the start of the line.' % token)
break
if token.startswith(WEBKIT_BUG_PREFIX):
bugs.append(token.replace(WEBKIT_BUG_PREFIX, 'BUGWK'))
- elif token.startswith(CHROMIUM_BUG_PREFIX):
- bugs.append(token.replace(CHROMIUM_BUG_PREFIX, 'BUGCR'))
- elif token.startswith(V8_BUG_PREFIX):
- bugs.append(token.replace(V8_BUG_PREFIX, 'BUGV8_'))
else:
match = re.match('Bug\((\w+)\)$', token)
if not match:
@@ -330,6 +320,8 @@ class TestExpectationParser(object):
elif state == 'expectations':
if token in ('Rebaseline', 'Skip', 'Slow', 'WontFix'):
modifiers.append(token.upper())
+ elif token not in cls._expectation_tokens:
+ warnings.append('Unrecognized expectation "%s"' % token)
else:
expectations.append(cls._expectation_tokens.get(token, token))
elif state == 'name_found':
@@ -345,12 +337,12 @@ class TestExpectationParser(object):
elif state not in ('name_found', 'done'):
warnings.append('Missing a "]"')
- if 'WONTFIX' in modifiers and 'SKIP' not in modifiers:
+ if 'WONTFIX' in modifiers and 'SKIP' not in modifiers and not expectations:
modifiers.append('SKIP')
if 'SKIP' in modifiers and expectations:
# FIXME: This is really a semantic warning and shouldn't be here. Remove when we drop the old syntax.
- warnings.append('A test marked Skip or WontFix must not have other expectations.')
+ warnings.append('A test marked Skip must not have other expectations.')
elif not expectations:
if 'SKIP' not in modifiers and 'REBASELINE' not in modifiers and 'SLOW' not in modifiers:
modifiers.append('SKIP')
@@ -755,7 +747,8 @@ class TestExpectations(object):
'text': TEXT,
'timeout': TIMEOUT,
'crash': CRASH,
- 'missing': MISSING}
+ 'missing': MISSING,
+ 'skip': SKIP}
# (aggregated by category, pass/fail/skip, type)
EXPECTATION_DESCRIPTIONS = {SKIP: 'skipped',
@@ -840,7 +833,7 @@ class TestExpectations(object):
# FIXME: This constructor does too much work. We should move the actual parsing of
# the expectations into separate routines so that linting and handling overrides
# can be controlled separately, and the constructor can be more of a no-op.
- def __init__(self, port, tests=None, include_overrides=True, expectations_to_lint=None):
+ def __init__(self, port, tests=None, include_generic=True, include_overrides=True, expectations_to_lint=None):
self._full_test_list = tests
self._test_config = port.test_configuration()
self._is_lint_mode = expectations_to_lint is not None
@@ -848,16 +841,32 @@ class TestExpectations(object):
self._parser = TestExpectationParser(port, tests, self._is_lint_mode)
self._port = port
self._skipped_tests_warnings = []
+ self._expectations = []
expectations_dict = expectations_to_lint or port.expectations_dict()
- self._expectations = self._parser.parse(expectations_dict.keys()[0], expectations_dict.values()[0])
- self._add_expectations(self._expectations)
- if len(expectations_dict) > 1 and include_overrides:
- for name in expectations_dict.keys()[1:]:
- expectations = self._parser.parse(name, expectations_dict[name])
+ expectations_dict_index = 0
+ # Populate generic expectations (if enabled by include_generic).
+ if port.path_to_generic_test_expectations_file() in expectations_dict:
+ if include_generic:
+ expectations = self._parser.parse(expectations_dict.keys()[expectations_dict_index], expectations_dict.values()[expectations_dict_index])
self._add_expectations(expectations)
self._expectations += expectations
+ expectations_dict_index += 1
+
+ # Populate default port expectations (always enabled).
+ if len(expectations_dict) > expectations_dict_index:
+ expectations = self._parser.parse(expectations_dict.keys()[expectations_dict_index], expectations_dict.values()[expectations_dict_index])
+ self._add_expectations(expectations)
+ self._expectations += expectations
+ expectations_dict_index += 1
+
+ # Populate override expectations (if enabled by include_overrides).
+ while len(expectations_dict) > expectations_dict_index and include_overrides:
+ expectations = self._parser.parse(expectations_dict.keys()[expectations_dict_index], expectations_dict.values()[expectations_dict_index])
+ self._add_expectations(expectations)
+ self._expectations += expectations
+ expectations_dict_index += 1
# FIXME: move ignore_tests into port.skipped_layout_tests()
self.add_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
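
To illustrate the narrowed bug-token parsing above: only webkit.org/b/ URLs and Bug(owner) tokens are now recognized, normalized to BUGWK... and BUG... modifiers. A standalone sketch (the helper function itself is not in webkitpy):

    import re

    WEBKIT_BUG_PREFIX = 'webkit.org/b/'

    def normalize_bug_token(token):
        if token.startswith(WEBKIT_BUG_PREFIX):
            return token.replace(WEBKIT_BUG_PREFIX, 'BUGWK')
        match = re.match(r'Bug\((\w+)\)$', token)
        if match:
            return 'BUG' + match.group(1).upper()
        return None  # crbug.com/... and V8 URLs are no longer accepted

    print normalize_bug_token('webkit.org/b/12345')  # BUGWK12345
    print normalize_bug_token('Bug(dpranke)')         # BUGDPRANKE
    print normalize_bug_token('crbug.com/12345')      # None
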
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
index bbe031cde..621188c19 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -27,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
@@ -330,9 +329,8 @@ class ExpectationSyntaxTests(Base):
def test_bare_name_and_bugs(self):
self.assert_tokenize_exp('webkit.org/b/12345 foo.html', modifiers=['BUGWK12345', 'SKIP'], expectations=['PASS'])
- self.assert_tokenize_exp('crbug.com/12345 foo.html', modifiers=['BUGCR12345', 'SKIP'], expectations=['PASS'])
self.assert_tokenize_exp('Bug(dpranke) foo.html', modifiers=['BUGDPRANKE', 'SKIP'], expectations=['PASS'])
- self.assert_tokenize_exp('crbug.com/12345 crbug.com/34567 foo.html', modifiers=['BUGCR12345', 'BUGCR34567', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('webkit.org/b/12345 webkit.org/b/34567 foo.html', modifiers=['BUGWK12345', 'BUGWK34567', 'SKIP'], expectations=['PASS'])
def test_comments(self):
self.assert_tokenize_exp("# comment", name=None, comment="# comment")
@@ -347,7 +345,7 @@ class ExpectationSyntaxTests(Base):
self.assert_tokenize_exp('[ Foo ] foo.html ', modifiers=['Foo', 'SKIP'], expectations=['PASS'])
def test_unknown_expectation(self):
- self.assert_tokenize_exp('foo.html [ Audio ]', expectations=['Audio'])
+ self.assert_tokenize_exp('foo.html [ Audio ]', warnings=['Unrecognized expectation "Audio"'])
def test_skip(self):
self.assert_tokenize_exp('foo.html [ Skip ]', modifiers=['SKIP'], expectations=['PASS'])
@@ -357,6 +355,8 @@ class ExpectationSyntaxTests(Base):
def test_wontfix(self):
self.assert_tokenize_exp('foo.html [ WontFix ]', modifiers=['WONTFIX', 'SKIP'], expectations=['PASS'])
+ self.assert_tokenize_exp('foo.html [ WontFix ImageOnlyFailure ]', modifiers=['WONTFIX'], expectations=['IMAGE'])
+ self.assert_tokenize_exp('foo.html [ WontFix Pass Failure ]', modifiers=['WONTFIX'], expectations=['PASS', 'FAIL'])
def test_blank_line(self):
self.assert_tokenize_exp('', name=None)
@@ -364,9 +364,9 @@ class ExpectationSyntaxTests(Base):
def test_warnings(self):
self.assert_tokenize_exp('[ Mac ]', warnings=['Did not find a test name.'], name=None)
self.assert_tokenize_exp('[ [', warnings=['unexpected "["'], name=None)
- self.assert_tokenize_exp('crbug.com/12345 ]', warnings=['unexpected "]"'], name=None)
+ self.assert_tokenize_exp('webkit.org/b/12345 ]', warnings=['unexpected "]"'], name=None)
- self.assert_tokenize_exp('foo.html crbug.com/12345 ]', warnings=['"crbug.com/12345" is not at the start of the line.'])
+ self.assert_tokenize_exp('foo.html webkit.org/b/12345 ]', warnings=['"webkit.org/b/12345" is not at the start of the line.'])
class SemanticTests(Base):
@@ -392,16 +392,16 @@ class SemanticTests(Base):
self.assertEqual(line.warnings, ['Test lacks BUG modifier.'])
def test_skip_and_wontfix(self):
- # Skip and WontFix are not allowed to have other expectations as well, because those
+ # Skip is not allowed to have other expectations as well, because those
# expectations won't be exercised and may become stale .
self.parse_exp('failures/expected/text.html [ Failure Skip ]')
self.assertTrue(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Crash WontFix ]')
- self.assertTrue(self._exp.has_warnings())
+ self.assertFalse(self._exp.has_warnings())
self.parse_exp('failures/expected/text.html [ Pass WontFix ]')
- self.assertTrue(self._exp.has_warnings())
+ self.assertFalse(self._exp.has_warnings())
def test_slow_and_timeout(self):
# A test cannot be SLOW and expected to TIMEOUT.
@@ -696,7 +696,8 @@ class TestExpectationSerializationTests(unittest.TestCase):
serialized = TestExpectations.list_to_string(lines, self._converter, reconstitute_only_these=reconstitute_only_these)
self.assertEqual(serialized, "Bug(x) [ XP Release ] Yay [ ImageOnlyFailure ]\nNay")
- def test_string_whitespace_stripping(self):
+ def disabled_test_string_whitespace_stripping(self):
+ # FIXME: Re-enable this test once we rework the code to no longer support the old syntax.
self.assert_round_trip('\n', '')
self.assert_round_trip(' [ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
self.assert_round_trip('[ FOO ] bar [ BAZ ]', '[ FOO ] bar [ BAZ ]')
@@ -704,7 +705,3 @@ class TestExpectationSerializationTests(unittest.TestCase):
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
self.assert_round_trip('[ FOO ] bar [ BAZ ] # Qux.', '[ FOO ] bar [ BAZ ] # Qux.')
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py b/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
index 402b30aea..52b720854 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_failures.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
index 74ef8cbb4..ea9a2e82d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_failures_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.layout_tests.models.test_failures import *
@@ -36,7 +36,7 @@ class TestFailuresTest(unittest.TestCase):
failure_obj = cls()
s = failure_obj.dumps()
new_failure_obj = TestFailure.loads(s)
- self.assertTrue(isinstance(new_failure_obj, cls))
+ self.assertIsInstance(new_failure_obj, cls)
self.assertEqual(failure_obj, new_failure_obj)
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_input.py b/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
index 56f2d52bd..58c84ab64 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_results.py b/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
index 6b9db5587..d6fd10b18 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_results.py
@@ -38,15 +38,24 @@ class TestResult(object):
def loads(string):
return cPickle.loads(string)
- def __init__(self, test_name, failures=None, test_run_time=None, has_stderr=False, reftest_type=[]):
+ def __init__(self, test_name, failures=None, test_run_time=None, has_stderr=False, reftest_type=None, pid=None, references=None):
self.test_name = test_name
self.failures = failures or []
- self.test_run_time = test_run_time or 0
+ self.test_run_time = test_run_time or 0 # The time taken to execute the test itself.
self.has_stderr = has_stderr
- self.reftest_type = reftest_type
+ self.reftest_type = reftest_type or []
+ self.pid = pid
+ self.references = references or []
+
# FIXME: Setting this in the constructor makes this class hard to mutate.
self.type = test_failures.determine_result_type(failures)
+ # These are set by the worker, not by the driver, so they are not passed to the constructor.
+ self.worker_name = ''
+ self.shard_name = ''
+ self.total_run_time = 0 # The time taken to run the test plus any references, compute diffs, etc.
+ self.test_number = None
+
def __eq__(self, other):
return (self.test_name == other.test_name and
self.failures == other.failures and
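
A hypothetical construction of a TestResult with the newly added constructor arguments; the worker-side attributes are filled in separately, as the comment above notes. All values are example data:

    from webkitpy.layout_tests.models.test_results import TestResult

    result = TestResult('fast/dom/example.html', failures=[], test_run_time=0.25,
                        has_stderr=False, reftest_type=None, pid=1234, references=[])

    # Set by the worker rather than the driver, so not constructor arguments:
    result.worker_name = 'worker/0'
    result.shard_name = 'fast_dom'
    result.total_run_time = 0.31   # test plus references, diffing, etc.
    result.test_number = 7
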
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py
index 80d8a474e..e1bb2f27a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_results_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.layout_tests.models.test_results import TestResult
@@ -44,7 +44,7 @@ class TestResultsTest(unittest.TestCase):
test_run_time=1.1)
s = result.dumps()
new_result = TestResult.loads(s)
- self.assertTrue(isinstance(new_result, TestResult))
+ self.assertIsInstance(new_result, TestResult)
self.assertEqual(new_result, result)
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
new file mode 100644
index 000000000..3af122485
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py
@@ -0,0 +1,260 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+
+
+_log = logging.getLogger(__name__)
+
+
+class TestRunResults(object):
+ def __init__(self, expectations, num_tests):
+ self.total = num_tests
+ self.remaining = self.total
+ self.expectations = expectations
+ self.expected = 0
+ self.unexpected = 0
+ self.unexpected_failures = 0
+ self.unexpected_crashes = 0
+ self.unexpected_timeouts = 0
+ self.tests_by_expectation = {}
+ self.tests_by_timeline = {}
+ self.results_by_name = {} # Map of test name to the last result for the test.
+ self.all_results = [] # All results from a run, including every iteration of every test.
+ self.unexpected_results_by_name = {}
+ self.failures_by_name = {}
+ self.total_failures = 0
+ self.expected_skips = 0
+ for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
+ self.tests_by_expectation[expectation] = set()
+ for timeline in test_expectations.TestExpectations.TIMELINES.values():
+ self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
+ self.slow_tests = set()
+ self.interrupted = False
+
+ def add(self, test_result, expected, test_is_slow):
+ self.tests_by_expectation[test_result.type].add(test_result.test_name)
+ self.results_by_name[test_result.test_name] = test_result
+ if test_result.type != test_expectations.SKIP:
+ self.all_results.append(test_result)
+ self.remaining -= 1
+ if len(test_result.failures):
+ self.total_failures += 1
+ self.failures_by_name[test_result.test_name] = test_result.failures
+ if expected:
+ self.expected += 1
+ if test_result.type == test_expectations.SKIP:
+ self.expected_skips += 1
+ else:
+ self.unexpected_results_by_name[test_result.test_name] = test_result
+ self.unexpected += 1
+ if len(test_result.failures):
+ self.unexpected_failures += 1
+ if test_result.type == test_expectations.CRASH:
+ self.unexpected_crashes += 1
+ elif test_result.type == test_expectations.TIMEOUT:
+ self.unexpected_timeouts += 1
+ if test_is_slow:
+ self.slow_tests.add(test_result.test_name)
+
+
+class RunDetails(object):
+ def __init__(self, exit_code, summarized_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
+ self.exit_code = exit_code
+ self.summarized_results = summarized_results
+ self.initial_results = initial_results
+ self.retry_results = retry_results
+ self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
+
+
+def _interpret_test_failures(failures):
+ test_dict = {}
+ failure_types = [type(failure) for failure in failures]
+ # FIXME: get rid of all this is_* values once there is a 1:1 map between
+ # TestFailure type and test_expectations.EXPECTATION.
+ if test_failures.FailureMissingAudio in failure_types:
+ test_dict['is_missing_audio'] = True
+
+ if test_failures.FailureMissingResult in failure_types:
+ test_dict['is_missing_text'] = True
+
+ if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
+ test_dict['is_missing_image'] = True
+
+ if 'image_diff_percent' not in test_dict:
+ for failure in failures:
+ if isinstance(failure, test_failures.FailureImageHashMismatch) or isinstance(failure, test_failures.FailureReftestMismatch):
+ test_dict['image_diff_percent'] = failure.diff_percent
+
+ return test_dict
+
+
+def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry):
+ """Returns a dictionary containing a summary of the test runs, with the following fields:
+ 'version': a version indicator
+ 'fixable': The number of fixable tests (NOW - PASS)
+ 'skipped': The number of skipped tests (NOW & SKIPPED)
+ 'num_regressions': The number of non-flaky failures
+ 'num_flaky': The number of flaky failures
+ 'num_missing': The number of tests with missing results
+ 'num_passes': The number of unexpected passes
+ 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
+ """
+ results = {}
+ results['version'] = 3
+
+ tbe = initial_results.tests_by_expectation
+ tbt = initial_results.tests_by_timeline
+ results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
+ results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])
+
+ num_passes = 0
+ num_flaky = 0
+ num_missing = 0
+ num_regressions = 0
+ keywords = {}
+ for expecation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
+ keywords[expectation_enum] = expecation_string.upper()
+
+ for modifier_string, modifier_enum in test_expectations.TestExpectations.MODIFIERS.iteritems():
+ keywords[modifier_enum] = modifier_string.upper()
+
+ tests = {}
+
+ for test_name, result in initial_results.results_by_name.iteritems():
+ # Note that if a test crashed in the original run, we ignore
+ # whether or not it crashed when we retried it (if we retried it),
+ # and always consider the result not flaky.
+ expected = expectations.get_expectations_string(test_name)
+ result_type = result.type
+ actual = [keywords[result_type]]
+
+ if result_type == test_expectations.SKIP:
+ continue
+
+ test_dict = {}
+ if result.has_stderr:
+ test_dict['has_stderr'] = True
+
+ if result.reftest_type:
+ test_dict.update(reftest_type=list(result.reftest_type))
+
+ if expectations.has_modifier(test_name, test_expectations.WONTFIX):
+ test_dict['wontfix'] = True
+
+ if result_type == test_expectations.PASS:
+ num_passes += 1
+ # FIXME: include passing tests that have stderr output.
+ if expected == 'PASS':
+ continue
+ elif result_type == test_expectations.CRASH:
+ if test_name in initial_results.unexpected_results_by_name:
+ num_regressions += 1
+ elif result_type == test_expectations.MISSING:
+ if test_name in initial_results.unexpected_results_by_name:
+ num_missing += 1
+ elif test_name in initial_results.unexpected_results_by_name:
+ if retry_results and test_name not in retry_results.unexpected_results_by_name:
+ actual.extend(expectations.get_expectations_string(test_name).split(" "))
+ num_flaky += 1
+ elif retry_results:
+ retry_result_type = retry_results.unexpected_results_by_name[test_name].type
+ if result_type != retry_result_type:
+ if enabled_pixel_tests_in_retry and result_type == test_expectations.TEXT and retry_result_type == test_expectations.IMAGE_PLUS_TEXT:
+ num_regressions += 1
+ else:
+ num_flaky += 1
+ actual.append(keywords[retry_result_type])
+ else:
+ num_regressions += 1
+ else:
+ num_regressions += 1
+
+ test_dict['expected'] = expected
+ test_dict['actual'] = " ".join(actual)
+
+ test_dict.update(_interpret_test_failures(result.failures))
+
+ if retry_results:
+ retry_result = retry_results.unexpected_results_by_name.get(test_name)
+ if retry_result:
+ test_dict.update(_interpret_test_failures(retry_result.failures))
+
+ # Store test hierarchically by directory. e.g.
+ # foo/bar/baz.html: test_dict
+ # foo/bar/baz1.html: test_dict
+ #
+ # becomes
+ # foo: {
+ # bar: {
+ # baz.html: test_dict,
+ # baz1.html: test_dict
+ # }
+ # }
+ parts = test_name.split('/')
+ current_map = tests
+ for i, part in enumerate(parts):
+ if i == (len(parts) - 1):
+ current_map[part] = test_dict
+ break
+ if part not in current_map:
+ current_map[part] = {}
+ current_map = current_map[part]
+
+ results['tests'] = tests
+ results['num_passes'] = num_passes
+ results['num_flaky'] = num_flaky
+ results['num_missing'] = num_missing
+ results['num_regressions'] = num_regressions
+ results['uses_expectations_file'] = port_obj.uses_test_expectations_file()
+ results['interrupted'] = initial_results.interrupted # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
+ results['layout_tests_dir'] = port_obj.layout_tests_dir()
+ results['has_wdiff'] = port_obj.wdiff_available()
+ results['has_pretty_patch'] = port_obj.pretty_patch_available()
+ results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
+
+ try:
+ # We only use the svn revision for using trac links in the results.html file,
+ # Don't do this by default since it takes >100ms.
+ # FIXME: Do we really need to populate this both here and in the json_results_generator?
+ if port_obj.get_option("builder_name"):
+ port_obj.host.initialize_scm()
+ results['revision'] = port_obj.host.scm().head_svn_revision()
+ except Exception, e:
+ _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
+ # Handle cases where we're running outside of version control.
+ import traceback
+ _log.debug('Failed to learn head svn revision:')
+ _log.debug(traceback.format_exc())
+ results['revision'] = ""
+
+ return results
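
A standalone sketch of the hierarchical 'tests' dictionary that summarize_results() builds (see the "Store test hierarchically by directory" comment above); the test names and result dictionaries are example data:

    def insert_hierarchically(tests, test_name, test_dict):
        parts = test_name.split('/')
        current_map = tests
        for i, part in enumerate(parts):
            if i == len(parts) - 1:
                current_map[part] = test_dict
                break
            current_map = current_map.setdefault(part, {})

    tests = {}
    insert_hierarchically(tests, 'foo/bar/baz.html', {'expected': 'PASS', 'actual': 'PASS'})
    insert_hierarchically(tests, 'foo/bar/baz1.html', {'expected': 'PASS', 'actual': 'TEXT'})
    # tests == {'foo': {'bar': {'baz.html': {...}, 'baz1.html': {...}}}}
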
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
new file mode 100644
index 000000000..c0d9265f0
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_run_results_unittest.py
@@ -0,0 +1,135 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.layout_tests.models import test_run_results
+
+
+def get_result(test_name, result_type=test_expectations.PASS, run_time=0):
+ failures = []
+ if result_type == test_expectations.TIMEOUT:
+ failures = [test_failures.FailureTimeout()]
+ elif result_type == test_expectations.AUDIO:
+ failures = [test_failures.FailureAudioMismatch()]
+ elif result_type == test_expectations.CRASH:
+ failures = [test_failures.FailureCrash()]
+ return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
+
+
+def run_results(port):
+ tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html', 'failures/expected/hang.html',
+ 'failures/expected/audio.html']
+ expectations = test_expectations.TestExpectations(port, tests)
+ return test_run_results.TestRunResults(expectations, len(tests))
+
+
+def summarized_results(port, expected, passing, flaky):
+ test_is_slow = False
+
+ initial_results = run_results(port)
+ if expected:
+ initial_results.add(get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
+ elif passing:
+ initial_results.add(get_result('passes/text.html'), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/audio.html'), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/timeout.html'), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/crash.html'), expected, test_is_slow)
+ else:
+ initial_results.add(get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/audio.html', test_expectations.AUDIO), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow)
+ initial_results.add(get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
+
+ # we only list hang.html here, since normally this is WontFix
+ initial_results.add(get_result('failures/expected/hang.html', test_expectations.TIMEOUT), expected, test_is_slow)
+
+ if flaky:
+ retry_results = run_results(port)
+ retry_results.add(get_result('passes/text.html'), True, test_is_slow)
+ retry_results.add(get_result('failures/expected/timeout.html'), True, test_is_slow)
+ retry_results.add(get_result('failures/expected/crash.html'), True, test_is_slow)
+ else:
+ retry_results = None
+
+ return test_run_results.summarize_results(port, initial_results.expectations, initial_results, retry_results, enabled_pixel_tests_in_retry=False)
+
+
+class InterpretTestFailuresTest(unittest.TestCase):
+ def setUp(self):
+ host = MockHost()
+ self.port = host.port_factory.get(port_name='test')
+
+ def test_interpret_test_failures(self):
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureImageHashMismatch(diff_percent=0.42)])
+ self.assertEqual(test_dict['image_diff_percent'], 0.42)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureReftestMismatch(self.port.abspath_for_test('foo/reftest-expected.html'))])
+ self.assertIn('image_diff_percent', test_dict)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureReftestMismatchDidNotOccur(self.port.abspath_for_test('foo/reftest-expected-mismatch.html'))])
+ self.assertEqual(len(test_dict), 0)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingAudio()])
+ self.assertIn('is_missing_audio', test_dict)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingResult()])
+ self.assertIn('is_missing_text', test_dict)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingImage()])
+ self.assertIn('is_missing_image', test_dict)
+
+ test_dict = test_run_results._interpret_test_failures([test_failures.FailureMissingImageHash()])
+ self.assertIn('is_missing_image', test_dict)
+
+
+class SummarizedResultsTest(unittest.TestCase):
+ def setUp(self):
+ host = MockHost(initialize_scm_by_default=False)
+ self.port = host.port_factory.get(port_name='test')
+
+ def test_no_svn_revision(self):
+ summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+ self.assertNotIn('revision', summary)
+
+ def test_svn_revision(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+ self.assertNotEquals(summary['revision'], '')
+
+ def test_summarized_results_wontfix(self):
+ self.port._options.builder_name = 'dummy builder'
+ summary = summarized_results(self.port, expected=False, passing=False, flaky=False)
+ self.assertTrue(summary['tests']['failures']['expected']['hang.html']['wontfix'])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
deleted file mode 100755
index 4e0fbe282..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ /dev/null
@@ -1,455 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Chromium implementations of the Port interface."""
-
-import base64
-import errno
-import logging
-import re
-import signal
-import subprocess
-import sys
-import time
-
-from webkitpy.common.system import executive
-from webkitpy.common.system.path import cygpath
-from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port.base import Port, VirtualTestSuite
-
-
-_log = logging.getLogger(__name__)
-
-
-class ChromiumPort(Port):
- """Abstract base class for Chromium implementations of the Port class."""
-
- ALL_SYSTEMS = (
- ('snowleopard', 'x86'),
- ('lion', 'x86'),
- ('mountainlion', 'x86'),
- ('xp', 'x86'),
- ('win7', 'x86'),
- ('lucid', 'x86'),
- ('lucid', 'x86_64'),
- # FIXME: Technically this should be 'arm', but adding a third architecture type breaks TestConfigurationConverter.
- # If we need this to be 'arm' in the future, then we first have to fix TestConfigurationConverter.
- ('icecreamsandwich', 'x86'))
-
- ALL_BASELINE_VARIANTS = [
- 'chromium-mac-mountainlion', 'chromium-mac-lion', 'chromium-mac-snowleopard',
- 'chromium-win-win7', 'chromium-win-xp',
- 'chromium-linux-x86_64', 'chromium-linux-x86',
- ]
-
- CONFIGURATION_SPECIFIER_MACROS = {
- 'mac': ['snowleopard', 'lion', 'mountainlion'],
- 'win': ['xp', 'win7'],
- 'linux': ['lucid'],
- 'android': ['icecreamsandwich'],
- }
-
- DEFAULT_BUILD_DIRECTORIES = ('out',)
-
- # overridden in subclasses.
- FALLBACK_PATHS = {}
-
- @classmethod
- def _static_build_path(cls, filesystem, build_directory, chromium_base, webkit_base, configuration, comps):
- if build_directory:
- return filesystem.join(build_directory, configuration, *comps)
-
- for directory in cls.DEFAULT_BUILD_DIRECTORIES:
- base_dir = filesystem.join(chromium_base, directory, configuration)
- if filesystem.exists(base_dir):
- return filesystem.join(base_dir, *comps)
-
- for directory in cls.DEFAULT_BUILD_DIRECTORIES:
- base_dir = filesystem.join(webkit_base, directory, configuration)
- if filesystem.exists(base_dir):
- return filesystem.join(base_dir, *comps)
-
- # We have to default to something, so pick the last one.
- return filesystem.join(base_dir, *comps)
-
- @classmethod
- def _chromium_base_dir(cls, filesystem):
- module_path = filesystem.path_to_module(cls.__module__)
- offset = module_path.find('third_party')
- if offset == -1:
- return filesystem.join(module_path[0:module_path.find('Tools')], 'Source', 'WebKit', 'chromium')
- else:
- return module_path[0:offset]
-
- def __init__(self, host, port_name, **kwargs):
- super(ChromiumPort, self).__init__(host, port_name, **kwargs)
- # All sub-classes override this, but we need an initial value for testing.
- self._chromium_base_dir_path = None
-
- def is_chromium(self):
- return True
-
- def default_max_locked_shards(self):
- """Return the number of "locked" shards to run in parallel (like the http tests)."""
- max_locked_shards = int(self.default_child_processes()) / 4
- if not max_locked_shards:
- return 1
- return max_locked_shards
-
- def default_pixel_tests(self):
- return True
-
- def default_baseline_search_path(self):
- return map(self._webkit_baseline_path, self.FALLBACK_PATHS[self.version()])
-
- def default_timeout_ms(self):
- if self.get_option('configuration') == 'Debug':
- return 12 * 1000
- return 6 * 1000
-
- def _check_file_exists(self, path_to_file, file_description,
- override_step=None, logging=True):
- """Verify the file is present where expected or log an error.
-
- Args:
- file_name: The (human friendly) name or description of the file
- you're looking for (e.g., "HTTP Server"). Used for error logging.
- override_step: An optional string to be logged if the check fails.
- logging: Whether or not log the error messages."""
- if not self._filesystem.exists(path_to_file):
- if logging:
- _log.error('Unable to find %s' % file_description)
- _log.error(' at %s' % path_to_file)
- if override_step:
- _log.error(' %s' % override_step)
- _log.error('')
- return False
- return True
-
- def check_build(self, needs_http):
- result = True
-
- dump_render_tree_binary_path = self._path_to_driver()
- result = self._check_file_exists(dump_render_tree_binary_path,
- 'test driver') and result
- if result and self.get_option('build'):
- result = self._check_driver_build_up_to_date(
- self.get_option('configuration'))
- else:
- _log.error('')
-
- helper_path = self._path_to_helper()
- if helper_path:
- result = self._check_file_exists(helper_path,
- 'layout test helper') and result
-
- if self.get_option('pixel_tests'):
- result = self.check_image_diff(
- 'To override, invoke with --no-pixel-tests') and result
-
- # It's okay if pretty patch and wdiff aren't available, but we will at least log messages.
- self._pretty_patch_available = self.check_pretty_patch()
- self._wdiff_available = self.check_wdiff()
-
- return result
-
- def check_sys_deps(self, needs_http):
- result = super(ChromiumPort, self).check_sys_deps(needs_http)
-
- cmd = [self._path_to_driver(), '--check-layout-test-sys-deps']
-
- local_error = executive.ScriptError()
-
- def error_handler(script_error):
- local_error.exit_code = script_error.exit_code
-
- output = self._executive.run_command(cmd, error_handler=error_handler)
- if local_error.exit_code:
- _log.error('System dependencies check failed.')
- _log.error('To override, invoke with --nocheck-sys-deps')
- _log.error('')
- _log.error(output)
- return False
- return result
-
- def check_image_diff(self, override_step=None, logging=True):
- image_diff_path = self._path_to_image_diff()
- return self._check_file_exists(image_diff_path, 'image diff exe',
- override_step, logging)
-
- def diff_image(self, expected_contents, actual_contents, tolerance=None):
- # tolerance is not used in chromium. Make sure caller doesn't pass tolerance other than zero or None.
- assert (tolerance is None) or tolerance == 0
-
- # If only one of them exists, return that one.
- if not actual_contents and not expected_contents:
- return (None, 0, None)
- if not actual_contents:
- return (expected_contents, 0, None)
- if not expected_contents:
- return (actual_contents, 0, None)
-
- tempdir = self._filesystem.mkdtemp()
-
- expected_filename = self._filesystem.join(str(tempdir), "expected.png")
- self._filesystem.write_binary_file(expected_filename, expected_contents)
-
- actual_filename = self._filesystem.join(str(tempdir), "actual.png")
- self._filesystem.write_binary_file(actual_filename, actual_contents)
-
- diff_filename = self._filesystem.join(str(tempdir), "diff.png")
-
- native_expected_filename = self._convert_path(expected_filename)
- native_actual_filename = self._convert_path(actual_filename)
- native_diff_filename = self._convert_path(diff_filename)
-
- executable = self._path_to_image_diff()
- # Note that although we are handed 'old', 'new', image_diff wants 'new', 'old'.
- command = [executable, '--diff', native_actual_filename, native_expected_filename, native_diff_filename]
-
- result = None
- err_str = None
- try:
- exit_code = self._executive.run_command(command, return_exit_code=True)
- if exit_code == 0:
- # The images are the same.
- result = None
- elif exit_code == 1:
- result = self._filesystem.read_binary_file(native_diff_filename)
- else:
- err_str = "image diff returned an exit code of %s" % exit_code
- except OSError, e:
- err_str = 'error running image diff: %s' % str(e)
- finally:
- self._filesystem.rmtree(str(tempdir))
-
- return (result, 0, err_str or None) # FIXME: how to get % diff?
-
- def path_from_chromium_base(self, *comps):
- """Returns the full path to path made by joining the top of the
- Chromium source tree and the list of path components in |*comps|."""
- if self._chromium_base_dir_path is None:
- self._chromium_base_dir_path = self._chromium_base_dir(self._filesystem)
- return self._filesystem.join(self._chromium_base_dir_path, *comps)
-
- def setup_environ_for_server(self, server_name=None):
- clean_env = super(ChromiumPort, self).setup_environ_for_server(server_name)
- # Webkit Linux (valgrind layout) bot needs these envvars.
- self._copy_value_from_environ_if_set(clean_env, 'VALGRIND_LIB')
- self._copy_value_from_environ_if_set(clean_env, 'VALGRIND_LIB_INNER')
- return clean_env
-
- def default_results_directory(self):
- try:
- return self.path_from_chromium_base('webkit', self.get_option('configuration'), 'layout-test-results')
- except AssertionError:
- return self._build_path('layout-test-results')
-
- def _missing_symbol_to_skipped_tests(self):
- # FIXME: Should WebKitPort have these definitions also?
- return {
- "ff_mp3_decoder": ["webaudio/codec-tests/mp3"],
- "ff_aac_decoder": ["webaudio/codec-tests/aac"],
- }
-
- def skipped_layout_tests(self, test_list):
- # FIXME: Merge w/ WebKitPort.skipped_layout_tests()
- return set(self._skipped_tests_for_unsupported_features(test_list))
-
- def setup_test_run(self):
- # Delete the disk cache if any to ensure a clean test run.
- dump_render_tree_binary_path = self._path_to_driver()
- cachedir = self._filesystem.dirname(dump_render_tree_binary_path)
- cachedir = self._filesystem.join(cachedir, "cache")
- if self._filesystem.exists(cachedir):
- self._filesystem.rmtree(cachedir)
-
- def start_helper(self):
- helper_path = self._path_to_helper()
- if helper_path:
- _log.debug("Starting layout helper %s" % helper_path)
- # Note: Not thread safe: http://bugs.python.org/issue2320
- self._helper = subprocess.Popen([helper_path],
- stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
- is_ready = self._helper.stdout.readline()
- if not is_ready.startswith('ready'):
- _log.error("layout_test_helper failed to be ready")
-
- def stop_helper(self):
- if self._helper:
- _log.debug("Stopping layout test helper")
- try:
- self._helper.stdin.write("x\n")
- self._helper.stdin.close()
- self._helper.wait()
- except IOError, e:
- pass
- finally:
- self._helper = None
-
-
- def exit_code_from_summarized_results(self, unexpected_results):
- # Turn bots red for missing results.
- return unexpected_results['num_regressions'] + unexpected_results['num_missing']
-
- def configuration_specifier_macros(self):
- return self.CONFIGURATION_SPECIFIER_MACROS
-
- def all_baseline_variants(self):
- return self.ALL_BASELINE_VARIANTS
-
- def _generate_all_test_configurations(self):
- """Returns a sequence of the TestConfigurations the port supports."""
- # By default, we assume we want to test every build type on
- # every system and architecture the port supports.
- test_configurations = []
- for version, architecture in self.ALL_SYSTEMS:
- for build_type in self.ALL_BUILD_TYPES:
- test_configurations.append(TestConfiguration(version, architecture, build_type))
- return test_configurations
-
- try_builder_names = frozenset([
- 'linux_layout',
- 'mac_layout',
- 'win_layout',
- 'linux_layout_rel',
- 'mac_layout_rel',
- 'win_layout_rel',
- ])
-
- def warn_if_bug_missing_in_test_expectations(self):
- return True
-
- def expectations_files(self):
- paths = [self.path_to_test_expectations_file()]
- skia_expectations_path = self.path_from_chromium_base('skia', 'skia_test_expectations.txt')
- # FIXME: we should probably warn if this file is missing in some situations.
- # See the discussion in webkit.org/b/97699.
- if self._filesystem.exists(skia_expectations_path):
- paths.append(skia_expectations_path)
-
- builder_name = self.get_option('builder_name', 'DUMMY_BUILDER_NAME')
- if builder_name == 'DUMMY_BUILDER_NAME' or '(deps)' in builder_name or builder_name in self.try_builder_names:
- paths.append(self.path_from_chromium_base('webkit', 'tools', 'layout_tests', 'test_expectations.txt'))
- return paths
-
- def repository_paths(self):
- repos = super(ChromiumPort, self).repository_paths()
- repos.append(('chromium', self.path_from_chromium_base('build')))
- return repos
-
- def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
- if stderr and 'AddressSanitizer' in stderr:
- asan_filter_path = self.path_from_chromium_base('tools', 'valgrind', 'asan', 'asan_symbolize.py')
- if self._filesystem.exists(asan_filter_path):
- output = self._executive.run_command([asan_filter_path], input=stderr, decode_output=False)
- stderr = self._executive.run_command(['c++filt'], input=output, decode_output=False)
-
- return super(ChromiumPort, self)._get_crash_log(name, pid, stdout, stderr, newer_than)
-
- def virtual_test_suites(self):
- return [
- VirtualTestSuite('platform/chromium/virtual/gpu/fast/canvas',
- 'fast/canvas',
- ['--enable-accelerated-2d-canvas']),
- VirtualTestSuite('platform/chromium/virtual/gpu/canvas/philip',
- 'canvas/philip',
- ['--enable-accelerated-2d-canvas']),
- VirtualTestSuite('platform/chromium/virtual/threaded/compositing/visibility',
- 'compositing/visibility',
- ['--enable-threaded-compositing']),
- VirtualTestSuite('platform/chromium/virtual/threaded/compositing/webgl',
- 'compositing/webgl',
- ['--enable-threaded-compositing']),
- VirtualTestSuite('platform/chromium/virtual/gpu/fast/hidpi',
- 'fast/hidpi',
- ['--force-compositing-mode']),
- VirtualTestSuite('platform/chromium/virtual/softwarecompositing',
- 'compositing',
- ['--enable-software-compositing']),
- VirtualTestSuite('platform/chromium/virtual/deferred/fast/images',
- 'fast/images',
- ['--enable-deferred-image-decoding', '--enable-per-tile-painting', '--force-compositing-mode']),
- ]
-
- #
- # PROTECTED METHODS
- #
- # These routines should only be called by other methods in this file
- # or any subclasses.
- #
-
- def _build_path(self, *comps):
- return self._build_path_with_configuration(None, *comps)
-
- def _build_path_with_configuration(self, configuration, *comps):
- # Note that we don't implement --root or do the option caching that the
- # base class does, because chromium doesn't use 'webkit-build-directory' and
- # hence finding the right directory is relatively fast.
- configuration = configuration or self.get_option('configuration')
- return self._static_build_path(self._filesystem, self.get_option('build_directory'),
- self.path_from_chromium_base(), self.path_from_webkit_base(), configuration, comps)
-
- def _path_to_image_diff(self):
- binary_name = 'ImageDiff'
- return self._build_path(binary_name)
-
- def _check_driver_build_up_to_date(self, configuration):
- if configuration in ('Debug', 'Release'):
- try:
- debug_path = self._path_to_driver('Debug')
- release_path = self._path_to_driver('Release')
-
- debug_mtime = self._filesystem.mtime(debug_path)
- release_mtime = self._filesystem.mtime(release_path)
-
- if (debug_mtime > release_mtime and configuration == 'Release' or
- release_mtime > debug_mtime and configuration == 'Debug'):
- most_recent_binary = 'Release' if configuration == 'Debug' else 'Debug'
- _log.warning('You are running the %s binary. However the %s binary appears to be more recent. '
- 'Please pass --%s.', configuration, most_recent_binary, most_recent_binary.lower())
- _log.warning('')
- # This will fail if we don't have both a debug and release binary.
- # That's fine because, in this case, we must already be running the
- # most up-to-date one.
- except OSError:
- pass
- return True
-
- def _chromium_baseline_path(self, platform):
- if platform is None:
- platform = self.name()
- return self.path_from_webkit_base('LayoutTests', 'platform', platform)
-
- def _convert_path(self, path):
- """Handles filename conversion for subprocess command line args."""
- # See note above in diff_image() for why we need this.
- if sys.platform == 'cygwin':
- return cygpath(path)
- return path
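The diff_image() implementation removed above encodes the ImageDiff exit-code convention: 0 means the two images match, 1 means they differ and a diff image has been written, and any other code is treated as an error. Below is a minimal sketch of driving a tool that follows that convention, assuming a Python 3 host; the image_diff_path argument and the file names are placeholders, not paths taken from the removed port code.

    import os
    import shutil
    import subprocess
    import tempfile

    def diff_images(image_diff_path, actual_png_path, expected_png_path):
        """Return (diff_bytes, error_string) using the 0/1/other exit-code convention."""
        tempdir = tempfile.mkdtemp()
        diff_png_path = os.path.join(tempdir, 'diff.png')
        try:
            # ImageDiff is handed (actual, expected) even though callers think in (old, new).
            exit_code = subprocess.call([image_diff_path, '--diff',
                                         actual_png_path, expected_png_path, diff_png_path])
            if exit_code == 0:
                return None, None  # images are identical
            if exit_code == 1:
                with open(diff_png_path, 'rb') as f:
                    return f.read(), None  # images differ; a diff image was produced
            return None, 'image diff returned an exit code of %s' % exit_code
        finally:
            shutil.rmtree(tempdir, ignore_errors=True)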
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
deleted file mode 100644
index 91cd3100c..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
+++ /dev/null
@@ -1,721 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2012 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import copy
-import logging
-import os
-import re
-import subprocess
-import sys
-import threading
-import time
-
-from webkitpy.layout_tests.port import chromium
-from webkitpy.layout_tests.port import driver
-from webkitpy.layout_tests.port import factory
-from webkitpy.layout_tests.port import server_process
-
-
-_log = logging.getLogger(__name__)
-
-# The root directory for test resources, which has the same structure as the
-# source root directory of Chromium.
-# This path is defined in Chromium's base/test/test_support_android.cc.
-DEVICE_SOURCE_ROOT_DIR = '/data/local/tmp/'
-COMMAND_LINE_FILE = DEVICE_SOURCE_ROOT_DIR + 'chrome-native-tests-command-line'
-
- # The directory for the tools and resources of DumpRenderTree.
- # If this changes, Tools/DumpRenderTree/chromium/TestShellAndroid.cpp
- # and Chromium's webkit/support/platform_support_android.cc must be changed as well.
-DEVICE_DRT_DIR = DEVICE_SOURCE_ROOT_DIR + 'drt/'
-DEVICE_FORWARDER_PATH = DEVICE_DRT_DIR + 'forwarder'
-
-# Path on the device where the test framework will create the fifo pipes.
-DEVICE_FIFO_PATH = '/data/data/org.chromium.native_test/files/'
-
-DRT_APP_PACKAGE = 'org.chromium.native_test'
-DRT_ACTIVITY_FULL_NAME = DRT_APP_PACKAGE + '/.ChromeNativeTestActivity'
-DRT_APP_CACHE_DIR = DEVICE_DRT_DIR + 'cache/'
-
-SCALING_GOVERNORS_PATTERN = "/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor"
-
- # All the test cases are still served to DumpRenderTree through the file protocol,
- # but a file-to-http feature bridges each file request to the host's http
- # server, which provides the real test files and corresponding resources.
-# See webkit/support/platform_support_android.cc for the other side of this bridge.
-PERF_TEST_PATH_PREFIX = '/all-perf-tests'
-LAYOUT_TEST_PATH_PREFIX = '/all-tests'
-
- # All ports for the Android forwarder to forward.
-# 8000, 8080 and 8443 are for http/https tests.
-# 8880 and 9323 are for websocket tests
-# (see http_server.py, apache_http_server.py and websocket_server.py).
-FORWARD_PORTS = '8000 8080 8443 8880 9323'
-
-MS_TRUETYPE_FONTS_DIR = '/usr/share/fonts/truetype/msttcorefonts/'
-MS_TRUETYPE_FONTS_PACKAGE = 'ttf-mscorefonts-installer'
-
-# Timeout in seconds to wait for start/stop of DumpRenderTree.
-DRT_START_STOP_TIMEOUT_SECS = 10
-
-# List of fonts that layout tests expect, copied from DumpRenderTree/chromium/TestShellX11.cpp.
-HOST_FONT_FILES = [
- [[MS_TRUETYPE_FONTS_DIR], 'Arial.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Arial_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Arial_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Arial_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Comic_Sans_MS.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Comic_Sans_MS_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Courier_New.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Courier_New_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Courier_New_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Courier_New_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Georgia.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Georgia_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Georgia_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Georgia_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Impact.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Trebuchet_MS_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Times_New_Roman_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Verdana.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Verdana_Bold.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Verdana_Bold_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- [[MS_TRUETYPE_FONTS_DIR], 'Verdana_Italic.ttf', MS_TRUETYPE_FONTS_PACKAGE],
- # The Microsoft font EULA
- [['/usr/share/doc/ttf-mscorefonts-installer/'], 'READ_ME!.gz', MS_TRUETYPE_FONTS_PACKAGE],
- # Other fonts: Arabic, CJK, Indic, Thai, etc.
- [['/usr/share/fonts/truetype/ttf-dejavu/'], 'DejaVuSans.ttf', 'ttf-dejavu'],
- [['/usr/share/fonts/truetype/kochi/'], 'kochi-mincho.ttf', 'ttf-kochi-mincho'],
- [['/usr/share/fonts/truetype/ttf-indic-fonts-core/'], 'lohit_hi.ttf', 'ttf-indic-fonts-core'],
- [['/usr/share/fonts/truetype/ttf-indic-fonts-core/'], 'lohit_ta.ttf', 'ttf-indic-fonts-core'],
- [['/usr/share/fonts/truetype/ttf-indic-fonts-core/'], 'MuktiNarrow.ttf', 'ttf-indic-fonts-core'],
- [['/usr/share/fonts/truetype/thai/', '/usr/share/fonts/truetype/tlwg/'], 'Garuda.ttf', 'fonts-tlwg-garuda'],
- [['/usr/share/fonts/truetype/ttf-indic-fonts-core/', '/usr/share/fonts/truetype/ttf-punjabi-fonts/'], 'lohit_pa.ttf', 'ttf-indic-fonts-core'],
-]
-
-DEVICE_FONTS_DIR = DEVICE_DRT_DIR + 'fonts/'
-
- # The layout tests directory on the device, which has two uses:
- # 1. as a virtual path in file URLs that will be bridged to HTTP.
- # 2. pointing to files that are pushed to the device for tests that
- # don't work with file-over-http (e.g. blob protocol tests).
-DEVICE_WEBKIT_BASE_DIR = DEVICE_SOURCE_ROOT_DIR + 'third_party/WebKit/'
-DEVICE_LAYOUT_TESTS_DIR = DEVICE_WEBKIT_BASE_DIR + 'LayoutTests/'
-
-# Test resources that need to be accessed as files directly.
-# Each item can be the relative path of a directory or a file.
-TEST_RESOURCES_TO_PUSH = [
- # Blob tests need to access files directly.
- 'editing/pasteboard/resources',
- 'fast/files/resources',
- 'http/tests/local/resources',
- 'http/tests/local/formdata/resources',
- # User style URLs are accessed as local files in webkit_support.
- 'http/tests/security/resources/cssStyle.css',
- # Media tests need to access audio/video as files.
- 'media/content',
- 'compositing/resources/video.mp4',
-]
-
-MD5SUM_DEVICE_FILE_NAME = 'md5sum_bin'
-MD5SUM_DEVICE_PATH = '/data/local/tmp/' + MD5SUM_DEVICE_FILE_NAME
-
-class ChromiumAndroidPort(chromium.ChromiumPort):
- port_name = 'chromium-android'
-
- # Avoid initializing the adb path [worker count]+1 times by storing it as a static member.
- _adb_path = None
-
- FALLBACK_PATHS = [
- 'chromium-android',
- 'chromium-linux',
- 'chromium-win',
- 'chromium',
- ]
-
- def __init__(self, host, port_name, **kwargs):
- super(ChromiumAndroidPort, self).__init__(host, port_name, **kwargs)
-
- self._operating_system = 'android'
- self._version = 'icecreamsandwich'
-
- self._host_port = factory.PortFactory(host).get('chromium', **kwargs)
- self._server_process_constructor = self._android_server_process_constructor
-
- if hasattr(self._options, 'adb_device'):
- self._devices = self._options.adb_device
- else:
- self._devices = []
-
- @staticmethod
- def _android_server_process_constructor(port, server_name, cmd_line, env=None):
- return server_process.ServerProcess(port, server_name, cmd_line, env,
- universal_newlines=True, treat_no_data_as_crash=True)
-
- def additional_drt_flag(self):
- # The Chromium port for Android always uses the hardware GPU path.
- return ['--encode-binary', '--enable-hardware-gpu',
- '--force-compositing-mode',
- '--enable-accelerated-fixed-position']
-
- def default_timeout_ms(self):
- # Android platform has less computing power than desktop platforms.
- # Using 10 seconds allows us to pass most slow tests which are not
- # marked as slow tests on desktop platforms.
- return 10 * 1000
-
- def driver_stop_timeout(self):
- # DRT doesn't respond to closing stdin, so we might as well stop the driver immediately.
- return 0.0
-
- def default_child_processes(self):
- return len(self._get_devices())
-
- def default_baseline_search_path(self):
- return map(self._webkit_baseline_path, self.FALLBACK_PATHS)
-
- def check_wdiff(self, logging=True):
- return self._host_port.check_wdiff(logging)
-
- def check_build(self, needs_http):
- result = super(ChromiumAndroidPort, self).check_build(needs_http)
- result = self._check_file_exists(self.path_to_md5sum(), 'md5sum utility') and result
- result = self._check_file_exists(self.path_to_forwarder(), 'forwarder utility') and result
- if not result:
- _log.error('For complete Android build requirements, please see:')
- _log.error('')
- _log.error(' http://code.google.com/p/chromium/wiki/AndroidBuildInstructions')
-
- return result
-
- def check_sys_deps(self, needs_http):
- for (font_dirs, font_file, package) in HOST_FONT_FILES:
- exists = False
- for font_dir in font_dirs:
- font_path = font_dir + font_file
- if self._check_file_exists(font_path, '', logging=False):
- exists = True
- break
- if not exists:
- _log.error('You are missing %s under %s. Try installing %s. See build instructions.' % (font_file, font_dirs, package))
- return False
- return True
-
- def expectations_files(self):
- # LayoutTests/platform/chromium-android/TestExpectations should contain only the rules to
- # skip tests for the features not supported or not testable on Android.
- # Other rules should be in LayoutTests/platform/chromium/TestExpectations.
- android_expectations_file = self.path_from_webkit_base('LayoutTests', 'platform', 'chromium-android', 'TestExpectations')
- return super(ChromiumAndroidPort, self).expectations_files() + [android_expectations_file]
-
- def requires_http_server(self):
- """Chromium Android runs tests on devices, and uses the HTTP server to
- serve the actual layout tests to DumpRenderTree."""
- return True
-
- def start_http_server(self, additional_dirs=None, number_of_servers=0):
- if not additional_dirs:
- additional_dirs = {}
- additional_dirs[PERF_TEST_PATH_PREFIX] = self.perf_tests_dir()
- additional_dirs[LAYOUT_TEST_PATH_PREFIX] = self.layout_tests_dir()
- super(ChromiumAndroidPort, self).start_http_server(additional_dirs, number_of_servers)
-
- def create_driver(self, worker_number, no_timeout=False):
- # We don't want the default DriverProxy which is not compatible with our driver.
- # See comments in ChromiumAndroidDriver.start().
- return ChromiumAndroidDriver(self, worker_number, pixel_tests=self.get_option('pixel_tests'),
- # Force no timeout to avoid DumpRenderTree timeouts before NRWT.
- no_timeout=True)
-
- def driver_cmd_line(self):
- # Override to return the actual DumpRenderTree command line.
- return self.create_driver(0)._drt_cmd_line(self.get_option('pixel_tests'), [])
-
- def path_to_adb(self):
- if ChromiumAndroidPort._adb_path:
- return ChromiumAndroidPort._adb_path
-
- provided_adb_path = self.path_from_chromium_base('third_party', 'android_tools', 'sdk', 'platform-tools', 'adb')
-
- path_version = self._determine_adb_version('adb')
- provided_version = self._determine_adb_version(provided_adb_path)
- assert provided_version, 'The checked in Android SDK is missing. Are you sure you ran update-webkit --chromium-android?'
-
- if not path_version:
- ChromiumAndroidPort._adb_path = provided_adb_path
- elif provided_version > path_version:
- # FIXME: The Printer isn't initialized when this is called, so using _log would just show an uninitialized logger error.
- print >> sys.stderr, 'The "adb" version in your path is older than the one checked in; consider updating your local Android SDK. Using the checked-in one.'
- ChromiumAndroidPort._adb_path = provided_adb_path
- else:
- ChromiumAndroidPort._adb_path = 'adb'
-
- return ChromiumAndroidPort._adb_path
-
- def path_to_forwarder(self):
- return self._build_path('forwarder')
-
- def path_to_md5sum(self):
- return self._build_path(MD5SUM_DEVICE_FILE_NAME)
-
- # Overridden private functions.
-
- def _build_path(self, *comps):
- return self._host_port._build_path(*comps)
-
- def _build_path_with_configuration(self, configuration, *comps):
- return self._host_port._build_path_with_configuration(configuration, *comps)
-
- def _path_to_apache(self):
- return self._host_port._path_to_apache()
-
- def _path_to_apache_config_file(self):
- return self._host_port._path_to_apache_config_file()
-
- def _path_to_driver(self, configuration=None):
- return self._build_path_with_configuration(configuration, 'DumpRenderTree_apk/DumpRenderTree-debug.apk')
-
- def _path_to_helper(self):
- return None
-
- def _path_to_image_diff(self):
- return self._host_port._path_to_image_diff()
-
- def _path_to_lighttpd(self):
- return self._host_port._path_to_lighttpd()
-
- def _path_to_lighttpd_modules(self):
- return self._host_port._path_to_lighttpd_modules()
-
- def _path_to_lighttpd_php(self):
- return self._host_port._path_to_lighttpd_php()
-
- def _path_to_wdiff(self):
- return self._host_port._path_to_wdiff()
-
- def _shut_down_http_server(self, pid):
- return self._host_port._shut_down_http_server(pid)
-
- def _driver_class(self):
- return ChromiumAndroidDriver
-
- # Local private functions.
-
- def _determine_adb_version(self, adb_path):
- re_version = re.compile('^.*version ([\d\.]+)$')
- try:
- output = self._executive.run_command([adb_path, 'version'], error_handler=self._executive.ignore_error)
- except OSError:
- return None
- result = re_version.match(output)
- if not output or not result:
- return None
- return [int(n) for n in result.group(1).split('.')]
-
- def _get_devices(self):
- if not self._devices:
- re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
- result = self._executive.run_command([self.path_to_adb(), 'devices'], error_handler=self._executive.ignore_error)
- self._devices = re_device.findall(result)
- if not self._devices:
- raise AssertionError('No devices attached. Result of "adb devices": %s' % result)
- return self._devices
-
- def _get_device_serial(self, worker_number):
- devices = self._get_devices()
- if worker_number >= len(devices):
- raise AssertionError('Worker number exceeds available number of devices')
- return devices[worker_number]
-
-
-class ChromiumAndroidDriver(driver.Driver):
- def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
- super(ChromiumAndroidDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
- self._cmd_line = None
- self._in_fifo_path = DEVICE_FIFO_PATH + 'stdin.fifo'
- self._out_fifo_path = DEVICE_FIFO_PATH + 'test.fifo'
- self._err_fifo_path = DEVICE_FIFO_PATH + 'stderr.fifo'
- self._read_stdout_process = None
- self._read_stderr_process = None
- self._forwarder_process = None
- self._has_setup = False
- self._original_governors = {}
- self._device_serial = port._get_device_serial(worker_number)
- self._adb_command = [port.path_to_adb(), '-s', self._device_serial]
-
- def __del__(self):
- self._teardown_performance()
- super(ChromiumAndroidDriver, self).__del__()
-
- def _setup_md5sum_and_push_data_if_needed(self):
- self._md5sum_path = self._port.path_to_md5sum()
- if not self._file_exists_on_device(MD5SUM_DEVICE_PATH):
- if not self._push_to_device(self._md5sum_path, MD5SUM_DEVICE_PATH):
- raise AssertionError('Could not push md5sum to device')
-
- self._push_executable()
- self._push_fonts()
- self._push_test_resources()
-
- def _setup_test(self):
- if self._has_setup:
- return
-
- self._restart_adb_as_root()
- self._setup_md5sum_and_push_data_if_needed()
- self._has_setup = True
- self._setup_performance()
-
- # Required by webkit_support::GetWebKitRootDirFilePath().
- # Other directories will be created automatically by adb push.
- self._run_adb_command(['shell', 'mkdir', '-p', DEVICE_SOURCE_ROOT_DIR + 'chrome'])
-
- # Allow the DumpRenderTree app to fully access the directory.
- # The native code needs the permission to write temporary files and create pipes here.
- self._run_adb_command(['shell', 'mkdir', '-p', DEVICE_DRT_DIR])
- self._run_adb_command(['shell', 'chmod', '777', DEVICE_DRT_DIR])
-
- # Delete the disk cache if any to ensure a clean test run.
- # This is like what's done in ChromiumPort.setup_test_run but on the device.
- self._run_adb_command(['shell', 'rm', '-r', DRT_APP_CACHE_DIR])
-
- def _log_error(self, message):
- _log.error('[%s] %s' % (self._device_serial, message))
-
- def _log_debug(self, message):
- _log.debug('[%s] %s' % (self._device_serial, message))
-
- def _abort(self, message):
- raise AssertionError('[%s] %s' % (self._device_serial, message))
-
- @staticmethod
- def _extract_hashes_from_md5sum_output(md5sum_output):
- assert md5sum_output
- return [line.split(' ')[0] for line in md5sum_output]
-
- def _push_file_if_needed(self, host_file, device_file):
- assert os.path.exists(host_file)
- device_hashes = self._extract_hashes_from_md5sum_output(
- self._port.host.executive.popen(self._adb_command + ['shell', MD5SUM_DEVICE_PATH, device_file],
- stdout=subprocess.PIPE).stdout)
- host_hashes = self._extract_hashes_from_md5sum_output(
- self._port.host.executive.popen(args=['%s_host' % self._md5sum_path, host_file],
- stdout=subprocess.PIPE).stdout)
- if host_hashes and device_hashes == host_hashes:
- return
- self._push_to_device(host_file, device_file)
-
- def _push_executable(self):
- self._push_file_if_needed(self._port.path_to_forwarder(), DEVICE_FORWARDER_PATH)
- self._push_file_if_needed(self._port._build_path('DumpRenderTree.pak'), DEVICE_DRT_DIR + 'DumpRenderTree.pak')
- self._push_file_if_needed(self._port._build_path('DumpRenderTree_resources'), DEVICE_DRT_DIR + 'DumpRenderTree_resources')
- self._push_file_if_needed(self._port._build_path('android_main_fonts.xml'), DEVICE_DRT_DIR + 'android_main_fonts.xml')
- self._push_file_if_needed(self._port._build_path('android_fallback_fonts.xml'), DEVICE_DRT_DIR + 'android_fallback_fonts.xml')
- self._run_adb_command(['uninstall', DRT_APP_PACKAGE])
- drt_host_path = self._port._path_to_driver()
- install_result = self._run_adb_command(['install', drt_host_path])
- if install_result.find('Success') == -1:
- self._abort('Failed to install %s onto device: %s' % (drt_host_path, install_result))
-
- def _push_fonts(self):
- self._log_debug('Pushing fonts')
- path_to_ahem_font = self._port._build_path('AHEM____.TTF')
- self._push_file_if_needed(path_to_ahem_font, DEVICE_FONTS_DIR + 'AHEM____.TTF')
- for (host_dirs, font_file, package) in HOST_FONT_FILES:
- for host_dir in host_dirs:
- host_font_path = host_dir + font_file
- if self._port._check_file_exists(host_font_path, '', logging=False):
- self._push_file_if_needed(host_font_path, DEVICE_FONTS_DIR + font_file)
-
- def _push_test_resources(self):
- self._log_debug('Pushing test resources')
- for resource in TEST_RESOURCES_TO_PUSH:
- self._push_file_if_needed(self._port.layout_tests_dir() + '/' + resource, DEVICE_LAYOUT_TESTS_DIR + resource)
-
- def _restart_adb_as_root(self):
- output = self._run_adb_command(['root'])
- if 'adbd is already running as root' in output:
- return
- elif 'restarting adbd as root' not in output:
- self._log_error('Unrecognized output from adb root: %s' % output)
-
- # Regardless of the output, give the device a moment to come back online.
- self._run_adb_command(['wait-for-device'])
-
- def _run_adb_command(self, cmd, ignore_error=False):
- self._log_debug('Run adb command: ' + str(cmd))
- if ignore_error:
- error_handler = self._port._executive.ignore_error
- else:
- error_handler = None
- result = self._port._executive.run_command(self._adb_command + cmd, error_handler=error_handler)
- # Limit the length to avoid overly verbose output from commands like 'adb logcat' and 'cat /data/tombstones/tombstone01',
- # whose full outputs are normally printed in later logs.
- self._log_debug('Run adb result: ' + result[:80])
- return result
-
- def _link_device_file(self, from_file, to_file, ignore_error=False):
- # rm to_file first to make sure that ln succeeds.
- self._run_adb_command(['shell', 'rm', to_file], ignore_error)
- return self._run_adb_command(['shell', 'ln', '-s', from_file, to_file], ignore_error)
-
- def _push_to_device(self, host_path, device_path, ignore_error=False):
- return self._run_adb_command(['push', host_path, device_path], ignore_error)
-
- def _pull_from_device(self, device_path, host_path, ignore_error=False):
- return self._run_adb_command(['pull', device_path, host_path], ignore_error)
-
- def _get_last_stacktrace(self):
- tombstones = self._run_adb_command(['shell', 'ls', '-n', '/data/tombstones'])
- if not tombstones or tombstones.startswith('/data/tombstones: No such file or directory'):
- self._log_error('DRT crashed, but no tombstone found!')
- return ''
- tombstones = tombstones.rstrip().split('\n')
- last_tombstone = tombstones[0].split()
- for tombstone in tombstones[1:]:
- # Format of fields:
- # 0 1 2 3 4 5 6
- # permission uid gid size date time filename
- # -rw------- 1000 1000 45859 2011-04-13 06:00 tombstone_00
- fields = tombstone.split()
- if (fields[4] + fields[5] >= last_tombstone[4] + last_tombstone[5]):
- last_tombstone = fields
- else:
- break
-
- # Use Android tool vendor/google/tools/stack to convert the raw
- # stack trace into a human readable format, if needed.
- # It takes a long time, so don't do it here.
- return '%s\n%s' % (' '.join(last_tombstone),
- self._run_adb_command(['shell', 'cat', '/data/tombstones/' + last_tombstone[6]]))
-
- def _get_logcat(self):
- return self._run_adb_command(['logcat', '-d', '-v', 'threadtime'])
-
- def _setup_performance(self):
- # Disable CPU scaling and drop ram cache to reduce noise in tests
- if not self._original_governors:
- governor_files = self._run_adb_command(['shell', 'ls', SCALING_GOVERNORS_PATTERN])
- if governor_files.find('No such file or directory') == -1:
- for file in governor_files.split():
- self._original_governors[file] = self._run_adb_command(['shell', 'cat', file]).strip()
- self._run_adb_command(['shell', 'echo', 'performance', '>', file])
-
- def _teardown_performance(self):
- for file, original_content in self._original_governors.items():
- self._run_adb_command(['shell', 'echo', original_content, '>', file])
- self._original_governors = {}
-
- def _get_crash_log(self, stdout, stderr, newer_than):
- if not stdout:
- stdout = ''
- stdout += '********* [%s] Logcat:\n%s' % (self._device_serial, self._get_logcat())
- if not stderr:
- stderr = ''
- stderr += '********* [%s] Tombstone file:\n%s' % (self._device_serial, self._get_last_stacktrace())
- return super(ChromiumAndroidDriver, self)._get_crash_log(stdout, stderr, newer_than)
-
- def cmd_line(self, pixel_tests, per_test_args):
- # The returned command line is used to start _server_process. In our case, it's an interactive 'adb shell'.
- # The command line passed to the DRT process is returned by _drt_cmd_line() instead.
- return self._adb_command + ['shell']
-
- def _file_exists_on_device(self, full_file_path):
- assert full_file_path.startswith('/')
- return self._run_adb_command(['shell', 'ls', full_file_path]).strip() == full_file_path
-
- def _drt_cmd_line(self, pixel_tests, per_test_args):
- return driver.Driver.cmd_line(self, pixel_tests, per_test_args) + ['--create-stdin-fifo', '--separate-stderr-fifo']
-
- @staticmethod
- def _loop_with_timeout(condition, timeout_secs):
- deadline = time.time() + timeout_secs
- while time.time() < deadline:
- if condition():
- return True
- return False
-
- def _all_pipes_created(self):
- return (self._file_exists_on_device(self._in_fifo_path) and
- self._file_exists_on_device(self._out_fifo_path) and
- self._file_exists_on_device(self._err_fifo_path))
-
- def _remove_all_pipes(self):
- for file in [self._in_fifo_path, self._out_fifo_path, self._err_fifo_path]:
- self._run_adb_command(['shell', 'rm', file])
-
- return (not self._file_exists_on_device(self._in_fifo_path) and
- not self._file_exists_on_device(self._out_fifo_path) and
- not self._file_exists_on_device(self._err_fifo_path))
-
- def run_test(self, driver_input, stop_when_done):
- base = self._port.lookup_virtual_test_base(driver_input.test_name)
- if base:
- driver_input = copy.copy(driver_input)
- driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
- driver_input.test_name = base
- return super(ChromiumAndroidDriver, self).run_test(driver_input, stop_when_done)
-
- def start(self, pixel_tests, per_test_args):
- # Only one driver instance is allowed because of the nature of Android activity.
- # The single driver needs to restart DumpRenderTree when the command line changes.
- cmd_line = self._drt_cmd_line(pixel_tests, per_test_args)
- if cmd_line != self._cmd_line:
- self.stop()
- self._cmd_line = cmd_line
- super(ChromiumAndroidDriver, self).start(pixel_tests, per_test_args)
-
- def _start(self, pixel_tests, per_test_args):
- self._setup_test()
-
- for retries in range(3):
- if self._start_once(pixel_tests, per_test_args):
- return
- self._log_error('Failed to start DumpRenderTree application. Retries=%d. Log:%s' % (retries, self._get_logcat()))
- self.stop()
- time.sleep(2)
- self._abort('Failed to start DumpRenderTree application multiple times. Give up.')
-
- def _start_once(self, pixel_tests, per_test_args):
- super(ChromiumAndroidDriver, self)._start(pixel_tests, per_test_args)
-
- self._log_debug('Starting forwarder')
- self._forwarder_process = self._port._server_process_constructor(
- self._port, 'Forwarder', self._adb_command + ['shell', '%s -D %s' % (DEVICE_FORWARDER_PATH, FORWARD_PORTS)])
- self._forwarder_process.start()
-
- self._run_adb_command(['logcat', '-c'])
- self._run_adb_command(['shell', 'echo'] + self._cmd_line + ['>', COMMAND_LINE_FILE])
- start_result = self._run_adb_command(['shell', 'am', 'start', '-e', 'RunInSubThread', '-n', DRT_ACTIVITY_FULL_NAME])
- if start_result.find('Exception') != -1:
- self._log_error('Failed to start DumpRenderTree application. Exception:\n' + start_result)
- return False
-
- if not ChromiumAndroidDriver._loop_with_timeout(self._all_pipes_created, DRT_START_STOP_TIMEOUT_SECS):
- return False
-
- # Read back the shell prompt to ensure the adb shell is ready.
- deadline = time.time() + DRT_START_STOP_TIMEOUT_SECS
- self._server_process.start()
- self._read_prompt(deadline)
- self._log_debug('Interactive shell started')
-
- # Start a process to read from the stdout fifo of the DumpRenderTree app and print to stdout.
- self._log_debug('Redirecting stdout to ' + self._out_fifo_path)
- self._read_stdout_process = self._port._server_process_constructor(
- self._port, 'ReadStdout', self._adb_command + ['shell', 'cat', self._out_fifo_path])
- self._read_stdout_process.start()
-
- # Start a process to read from the stderr fifo of the DumpRenderTree app and print to stdout.
- self._log_debug('Redirecting stderr to ' + self._err_fifo_path)
- self._read_stderr_process = self._port._server_process_constructor(
- self._port, 'ReadStderr', self._adb_command + ['shell', 'cat', self._err_fifo_path])
- self._read_stderr_process.start()
-
- self._log_debug('Redirecting stdin to ' + self._in_fifo_path)
- self._server_process.write('cat >%s\n' % self._in_fifo_path)
-
- # Combine the stdout and stderr pipes into self._server_process.
- self._server_process.replace_outputs(self._read_stdout_process._proc.stdout, self._read_stderr_process._proc.stdout)
-
- def deadlock_detector(processes, normal_startup_event):
- if not ChromiumAndroidDriver._loop_with_timeout(lambda: normal_startup_event.is_set(), DRT_START_STOP_TIMEOUT_SECS):
- # If normal_startup_event is not set in time, the main thread must be blocked at
- # reading/writing the fifo. Kill the fifo reading/writing processes to let the
- # main thread escape from the deadlocked state. After that, the main thread will
- # treat this as a crash.
- self._log_error('Deadlock detected. Processes killed.')
- for i in processes:
- i.kill()
-
- # Start a thread to kill the pipe reading/writing processes on deadlock of the fifos during startup.
- normal_startup_event = threading.Event()
- threading.Thread(name='DeadlockDetector', target=deadlock_detector,
- args=([self._server_process, self._read_stdout_process, self._read_stderr_process], normal_startup_event)).start()
-
- output = ''
- line = self._server_process.read_stdout_line(deadline)
- while not self._server_process.timed_out and not self.has_crashed() and line.rstrip() != '#READY':
- output += line
- line = self._server_process.read_stdout_line(deadline)
-
- if self._server_process.timed_out and not self.has_crashed():
- # DumpRenderTree crashes during startup, or when the deadlock detector detected
- # deadlock and killed the fifo reading/writing processes.
- _log.error('Failed to start DumpRenderTree: \n%s' % output)
- return False
- else:
- # Inform the deadlock detector that the startup is successful without deadlock.
- normal_startup_event.set()
- return True
-
- def stop(self):
- self._run_adb_command(['shell', 'am', 'force-stop', DRT_APP_PACKAGE])
-
- if self._read_stdout_process:
- self._read_stdout_process.kill()
- self._read_stdout_process = None
-
- if self._read_stderr_process:
- self._read_stderr_process.kill()
- self._read_stderr_process = None
-
- super(ChromiumAndroidDriver, self).stop()
-
- if self._forwarder_process:
- self._forwarder_process.kill()
- self._forwarder_process = None
-
- if self._has_setup:
- if not ChromiumAndroidDriver._loop_with_timeout(self._remove_all_pipes, DRT_START_STOP_TIMEOUT_SECS):
- raise AssertionError('Failed to remove fifo files. May be locked.')
-
- def _command_from_driver_input(self, driver_input):
- command = super(ChromiumAndroidDriver, self)._command_from_driver_input(driver_input)
- if command.startswith('/'):
- fs = self._port._filesystem
- # FIXME: what happens if command lies outside of the layout_tests_dir on the host?
- relative_test_filename = fs.relpath(command, fs.dirname(self._port.layout_tests_dir()))
- command = DEVICE_WEBKIT_BASE_DIR + relative_test_filename
- return command
-
- def _read_prompt(self, deadline):
- last_char = ''
- while True:
- current_char = self._server_process.read_stdout(deadline, 1)
- if current_char == ' ':
- if last_char in ('#', '$'):
- return
- last_char = current_char
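The driver removed above avoids redundant transfers by comparing md5 hashes of a file on the host and on the device before calling 'adb push' (_push_file_if_needed). The sketch below is only an approximation of that idea: it assumes the device image provides an md5sum command, whereas the removed code pushed its own md5sum binary, and the adb path and device serial are placeholders.

    import hashlib
    import subprocess

    def push_if_needed(adb_path, device_serial, host_file, device_file):
        """Push host_file to device_file only when the hashes differ."""
        with open(host_file, 'rb') as f:
            host_hash = hashlib.md5(f.read()).hexdigest()
        # Hash the file on the device; an empty or error result simply forces a push.
        proc = subprocess.run([adb_path, '-s', device_serial, 'shell', 'md5sum', device_file],
                              capture_output=True, text=True)
        fields = proc.stdout.split()
        device_hash = fields[0] if fields else ''
        if device_hash == host_hash:
            return False  # already up to date, skip the push
        subprocess.check_call([adb_path, '-s', device_serial, 'push', host_file, device_file])
        return True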
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
deleted file mode 100644
index cfbc646df..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
+++ /dev/null
@@ -1,296 +0,0 @@
-# Copyright (C) 2012 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import optparse
-import StringIO
-import time
-import unittest
-import sys
-
-from webkitpy.common.system import executive_mock
-from webkitpy.common.system.executive_mock import MockExecutive2
-from webkitpy.common.system.systemhost_mock import MockSystemHost
-
-from webkitpy.layout_tests.port import chromium_android
-from webkitpy.layout_tests.port import chromium_port_testcase
-from webkitpy.layout_tests.port import driver
-from webkitpy.layout_tests.port import driver_unittest
-from webkitpy.tool.mocktool import MockOptions
-
-class MockRunCommand(object):
- def __init__(self):
- self._mock_logcat = ''
- self._mock_devices_output = ''
- self._mock_devices = []
- self._mock_ls_tombstones = ''
-
- def mock_run_command_fn(self, args):
- if not args[0].endswith('adb'):
- return ''
- if args[1] == 'devices':
- return self._mock_devices_output
- if args[1] == 'version':
- return 'version 1.0'
-
- assert len(args) > 3
- assert args[1] == '-s'
- assert args[2] in self._mock_devices
- if args[3] == 'shell':
- if args[4:] == ['ls', '-n', '/data/tombstones']:
- return self._mock_ls_tombstones
- elif args[4] == 'cat':
- return args[5] + '\nmock_contents\n'
- elif args[3] == 'logcat':
- return self._mock_logcat
- return ''
-
- def mock_no_device(self):
- self._mock_devices = []
- self._mock_devices_output = 'List of devices attached'
-
- def mock_one_device(self):
- self._mock_devices = ['123456789ABCDEF0']
- self._mock_devices_output = ('List of devices attached\n'
- '%s\tdevice\n' % self._mock_devices[0])
-
- def mock_two_devices(self):
- self._mock_devices = ['123456789ABCDEF0', '23456789ABCDEF01']
- self._mock_devices_output = ('* daemon not running. starting it now on port 5037 *'
- '* daemon started successfully *'
- 'List of devices attached\n'
- '%s\tdevice\n'
- '%s\tdevice\n' % (self._mock_devices[0], self._mock_devices[1]))
-
- def mock_no_tombstone_dir(self):
- self._mock_ls_tombstones = '/data/tombstones: No such file or directory'
-
- def mock_no_tombstone_file(self):
- self._mock_ls_tombstones = ''
-
- def mock_ten_tombstones(self):
- self._mock_ls_tombstones = ('-rw------- 1000 1000 218643 2012-04-26 18:15 tombstone_00\n'
- '-rw------- 1000 1000 241695 2012-04-26 18:15 tombstone_01\n'
- '-rw------- 1000 1000 219472 2012-04-26 18:16 tombstone_02\n'
- '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
- '-rw------- 1000 1000 82022 2012-04-23 16:57 tombstone_04\n'
- '-rw------- 1000 1000 82015 2012-04-23 16:57 tombstone_05\n'
- '-rw------- 1000 1000 81974 2012-04-23 16:58 tombstone_06\n'
- '-rw------- 1000 1000 237409 2012-04-26 17:41 tombstone_07\n'
- '-rw------- 1000 1000 276089 2012-04-26 18:15 tombstone_08\n'
- '-rw------- 1000 1000 219618 2012-04-26 18:15 tombstone_09\n')
-
- def mock_logcat(self, content):
- self._mock_logcat = content
-
-
-class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
- port_name = 'chromium-android'
- port_maker = chromium_android.ChromiumAndroidPort
-
- def make_port(self, **kwargs):
- port = super(ChromiumAndroidPortTest, self).make_port(**kwargs)
- self.mock_run_command = MockRunCommand()
- self.mock_run_command.mock_one_device()
- port._executive = MockExecutive2(run_command_fn=self.mock_run_command.mock_run_command_fn)
- return port
-
- def test_attributes(self):
- port = self.make_port()
- self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-android'))
-
- def test_default_timeout_ms(self):
- self.assertEqual(self.make_port(options=optparse.Values({'configuration': 'Release'})).default_timeout_ms(), 10000)
- self.assertEqual(self.make_port(options=optparse.Values({'configuration': 'Debug'})).default_timeout_ms(), 10000)
-
- def test_expectations_files(self):
- # FIXME: override this test temporarily while we're still upstreaming the android port and
- # using a custom expectations file.
- pass
-
- def test_get_devices_no_device(self):
- port = self.make_port()
- self.mock_run_command.mock_no_device()
- self.assertRaises(AssertionError, port._get_devices)
-
- def test_get_devices_one_device(self):
- port = self.make_port()
- self.mock_run_command.mock_one_device()
- self.assertEqual(self.mock_run_command._mock_devices, port._get_devices())
- self.assertEqual(1, port.default_child_processes())
-
- def test_get_devices_two_devices(self):
- port = self.make_port()
- self.mock_run_command.mock_two_devices()
- self.assertEqual(self.mock_run_command._mock_devices, port._get_devices())
- self.assertEqual(2, port.default_child_processes())
-
- def test_get_device_serial_no_device(self):
- port = self.make_port()
- self.mock_run_command.mock_no_device()
- self.assertRaises(AssertionError, port._get_device_serial, 0)
-
- def test_get_device_serial_one_device(self):
- port = self.make_port()
- self.mock_run_command.mock_one_device()
- self.assertEqual(self.mock_run_command._mock_devices[0], port._get_device_serial(0))
- self.assertRaises(AssertionError, port._get_device_serial, 1)
-
- def test_get_device_serial_two_devices(self):
- port = self.make_port()
- self.mock_run_command.mock_two_devices()
- self.assertEqual(self.mock_run_command._mock_devices[0], port._get_device_serial(0))
- self.assertEqual(self.mock_run_command._mock_devices[1], port._get_device_serial(1))
- self.assertRaises(AssertionError, port._get_device_serial, 2)
-
- def test_must_require_http_server(self):
- port = self.make_port()
- self.assertEqual(port.requires_http_server(), True)
-
-
-class ChromiumAndroidDriverTest(unittest.TestCase):
- def setUp(self):
- self.mock_run_command = MockRunCommand()
- self.mock_run_command.mock_one_device()
- self.port = chromium_android.ChromiumAndroidPort(
- MockSystemHost(executive=MockExecutive2(run_command_fn=self.mock_run_command.mock_run_command_fn)),
- 'chromium-android')
- self.driver = chromium_android.ChromiumAndroidDriver(self.port, worker_number=0, pixel_tests=True)
-
- def test_get_last_stacktrace(self):
- self.mock_run_command.mock_no_tombstone_dir()
- self.assertEqual(self.driver._get_last_stacktrace(), '')
-
- self.mock_run_command.mock_no_tombstone_file()
- self.assertEqual(self.driver._get_last_stacktrace(), '')
-
- self.mock_run_command.mock_ten_tombstones()
- self.assertEqual(self.driver._get_last_stacktrace(),
- '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
- '/data/tombstones/tombstone_03\nmock_contents\n')
-
- def test_get_crash_log(self):
- self.mock_run_command.mock_logcat('logcat contents\n')
- self.mock_run_command.mock_ten_tombstones()
- self.driver._crashed_process_name = 'foo'
- self.driver._crashed_pid = 1234
- self.assertEqual(self.driver._get_crash_log('out bar\nout baz\n', 'err bar\nerr baz\n', newer_than=None),
- ('err bar\n'
- 'err baz\n'
- '********* [123456789ABCDEF0] Tombstone file:\n'
- '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
- '/data/tombstones/tombstone_03\n'
- 'mock_contents\n',
- u'crash log for foo (pid 1234):\n'
- u'STDOUT: out bar\n'
- u'STDOUT: out baz\n'
- u'STDOUT: ********* [123456789ABCDEF0] Logcat:\n'
- u'STDOUT: logcat contents\n'
- u'STDERR: err bar\n'
- u'STDERR: err baz\n'
- u'STDERR: ********* [123456789ABCDEF0] Tombstone file:\n'
- u'STDERR: -rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
- u'STDERR: /data/tombstones/tombstone_03\n'
- u'STDERR: mock_contents\n'))
-
- self.driver._crashed_process_name = None
- self.driver._crashed_pid = None
- self.assertEqual(self.driver._get_crash_log(None, None, newer_than=None),
- ('********* [123456789ABCDEF0] Tombstone file:\n'
- '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
- '/data/tombstones/tombstone_03\n'
- 'mock_contents\n',
- u'crash log for <unknown process name> (pid <unknown>):\n'
- u'STDOUT: ********* [123456789ABCDEF0] Logcat:\n'
- u'STDOUT: logcat contents\n'
- u'STDERR: ********* [123456789ABCDEF0] Tombstone file:\n'
- u'STDERR: -rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
- u'STDERR: /data/tombstones/tombstone_03\n'
- u'STDERR: mock_contents\n'))
-
- def test_cmd_line(self):
- cmd_line = self.driver.cmd_line(True, ['anything'])
- self.assertEqual(['adb', '-s', self.mock_run_command._mock_devices[0], 'shell'], cmd_line)
-
- def test_drt_cmd_line(self):
- cmd_line = self.driver._drt_cmd_line(True, ['--a'])
- self.assertTrue('--a' in cmd_line)
- self.assertTrue('--create-stdin-fifo' in cmd_line)
- self.assertTrue('--separate-stderr-fifo' in cmd_line)
-
- def test_read_prompt(self):
- self.driver._server_process = driver_unittest.MockServerProcess(lines=['root@android:/ # '])
- self.assertEqual(self.driver._read_prompt(time.time() + 1), None)
- self.driver._server_process = driver_unittest.MockServerProcess(lines=['$ '])
- self.assertEqual(self.driver._read_prompt(time.time() + 1), None)
-
- def test_command_from_driver_input(self):
- driver_input = driver.DriverInput('foo/bar/test.html', 10, 'checksum', True)
- expected_command = "/data/local/tmp/third_party/WebKit/LayoutTests/foo/bar/test.html'--pixel-test'checksum\n"
- if (sys.platform != "cygwin"):
- self.assertEqual(self.driver._command_from_driver_input(driver_input), expected_command)
-
- driver_input = driver.DriverInput('http/tests/foo/bar/test.html', 10, 'checksum', True)
- expected_command = "http://127.0.0.1:8000/foo/bar/test.html'--pixel-test'checksum\n"
- self.assertEqual(self.driver._command_from_driver_input(driver_input), expected_command)
-
-
-class ChromiumAndroidDriverTwoDriversTest(unittest.TestCase):
- def test_two_drivers(self):
- mock_run_command = MockRunCommand()
- mock_run_command.mock_two_devices()
- port = chromium_android.ChromiumAndroidPort(
- MockSystemHost(executive=MockExecutive2(run_command_fn=mock_run_command.mock_run_command_fn)),
- 'chromium-android')
- driver0 = chromium_android.ChromiumAndroidDriver(port, worker_number=0, pixel_tests=True)
- driver1 = chromium_android.ChromiumAndroidDriver(port, worker_number=1, pixel_tests=True)
-
- cmd_line0 = driver0.cmd_line(True, ['anything'])
- self.assertEqual(['adb', '-s', mock_run_command._mock_devices[0], 'shell'], cmd_line0)
-
- cmd_line1 = driver1.cmd_line(True, ['anything'])
- self.assertEqual(['adb', '-s', mock_run_command._mock_devices[1], 'shell'], cmd_line1)
-
-
-class ChromiumAndroidTwoPortsTest(unittest.TestCase):
- def test_options_with_two_ports(self):
- options = MockOptions(additional_drt_flag=['--foo=bar', '--foo=baz'])
- mock_run_command = MockRunCommand()
- mock_run_command.mock_two_devices()
- port0 = chromium_android.ChromiumAndroidPort(
- MockSystemHost(executive=MockExecutive2(run_command_fn=mock_run_command.mock_run_command_fn)),
- 'chromium-android', options=options)
- port1 = chromium_android.ChromiumAndroidPort(
- MockSystemHost(executive=MockExecutive2(run_command_fn=mock_run_command.mock_run_command_fn)),
- 'chromium-android', options=options)
- cmd_line = port1.driver_cmd_line()
- self.assertEqual(cmd_line.count('--encode-binary'), 1)
- self.assertEqual(cmd_line.count('--enable-hardware-gpu'), 1)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
deleted file mode 100644
index dfacf9c08..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux.py
+++ /dev/null
@@ -1,171 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-
-from webkitpy.common.webkit_finder import WebKitFinder
-from webkitpy.layout_tests.port import chromium
-from webkitpy.layout_tests.port import config
-
-
-_log = logging.getLogger(__name__)
-
-
-class ChromiumLinuxPort(chromium.ChromiumPort):
- port_name = 'chromium-linux'
-
- SUPPORTED_ARCHITECTURES = ('x86', 'x86_64')
-
- FALLBACK_PATHS = {
- 'x86_64': [
- 'chromium-linux',
- 'chromium-win',
- 'chromium',
- ],
- 'x86': [
- 'chromium-linux-x86',
- 'chromium-linux',
- 'chromium-win',
- 'chromium',
- ],
- }
-
- DEFAULT_BUILD_DIRECTORIES = ('sconsbuild', 'out')
-
- @classmethod
- def _determine_driver_path_statically(cls, host, options):
- config_object = config.Config(host.executive, host.filesystem)
- build_directory = getattr(options, 'build_directory', None)
- webkit_base = WebKitFinder(host.filesystem).webkit_base()
- chromium_base = cls._chromium_base_dir(host.filesystem)
- if hasattr(options, 'configuration') and options.configuration:
- configuration = options.configuration
- else:
- configuration = config_object.default_configuration()
- return cls._static_build_path(host.filesystem, build_directory, chromium_base, webkit_base, configuration, ['DumpRenderTree'])
-
- @staticmethod
- def _determine_architecture(filesystem, executive, driver_path):
- file_output = ''
- if filesystem.exists(driver_path):
- # The --dereference flag tells file to follow symlinks
- file_output = executive.run_command(['file', '--dereference', driver_path], return_stderr=True)
-
- if 'ELF 32-bit LSB executable' in file_output:
- return 'x86'
- if 'ELF 64-bit LSB executable' in file_output:
- return 'x86_64'
- if file_output:
- _log.warning('Could not determine architecture from "file" output: %s' % file_output)
-
-    # We don't know what the architecture is; default to 'x86_64' because
- # maybe we're rebaselining and the binary doesn't actually exist,
- # or something else weird is going on. It's okay to do this because
- # if we actually try to use the binary, check_build() should fail.
- return 'x86_64'
-
- @classmethod
- def determine_full_port_name(cls, host, options, port_name):
- if port_name.endswith('-linux'):
- return port_name + '-' + cls._determine_architecture(host.filesystem, host.executive, cls._determine_driver_path_statically(host, options))
- return port_name
-
- def __init__(self, host, port_name, **kwargs):
- chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
- (base, arch) = port_name.rsplit('-', 1)
- assert base == 'chromium-linux'
- assert arch in self.SUPPORTED_ARCHITECTURES
- assert port_name in ('chromium-linux', 'chromium-linux-x86', 'chromium-linux-x86_64')
- self._version = 'lucid' # We only support lucid right now.
- self._architecture = arch
-
- def default_baseline_search_path(self):
- port_names = self.FALLBACK_PATHS[self._architecture]
- return map(self._webkit_baseline_path, port_names)
-
- def _modules_to_search_for_symbols(self):
- return [self._build_path('libffmpegsumo.so')]
-
- def check_build(self, needs_http):
- result = chromium.ChromiumPort.check_build(self, needs_http)
- if not result:
- _log.error('For complete Linux build requirements, please see:')
- _log.error('')
- _log.error(' http://code.google.com/p/chromium/wiki/LinuxBuildInstructions')
- return result
-
- def operating_system(self):
- return 'linux'
-
- #
- # PROTECTED METHODS
- #
-
- def _check_apache_install(self):
- result = self._check_file_exists(self._path_to_apache(), "apache2")
- result = self._check_file_exists(self._path_to_apache_config_file(), "apache2 config file") and result
- if not result:
- _log.error(' Please install using: "sudo apt-get install apache2 libapache2-mod-php5"')
- _log.error('')
- return result
-
- def _check_lighttpd_install(self):
- result = self._check_file_exists(
- self._path_to_lighttpd(), "LigHTTPd executable")
- result = self._check_file_exists(self._path_to_lighttpd_php(), "PHP CGI executable") and result
- result = self._check_file_exists(self._path_to_lighttpd_modules(), "LigHTTPd modules") and result
- if not result:
- _log.error(' Please install using: "sudo apt-get install lighttpd php5-cgi"')
- _log.error('')
- return result
-
- def _wdiff_missing_message(self):
- return 'wdiff is not installed; please install using "sudo apt-get install wdiff"'
-
- def _path_to_apache(self):
- if self._is_redhat_based():
- return '/usr/sbin/httpd'
- else:
- return '/usr/sbin/apache2'
-
- def _path_to_lighttpd(self):
- return "/usr/sbin/lighttpd"
-
- def _path_to_lighttpd_modules(self):
- return "/usr/lib/lighttpd"
-
- def _path_to_lighttpd_php(self):
- return "/usr/bin/php-cgi"
-
- def _path_to_driver(self, configuration=None):
- binary_name = self.driver_name()
- return self._build_path_with_configuration(configuration, binary_name)
-
- def _path_to_helper(self):
- return None
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
deleted file mode 100644
index 4160034e3..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_linux_unittest.py
+++ /dev/null
@@ -1,118 +0,0 @@
-# Copyright (C) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest
-
-from webkitpy.common.system import executive_mock
-from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.tool.mocktool import MockOptions
-
-from webkitpy.layout_tests.port import chromium_linux
-from webkitpy.layout_tests.port import chromium_port_testcase
-
-
-class ChromiumLinuxPortTest(chromium_port_testcase.ChromiumPortTestCase):
- port_name = 'chromium-linux'
- port_maker = chromium_linux.ChromiumLinuxPort
-
- def assert_architecture(self, port_name=None, file_output=None, expected_architecture=None):
- host = MockSystemHost()
- host.filesystem.exists = lambda x: 'DumpRenderTree' in x
- if file_output:
- host.executive = executive_mock.MockExecutive2(file_output)
-
- port = self.make_port(host, port_name=port_name)
- self.assertEqual(port.architecture(), expected_architecture)
- if expected_architecture == 'x86':
- self.assertTrue(port.baseline_path().endswith('chromium-linux-x86'))
- self.assertTrue(port.baseline_search_path()[0].endswith('chromium-linux-x86'))
- self.assertTrue(port.baseline_search_path()[1].endswith('chromium-linux'))
- else:
- self.assertTrue(port.baseline_path().endswith('chromium-linux'))
- self.assertTrue(port.baseline_search_path()[0].endswith('chromium-linux'))
-
- def test_architectures(self):
- self.assert_architecture(port_name='chromium-linux-x86',
- expected_architecture='x86')
- self.assert_architecture(port_name='chromium-linux-x86_64',
- expected_architecture='x86_64')
- self.assert_architecture(file_output='ELF 32-bit LSB executable',
- expected_architecture='x86')
- self.assert_architecture(file_output='ELF 64-bit LSB executable',
- expected_architecture='x86_64')
-
- def test_check_illegal_port_names(self):
- # FIXME: Check that, for now, these are illegal port names.
- # Eventually we should be able to do the right thing here.
- self.assertRaises(AssertionError, chromium_linux.ChromiumLinuxPort, MockSystemHost(), port_name='chromium-x86-linux')
-
- def test_determine_architecture_fails(self):
-        # Test that we default to 'x86_64' if the driver doesn't exist.
- port = self.make_port()
- self.assertEqual(port.architecture(), 'x86_64')
-
-        # Test that we default to 'x86_64' on an unknown architecture.
- host = MockSystemHost()
- host.filesystem.exists = lambda x: True
- host.executive = executive_mock.MockExecutive2('win32')
- port = self.make_port(host=host)
- self.assertEqual(port.architecture(), 'x86_64')
-
- # Test that we raise errors if something weird happens.
- host.executive = executive_mock.MockExecutive2(exception=AssertionError)
- self.assertRaises(AssertionError, chromium_linux.ChromiumLinuxPort, host, self.port_name)
-
- def test_operating_system(self):
- self.assertEqual('linux', self.make_port().operating_system())
-
- def test_build_path(self):
- # Test that optional paths are used regardless of whether they exist.
- options = MockOptions(configuration='Release', build_directory='/foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], '/foo/Release')
-
- # Test that optional relative paths are returned unmodified.
- options = MockOptions(configuration='Release', build_directory='foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], 'foo/Release')
-
- # Test that we look in a chromium directory before the webkit directory.
- options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release', '/mock-checkout/out/Release'], '/mock-checkout/Source/WebKit/chromium/out/Release')
-
- # Test that we prefer the legacy dir over the new dir.
- options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/sconsbuild/Release', '/mock-checkout/Source/WebKit/chromium/out/Release'], '/mock-checkout/Source/WebKit/chromium/sconsbuild/Release')
-
- def test_driver_name_option(self):
- self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree'))
- self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))
-
- def test_path_to_image_diff(self):
- self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/ImageDiff')
-
-if __name__ == '__main__':
- port_testcase.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
deleted file mode 100644
index bd65cd1dc..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Chromium Mac implementation of the Port interface."""
-
-import logging
-import signal
-
-from webkitpy.layout_tests.port import chromium
-
-
-_log = logging.getLogger(__name__)
-
-
-class ChromiumMacPort(chromium.ChromiumPort):
- SUPPORTED_OS_VERSIONS = ('snowleopard', 'lion', 'mountainlion', 'future')
- port_name = 'chromium-mac'
-
- FALLBACK_PATHS = {
- 'snowleopard': [
- 'chromium-mac-snowleopard',
- 'chromium-mac-lion',
- 'chromium-mac',
- 'chromium',
- ],
- 'lion': [
- 'chromium-mac-lion',
- 'chromium-mac',
- 'chromium',
- ],
- 'mountainlion': [
- 'chromium-mac',
- 'chromium',
- ],
- 'future': [
- 'chromium-mac',
- 'chromium',
- ],
- }
-
- DEFAULT_BUILD_DIRECTORIES = ('xcodebuild', 'out')
-
- @classmethod
- def determine_full_port_name(cls, host, options, port_name):
- if port_name.endswith('-mac'):
- return port_name + '-' + host.platform.os_version
- return port_name
-
- def __init__(self, host, port_name, **kwargs):
- chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
- self._version = port_name[port_name.index('chromium-mac-') + len('chromium-mac-'):]
- assert self._version in self.SUPPORTED_OS_VERSIONS
-
- def _modules_to_search_for_symbols(self):
- return [self._build_path('ffmpegsumo.so')]
-
- def check_build(self, needs_http):
- result = chromium.ChromiumPort.check_build(self, needs_http)
- if not result:
- _log.error('For complete Mac build requirements, please see:')
- _log.error('')
- _log.error(' http://code.google.com/p/chromium/wiki/MacBuildInstructions')
-
- return result
-
- def operating_system(self):
- return 'mac'
-
- #
- # PROTECTED METHODS
- #
-
- def _lighttpd_path(self, *comps):
- return self.path_from_chromium_base('third_party', 'lighttpd', 'mac', *comps)
-
- def _wdiff_missing_message(self):
- return 'wdiff is not installed; please install from MacPorts or elsewhere'
-
- def _path_to_apache(self):
- return '/usr/sbin/httpd'
-
- def _path_to_apache_config_file(self):
- return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'apache2-httpd.conf')
-
- def _path_to_lighttpd(self):
- return self._lighttpd_path('bin', 'lighttpd')
-
- def _path_to_lighttpd_modules(self):
- return self._lighttpd_path('lib')
-
- def _path_to_lighttpd_php(self):
- return self._lighttpd_path('bin', 'php-cgi')
-
- def _path_to_driver(self, configuration=None):
- # FIXME: make |configuration| happy with case-sensitive file systems.
- return self._build_path_with_configuration(configuration, self.driver_name() + '.app', 'Contents', 'MacOS', self.driver_name())
-
- def _path_to_helper(self):
- binary_name = 'LayoutTestHelper'
- return self._build_path(binary_name)
-
- def _path_to_wdiff(self):
- return 'wdiff'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
deleted file mode 100644
index d10326f0e..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_mac_unittest.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest
-
-from webkitpy.layout_tests.port import chromium_mac
-from webkitpy.layout_tests.port import chromium_port_testcase
-from webkitpy.tool.mocktool import MockOptions
-
-
-class ChromiumMacPortTest(chromium_port_testcase.ChromiumPortTestCase):
- os_name = 'mac'
- os_version = 'snowleopard'
- port_name = 'chromium-mac'
- port_maker = chromium_mac.ChromiumMacPort
-
- def assert_name(self, port_name, os_version_string, expected):
- port = self.make_port(os_version=os_version_string, port_name=port_name)
- self.assertEqual(expected, port.name())
-
- def test_versions(self):
- self.assertTrue(self.make_port().name() in ('chromium-mac-snowleopard', 'chromium-mac-lion', 'chromium-mac-mountainlion', 'chromium-mac-future'))
-
- self.assert_name(None, 'snowleopard', 'chromium-mac-snowleopard')
- self.assert_name('chromium-mac', 'snowleopard', 'chromium-mac-snowleopard')
- self.assert_name('chromium-mac-snowleopard', 'leopard', 'chromium-mac-snowleopard')
- self.assert_name('chromium-mac-snowleopard', 'snowleopard', 'chromium-mac-snowleopard')
-
- self.assert_name(None, 'lion', 'chromium-mac-lion')
- self.assert_name(None, 'mountainlion', 'chromium-mac-mountainlion')
- self.assert_name(None, 'future', 'chromium-mac-future')
-
- self.assert_name('chromium-mac', 'lion', 'chromium-mac-lion')
- self.assert_name('chromium-mac-future', 'snowleopard', 'chromium-mac-future')
- self.assert_name('chromium-mac-future', 'lion', 'chromium-mac-future')
- self.assert_name('chromium-mac-future', 'mountainlion', 'chromium-mac-future')
-
- self.assertRaises(AssertionError, self.assert_name, None, 'tiger', 'should-raise-assertion-so-this-value-does-not-matter')
-
- def test_baseline_path(self):
- port = self.make_port(port_name='chromium-mac-snowleopard')
- self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-mac-snowleopard'))
-
- port = self.make_port(port_name='chromium-mac-lion')
- self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-mac-lion'))
-
- port = self.make_port(port_name='chromium-mac-mountainlion')
- self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-mac'))
-
- port = self.make_port(port_name='chromium-mac-future')
- self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-mac'))
-
- def test_operating_system(self):
- self.assertEqual('mac', self.make_port().operating_system())
-
- def test_build_path(self):
- # Test that optional paths are used regardless of whether they exist.
- options = MockOptions(configuration='Release', build_directory='/foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], '/foo/Release')
-
- # Test that optional relative paths are returned unmodified.
- options = MockOptions(configuration='Release', build_directory='foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], 'foo/Release')
-
- # Test that we look in a chromium directory before the webkit directory.
- options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release', '/mock-checkout/out/Release'], '/mock-checkout/Source/WebKit/chromium/out/Release')
-
- # Test that we prefer the legacy dir over the new dir.
- options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/xcodebuild/Release', '/mock-checkout/Source/WebKit/chromium/out/Release'], '/mock-checkout/Source/WebKit/chromium/xcodebuild/Release')
-
- def test_driver_name_option(self):
- self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree'))
- self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver'))
-
- def test_path_to_image_diff(self):
- self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/ImageDiff')
-
-
-if __name__ == '__main__':
- port_testcase.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
deleted file mode 100644
index dc5706beb..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
+++ /dev/null
@@ -1,223 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import unittest
-
-from webkitpy.common.system import logtesting
-from webkitpy.common.system.executive_mock import MockExecutive2
-from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.tool.mocktool import MockOptions
-
-import chromium_android
-import chromium_linux
-import chromium_mac
-import chromium_win
-
-from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port import port_testcase
-
-
-class ChromiumPortTestCase(port_testcase.PortTestCase):
-
- def test_check_build(self):
- port = self.make_port()
- port.check_build(needs_http=True)
-
- def test_default_max_locked_shards(self):
- port = self.make_port()
- port.default_child_processes = lambda: 16
- self.assertEqual(port.default_max_locked_shards(), 4)
- port.default_child_processes = lambda: 2
- self.assertEqual(port.default_max_locked_shards(), 1)
-
- def test_default_timeout_ms(self):
- self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
- self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 12000)
-
- def test_default_pixel_tests(self):
- self.assertEqual(self.make_port().default_pixel_tests(), True)
-
- def test_missing_symbol_to_skipped_tests(self):
- # Test that we get the chromium skips and not the webkit default skips
- port = self.make_port()
- skip_dict = port._missing_symbol_to_skipped_tests()
- self.assertTrue('ff_mp3_decoder' in skip_dict)
- self.assertFalse('WebGLShader' in skip_dict)
-
- def test_all_test_configurations(self):
- """Validate the complete set of configurations this port knows about."""
- port = self.make_port()
- self.assertEqual(set(port.all_test_configurations()), set([
- TestConfiguration('icecreamsandwich', 'x86', 'debug'),
- TestConfiguration('icecreamsandwich', 'x86', 'release'),
- TestConfiguration('snowleopard', 'x86', 'debug'),
- TestConfiguration('snowleopard', 'x86', 'release'),
- TestConfiguration('lion', 'x86', 'debug'),
- TestConfiguration('lion', 'x86', 'release'),
- TestConfiguration('mountainlion', 'x86', 'debug'),
- TestConfiguration('mountainlion', 'x86', 'release'),
- TestConfiguration('xp', 'x86', 'debug'),
- TestConfiguration('xp', 'x86', 'release'),
- TestConfiguration('win7', 'x86', 'debug'),
- TestConfiguration('win7', 'x86', 'release'),
- TestConfiguration('lucid', 'x86', 'debug'),
- TestConfiguration('lucid', 'x86', 'release'),
- TestConfiguration('lucid', 'x86_64', 'debug'),
- TestConfiguration('lucid', 'x86_64', 'release'),
- ]))
-
- class TestMacPort(chromium_mac.ChromiumMacPort):
- def __init__(self, options=None):
- options = options or MockOptions()
- chromium_mac.ChromiumMacPort.__init__(self, MockSystemHost(os_name='mac', os_version='leopard'), 'chromium-mac-leopard', options=options)
-
- def default_configuration(self):
- self.default_configuration_called = True
- return 'default'
-
- class TestAndroidPort(chromium_android.ChromiumAndroidPort):
- def __init__(self, options=None):
- options = options or MockOptions()
- chromium_android.ChromiumAndroidPort.__init__(self, MockSystemHost(os_name='android', os_version='icecreamsandwich'), 'chromium-android', options=options)
-
- def default_configuration(self):
- self.default_configuration_called = True
- return 'default'
-
- class TestLinuxPort(chromium_linux.ChromiumLinuxPort):
- def __init__(self, options=None):
- options = options or MockOptions()
- chromium_linux.ChromiumLinuxPort.__init__(self, MockSystemHost(os_name='linux', os_version='lucid'), 'chromium-linux-x86', options=options)
-
- def default_configuration(self):
- self.default_configuration_called = True
- return 'default'
-
- class TestWinPort(chromium_win.ChromiumWinPort):
- def __init__(self, options=None):
- options = options or MockOptions()
- chromium_win.ChromiumWinPort.__init__(self, MockSystemHost(os_name='win', os_version='xp'), 'chromium-win-xp', options=options)
-
- def default_configuration(self):
- self.default_configuration_called = True
- return 'default'
-
- def test_default_configuration(self):
- mock_options = MockOptions()
- port = ChromiumPortTestCase.TestLinuxPort(options=mock_options)
- self.assertEqual(mock_options.configuration, 'default') # pylint: disable-msg=E1101
- self.assertTrue(port.default_configuration_called)
-
- mock_options = MockOptions(configuration=None)
- port = ChromiumPortTestCase.TestLinuxPort(mock_options)
- self.assertEqual(mock_options.configuration, 'default') # pylint: disable-msg=E1101
- self.assertTrue(port.default_configuration_called)
-
- def test_diff_image(self):
- class TestPort(ChromiumPortTestCase.TestLinuxPort):
- def _path_to_image_diff(self):
- return "/path/to/image_diff"
-
- port = ChromiumPortTestCase.TestLinuxPort()
- mock_image_diff = "MOCK Image Diff"
-
- def mock_run_command(args):
- port._filesystem.write_binary_file(args[4], mock_image_diff)
- return 1
-
- # Images are different.
- port._executive = MockExecutive2(run_command_fn=mock_run_command)
- self.assertEqual(mock_image_diff, port.diff_image("EXPECTED", "ACTUAL")[0])
-
- # Images are the same.
- port._executive = MockExecutive2(exit_code=0)
- self.assertEqual(None, port.diff_image("EXPECTED", "ACTUAL")[0])
-
- # There was some error running image_diff.
- port._executive = MockExecutive2(exit_code=2)
- exception_raised = False
- try:
- port.diff_image("EXPECTED", "ACTUAL")
- except ValueError, e:
- exception_raised = True
- self.assertFalse(exception_raised)
-
- def test_diff_image_crashed(self):
- port = ChromiumPortTestCase.TestLinuxPort()
- port._executive = MockExecutive2(exit_code=2)
- self.assertEqual(port.diff_image("EXPECTED", "ACTUAL"), (None, 0, 'image diff returned an exit code of 2'))
-
- def test_expectations_files(self):
- port = self.make_port()
- port.port_name = 'chromium'
-
- expectations_path = port.path_to_test_expectations_file()
- chromium_overrides_path = port.path_from_chromium_base(
- 'webkit', 'tools', 'layout_tests', 'test_expectations.txt')
- skia_overrides_path = port.path_from_chromium_base(
- 'skia', 'skia_test_expectations.txt')
-
-        port._filesystem.write_text_file(skia_overrides_path, 'dummy text')
-
- port._options.builder_name = 'DUMMY_BUILDER_NAME'
- self.assertEqual(port.expectations_files(), [expectations_path, skia_overrides_path, chromium_overrides_path])
-
- port._options.builder_name = 'builder (deps)'
- self.assertEqual(port.expectations_files(), [expectations_path, skia_overrides_path, chromium_overrides_path])
-
- # A builder which does NOT observe the Chromium test_expectations,
- # but still observes the Skia test_expectations...
- port._options.builder_name = 'builder'
- self.assertEqual(port.expectations_files(), [expectations_path, skia_overrides_path])
-
- def test_expectations_ordering(self):
-        # Skipped because ChromiumPort does not implement self.port_name.
- pass
-
-
-class ChromiumPortLoggingTest(logtesting.LoggingTestCase):
- def test_check_sys_deps(self):
- port = ChromiumPortTestCase.TestLinuxPort()
-
- # Success
- port._executive = MockExecutive2(exit_code=0)
- self.assertTrue(port.check_sys_deps(needs_http=False))
-
- # Failure
- port._executive = MockExecutive2(exit_code=1,
- output='testing output failure')
- self.assertFalse(port.check_sys_deps(needs_http=False))
- self.assertLog([
- 'ERROR: System dependencies check failed.\n',
- 'ERROR: To override, invoke with --nocheck-sys-deps\n',
- 'ERROR: \n',
- 'ERROR: testing output failure\n'])
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
deleted file mode 100644
index ac9bc90f6..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_unittest.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import StringIO
-import time
-import unittest
-
-from webkitpy.common.system import logtesting
-from webkitpy.common.system.executive_mock import MockExecutive2
-from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.thirdparty.mock import Mock
-from webkitpy.tool.mocktool import MockOptions
-
-import chromium
-import chromium_mac
-
-from webkitpy.layout_tests.port import chromium_port_testcase
-from webkitpy.layout_tests.port.driver import DriverInput
-
-
-class ChromiumPortLoggingTest(logtesting.LoggingTestCase):
-
- # FIXME: put this someplace more useful
- def test_check_sys_deps(self):
- port = chromium_port_testcase.ChromiumPortTestCase.TestLinuxPort()
-
- # Success
- port._executive = MockExecutive2(exit_code=0)
- self.assertTrue(port.check_sys_deps(needs_http=False))
-
- # Failure
- port._executive = MockExecutive2(exit_code=1,
- output='testing output failure')
- self.assertFalse(port.check_sys_deps(needs_http=False))
- self.assertLog([
- 'ERROR: System dependencies check failed.\n',
- 'ERROR: To override, invoke with --nocheck-sys-deps\n',
- 'ERROR: \n',
- 'ERROR: testing output failure\n'])
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
deleted file mode 100755
index 3266c3914..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Chromium Win implementation of the Port interface."""
-
-import os
-import logging
-
-import chromium
-
-
-_log = logging.getLogger(__name__)
-
-
-class ChromiumWinPort(chromium.ChromiumPort):
- port_name = 'chromium-win'
-
- # FIXME: Figure out how to unify this with base.TestConfiguration.all_systems()?
- SUPPORTED_VERSIONS = ('xp', 'win7')
-
- FALLBACK_PATHS = {
- 'xp': [
- 'chromium-win-xp',
- 'chromium-win',
- 'chromium',
- ],
- 'win7': [
- 'chromium-win',
- 'chromium',
- ],
- }
-
- DEFAULT_BUILD_DIRECTORIES = ('build', 'out')
-
- @classmethod
- def determine_full_port_name(cls, host, options, port_name):
- if port_name.endswith('-win'):
- assert host.platform.is_win()
- # We don't maintain separate baselines for vista, so we pretend it is win7.
- if host.platform.os_version in ('vista', '7sp0', '7sp1', 'future'):
- version = 'win7'
- else:
- version = host.platform.os_version
- port_name = port_name + '-' + version
- return port_name
-
- def __init__(self, host, port_name, **kwargs):
- chromium.ChromiumPort.__init__(self, host, port_name, **kwargs)
- self._version = port_name[port_name.index('chromium-win-') + len('chromium-win-'):]
- assert self._version in self.SUPPORTED_VERSIONS, "%s is not in %s" % (self._version, self.SUPPORTED_VERSIONS)
-
- def setup_environ_for_server(self, server_name=None):
- env = chromium.ChromiumPort.setup_environ_for_server(self, server_name)
-
- # FIXME: lighttpd depends on some environment variable we're not whitelisting.
- # We should add the variable to an explicit whitelist in base.Port.
- # FIXME: This is a temporary hack to get the cr-win bot online until
- # someone from the cr-win port can take a look.
- for key, value in os.environ.items():
- if key not in env:
- env[key] = value
-
- # Put the cygwin directory first in the path to find cygwin1.dll.
- env["PATH"] = "%s;%s" % (self.path_from_chromium_base("third_party", "cygwin", "bin"), env["PATH"])
- # Configure the cygwin directory so that pywebsocket finds proper
- # python executable to run cgi program.
- env["CYGWIN_PATH"] = self.path_from_chromium_base("third_party", "cygwin", "bin")
- if self.get_option('register_cygwin'):
- setup_mount = self.path_from_chromium_base("third_party", "cygwin", "setup_mount.bat")
- self._executive.run_command([setup_mount]) # Paths are all absolute, so this does not require a cwd.
- return env
-
- def _modules_to_search_for_symbols(self):
- # FIXME: we should return the path to the ffmpeg equivalents to detect if we have the mp3 and aac codecs installed.
- # See https://bugs.webkit.org/show_bug.cgi?id=89706.
- return []
-
- def check_build(self, needs_http):
- result = chromium.ChromiumPort.check_build(self, needs_http)
- if not result:
- _log.error('For complete Windows build requirements, please see:')
- _log.error('')
- _log.error(' http://dev.chromium.org/developers/how-tos/build-instructions-windows')
- return result
-
- def operating_system(self):
- return 'win'
-
- def relative_test_filename(self, filename):
- path = filename[len(self.layout_tests_dir()) + 1:]
- return path.replace('\\', '/')
-
- #
- # PROTECTED ROUTINES
- #
-
- def _uses_apache(self):
- return False
-
- def _lighttpd_path(self, *comps):
- return self.path_from_chromium_base('third_party', 'lighttpd', 'win', *comps)
-
- def _path_to_apache(self):
- return self.path_from_chromium_base('third_party', 'cygwin', 'usr', 'sbin', 'httpd')
-
- def _path_to_apache_config_file(self):
- return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'cygwin-httpd.conf')
-
- def _path_to_lighttpd(self):
- return self._lighttpd_path('LightTPD.exe')
-
- def _path_to_lighttpd_modules(self):
- return self._lighttpd_path('lib')
-
- def _path_to_lighttpd_php(self):
- return self._lighttpd_path('php5', 'php-cgi.exe')
-
- def _path_to_driver(self, configuration=None):
- binary_name = '%s.exe' % self.driver_name()
- return self._build_path_with_configuration(configuration, binary_name)
-
- def _path_to_helper(self):
- binary_name = 'LayoutTestHelper.exe'
- return self._build_path(binary_name)
-
- def _path_to_image_diff(self):
- binary_name = 'ImageDiff.exe'
- return self._build_path(binary_name)
-
- def _path_to_wdiff(self):
- return self.path_from_chromium_base('third_party', 'cygwin', 'bin', 'wdiff.exe')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
deleted file mode 100644
index a84b5ee15..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_win_unittest.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import os
-import unittest
-
-from webkitpy.common.system import outputcapture
-from webkitpy.common.system.executive_mock import MockExecutive
-from webkitpy.common.system.filesystem_mock import MockFileSystem
-from webkitpy.layout_tests.port import chromium_port_testcase
-from webkitpy.layout_tests.port import chromium_win
-from webkitpy.tool.mocktool import MockOptions
-
-
-class ChromiumWinTest(chromium_port_testcase.ChromiumPortTestCase):
- port_name = 'chromium-win'
- port_maker = chromium_win.ChromiumWinPort
- os_name = 'win'
- os_version = 'xp'
-
- def test_uses_apache(self):
- self.assertFalse(self.make_port()._uses_apache())
-
- def test_setup_environ_for_server(self):
- port = self.make_port()
- port._executive = MockExecutive(should_log=True)
- output = outputcapture.OutputCapture()
- # FIXME: This test should not use the real os.environ
- orig_environ = os.environ.copy()
- env = output.assert_outputs(self, port.setup_environ_for_server)
- self.assertEqual(orig_environ["PATH"], os.environ["PATH"])
- self.assertNotEqual(env["PATH"], os.environ["PATH"])
-
- def test_setup_environ_for_server_cygpath(self):
- port = self.make_port()
- env = port.setup_environ_for_server(port.driver_name())
- self.assertEqual(env['CYGWIN_PATH'], '/mock-checkout/Source/WebKit/chromium/third_party/cygwin/bin')
-
- def test_setup_environ_for_server_register_cygwin(self):
- port = self.make_port(options=MockOptions(register_cygwin=True, results_directory='/'))
- port._executive = MockExecutive(should_log=True)
- expected_logs = "MOCK run_command: ['/mock-checkout/Source/WebKit/chromium/third_party/cygwin/setup_mount.bat'], cwd=None\n"
- output = outputcapture.OutputCapture()
- output.assert_outputs(self, port.setup_environ_for_server, expected_logs=expected_logs)
-
- def assert_name(self, port_name, os_version_string, expected):
- port = self.make_port(port_name=port_name, os_version=os_version_string)
- self.assertEqual(expected, port.name())
-
- def test_versions(self):
- port = self.make_port()
- self.assertTrue(port.name() in ('chromium-win-xp', 'chromium-win-win7'))
-
- self.assert_name(None, 'xp', 'chromium-win-xp')
- self.assert_name('chromium-win', 'xp', 'chromium-win-xp')
- self.assert_name('chromium-win-xp', 'xp', 'chromium-win-xp')
- self.assert_name('chromium-win-xp', '7sp0', 'chromium-win-xp')
-
- self.assert_name(None, '7sp0', 'chromium-win-win7')
- self.assert_name(None, 'vista', 'chromium-win-win7')
- self.assert_name('chromium-win', '7sp0', 'chromium-win-win7')
- self.assert_name('chromium-win-win7', 'xp', 'chromium-win-win7')
- self.assert_name('chromium-win-win7', '7sp0', 'chromium-win-win7')
- self.assert_name('chromium-win-win7', 'vista', 'chromium-win-win7')
-
- self.assertRaises(AssertionError, self.assert_name, None, 'w2k', 'chromium-win-xp')
-
- def test_baseline_path(self):
- port = self.make_port(port_name='chromium-win-xp')
- self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-win-xp'))
-
- port = self.make_port(port_name='chromium-win-win7')
- self.assertEqual(port.baseline_path(), port._webkit_baseline_path('chromium-win'))
-
- def test_build_path(self):
- # Test that optional paths are used regardless of whether they exist.
- options = MockOptions(configuration='Release', build_directory='/foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], '/foo/Release')
-
- # Test that optional relative paths are returned unmodified.
- options = MockOptions(configuration='Release', build_directory='foo')
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release'], 'foo/Release')
-
- # Test that we look in a chromium directory before the webkit directory.
- options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/out/Release', '/mock-checkout/out/Release'], '/mock-checkout/Source/WebKit/chromium/out/Release')
-
- # Test that we prefer the legacy dir over the new dir.
- options = MockOptions(configuration='Release', build_directory=None)
- self.assert_build_path(options, ['/mock-checkout/Source/WebKit/chromium/build/Release', '/mock-checkout/Source/WebKit/chromium/out'], '/mock-checkout/Source/WebKit/chromium/build/Release')
-
- def test_operating_system(self):
- self.assertEqual('win', self.make_port().operating_system())
-
- def test_driver_name_option(self):
- self.assertTrue(self.make_port()._path_to_driver().endswith('DumpRenderTree.exe'))
- self.assertTrue(self.make_port(options=MockOptions(driver_name='OtherDriver'))._path_to_driver().endswith('OtherDriver.exe'))
-
- def test_path_to_image_diff(self):
- self.assertEqual(self.make_port()._path_to_image_diff(), '/mock-checkout/out/Release/ImageDiff.exe')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/win.py b/Tools/Scripts/webkitpy/layout_tests/port/win.py
deleted file mode 100644
index ff473fec2..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/win.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright (C) 2010 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the Google name nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-import re
-import sys
-
-from webkitpy.common.system.systemhost import SystemHost
-from webkitpy.common.system.executive import ScriptError, Executive
-from webkitpy.common.system.path import abspath_to_uri
-from webkitpy.layout_tests.port.apple import ApplePort
-
-
-_log = logging.getLogger(__name__)
-
-
-class WinPort(ApplePort):
- port_name = "win"
-
- VERSION_FALLBACK_ORDER = ["win-xp", "win-vista", "win-7sp0", "win-win7"]
-
- ARCHITECTURES = ['x86']
-
- def do_text_results_differ(self, expected_text, actual_text):
- # Sanity was restored in WK2, so we don't need this hack there.
- if self.get_option('webkit_test_runner'):
- return ApplePort.do_text_results_differ(self, expected_text, actual_text)
-
- # This is a hack (which dates back to ORWT).
- # Windows does not have an EDITING DELEGATE, so we strip any EDITING DELEGATE
- # messages to make more of the tests pass.
- # It's possible more of the ports might want this and this could move down into WebKitPort.
- delegate_regexp = re.compile("^EDITING DELEGATE: .*?\n", re.MULTILINE)
- expected_text = delegate_regexp.sub("", expected_text)
- actual_text = delegate_regexp.sub("", actual_text)
- return expected_text != actual_text
-
- def default_baseline_search_path(self):
- if self._name.endswith(self.FUTURE_VERSION):
- fallback_names = [self.port_name]
- else:
- fallback_names = self.VERSION_FALLBACK_ORDER[self.VERSION_FALLBACK_ORDER.index(self._name):-1] + [self.port_name]
- # FIXME: The AppleWin port falls back to AppleMac for some results. Eventually we'll have a shared 'apple' port.
- if self.get_option('webkit_test_runner'):
- fallback_names.insert(0, 'win-wk2')
- fallback_names.append('mac-wk2')
- # Note we do not add 'wk2' here, even though it's included in _skipped_search_paths().
- # FIXME: Perhaps we should get this list from MacPort?
- fallback_names.extend(['mac-lion', 'mac'])
- return map(self._webkit_baseline_path, fallback_names)
-
- def operating_system(self):
- return 'win'
-
- def show_results_html_file(self, results_filename):
- self._run_script('run-safari', [abspath_to_uri(SystemHost().platform, results_filename)])
-
-    # FIXME: webkitperl/httpd.pm installs /usr/lib/apache/libphp4.dll on cygwin automatically
- # as part of running old-run-webkit-tests. That's bad design, but we may need some similar hack.
- # We might use setup_environ_for_server for such a hack (or modify apache_http_server.py).
-
- def _runtime_feature_list(self):
- supported_features_command = [self._path_to_driver(), '--print-supported-features']
- try:
- output = self._executive.run_command(supported_features_command, error_handler=Executive.ignore_error)
- except OSError, e:
- _log.warn("Exception running driver: %s, %s. Driver must be built before calling WebKitPort.test_expectations()." % (supported_features_command, e))
- return None
-
- # Note: win/DumpRenderTree.cpp does not print a leading space before the features_string.
- match_object = re.match("SupportedFeatures:\s*(?P<features_string>.*)\s*", output)
- if not match_object:
- return None
- return match_object.group('features_string').split(' ')
diff --git a/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py b/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py
index 717bc7ce6..85b18e2b1 100644
--- a/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/reftests/extract_reference_link_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -24,7 +23,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.layout_tests.reftests import extract_reference_link
@@ -56,10 +55,10 @@ CONTENT OF TEST
</html>
"""
matches, mismatches = extract_reference_link.get_reference_link(html_1)
- self.assertEqual(matches,
- ["green-box-ref.xht", "blue-box-ref.xht"])
- self.assertEqual(mismatches,
- ["red-box-notref.xht", "red-box-notref.xht"])
+ self.assertItemsEqual(matches,
+ ["green-box-ref.xht", "blue-box-ref.xht"])
+ self.assertItemsEqual(mismatches,
+ ["red-box-notref.xht", "red-box-notref.xht"])
html_2 = ""
empty_tuple_1 = extract_reference_link.get_reference_link(html_2)
@@ -79,7 +78,3 @@ CONTENT OF TEST
html_5 = """<link rel="help" href="RELEVANT_SPEC_SECTION">"""
empty_tuple_4 = extract_reference_link.get_reference_link(html_5)
self.assertEqual(empty_tuple_4, ([], []))
-
-
-if __name__ == "__main__":
- unittest.main()
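
This test file now imports unittest2 as unittest (the standalone backport of Python 2.7's unittest) and compares the extracted reference links with assertItemsEqual, which treats the two sequences as unordered multisets rather than requiring identical ordering. A small illustration of the difference, assuming unittest2 is installed:

    import unittest2 as unittest

    class ItemsEqualExample(unittest.TestCase):
        def test_order_does_not_matter(self):
            matches = ["green-box-ref.xht", "blue-box-ref.xht"]
            # Passes: same elements with the same multiplicity, in any order.
            self.assertItemsEqual(matches, ["blue-box-ref.xht", "green-box-ref.xht"])
            # assertEqual on the same reordered lists would fail.

    if __name__ == "__main__":
        unittest.main()
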
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index c505cd399..6b4bb76d1 100755..100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
@@ -29,7 +28,6 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import errno
import logging
import optparse
import os
@@ -38,10 +36,9 @@ import sys
import traceback
from webkitpy.common.host import Host
-from webkitpy.common.system import stack_utils
-from webkitpy.layout_tests.controllers.manager import Manager, WorkerException, TestRunInterruptedException
-from webkitpy.layout_tests.models import test_expectations
-from webkitpy.layout_tests.port import configuration_options, platform_options
+from webkitpy.layout_tests.controllers.manager import Manager
+from webkitpy.port import configuration_options, platform_options
+from webkitpy.layout_tests.views import buildbot_results
from webkitpy.layout_tests.views import printing
@@ -56,165 +53,52 @@ INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
EXCEPTIONAL_EXIT_STATUS = 254
-def lint(port, options):
- host = port.host
- if options.platform:
- ports_to_lint = [port]
- else:
- ports_to_lint = [host.port_factory.get(name) for name in host.port_factory.all_port_names()]
-
- files_linted = set()
- lint_failed = False
-
- for port_to_lint in ports_to_lint:
- expectations_dict = port_to_lint.expectations_dict()
- for expectations_file in expectations_dict.keys():
- if expectations_file in files_linted:
- continue
-
- try:
- test_expectations.TestExpectations(port_to_lint, expectations_to_lint={expectations_file: expectations_dict[expectations_file]})
- except test_expectations.ParseError, e:
- lint_failed = True
- _log.error('')
- for warning in e.warnings:
- _log.error(warning)
- _log.error('')
- files_linted.add(expectations_file)
-
- if lint_failed:
- _log.error('Lint failed.')
- return -1
- _log.info('Lint succeeded.')
- return 0
-
-
-def run(port, options, args, regular_output=sys.stderr, buildbot_output=sys.stdout):
- try:
- warnings = _set_up_derived_options(port, options)
-
- printer = printing.Printer(port, options, regular_output, buildbot_output, logger=logging.getLogger())
-
- for warning in warnings:
- _log.warning(warning)
-
- if options.lint_test_files:
- return lint(port, options)
-
- # We wrap any parts of the run that are slow or likely to raise exceptions
- # in a try/finally to ensure that we clean up the logging configuration.
- unexpected_result_count = -1
-
- manager = Manager(port, options, printer)
- printer.print_config(port.results_directory())
-
- unexpected_result_count = manager.run(args)
- _log.debug("Testing completed, Exit status: %d" % unexpected_result_count)
- except Exception:
- exception_type, exception_value, exception_traceback = sys.exc_info()
- if exception_type not in (KeyboardInterrupt, TestRunInterruptedException, WorkerException):
- print >> sys.stderr, '\n%s raised: %s' % (exception_type.__name__, exception_value)
- stack_utils.log_traceback(_log.error, exception_traceback)
- raise
- finally:
- printer.cleanup()
-
- return unexpected_result_count
-
-
-def _set_up_derived_options(port, options):
- """Sets the options values that depend on other options values."""
- # We return a list of warnings to print after the printer is initialized.
- warnings = []
-
- if not options.child_processes:
- options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
- str(port.default_child_processes()))
- if not options.max_locked_shards:
- options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
- str(port.default_max_locked_shards())))
-
- if not options.configuration:
- options.configuration = port.default_configuration()
-
- if options.pixel_tests is None:
- options.pixel_tests = port.default_pixel_tests()
-
- if not options.time_out_ms:
- options.time_out_ms = str(port.default_timeout_ms())
-
- options.slow_time_out_ms = str(5 * int(options.time_out_ms))
-
- if options.additional_platform_directory:
- additional_platform_directories = []
- for path in options.additional_platform_directory:
- additional_platform_directories.append(port.host.filesystem.abspath(path))
- options.additional_platform_directory = additional_platform_directories
-
- if not options.http and options.skipped in ('ignore', 'only'):
- warnings.append("--force/--skipped=%s overrides --no-http." % (options.skipped))
- options.http = True
-
- if options.ignore_metrics and (options.new_baseline or options.reset_results):
- warnings.append("--ignore-metrics has no effect with --new-baselines or with --reset-results")
-
- if options.new_baseline:
- options.reset_results = True
- options.add_platform_exceptions = True
-
- if options.pixel_test_directories:
- options.pixel_tests = True
- varified_dirs = set()
- pixel_test_directories = options.pixel_test_directories
- for directory in pixel_test_directories:
- # FIXME: we should support specifying the directories all the ways we support it for additional
- # arguments specifying which tests and directories to run. We should also move the logic for that
- # to Port.
- filesystem = port.host.filesystem
- if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
- warnings.append("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
- else:
- varified_dirs.add(directory)
-
- options.pixel_test_directories = list(varified_dirs)
-
- if options.run_singly:
- options.verbose = True
-
- return warnings
-
-
-def _compat_shim_callback(option, opt_str, value, parser):
- print "Ignoring unsupported option: %s" % opt_str
+def main(argv, stdout, stderr):
+ options, args = parse_args(argv)
+ if options.platform and 'test' in options.platform:
+ # It's a bit lame to import mocks into real code, but this allows the user
+ # to run tests against the test platform interactively, which is useful for
+ # debugging test failures.
+ from webkitpy.common.host_mock import MockHost
+ host = MockHost()
+ else:
+ host = Host()
-def _compat_shim_option(option_name, **kwargs):
- return optparse.make_option(option_name, action="callback",
- callback=_compat_shim_callback,
- help="Ignored, for old-run-webkit-tests compat only.", **kwargs)
+ if options.lint_test_files:
+ from webkitpy.layout_tests.lint_test_expectations import lint
+ return lint(host, options, stderr)
+ try:
+ port = host.port_factory.get(options.platform, options)
+ except NotImplementedError, e:
+ # FIXME: is this the best way to handle unsupported port names?
+ print >> stderr, str(e)
+ return EXCEPTIONAL_EXIT_STATUS
-def parse_args(args=None):
- """Provides a default set of command line args.
+ try:
+ run_details = run(port, options, args, stderr)
+ if run_details.exit_code != -1:
+ bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
+ bot_printer.print_results(run_details)
+
+ return run_details.exit_code
+ except KeyboardInterrupt:
+ return INTERRUPTED_EXIT_STATUS
+ except BaseException as e:
+ if isinstance(e, Exception):
+ print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
+ traceback.print_exc(file=stderr)
+ return EXCEPTIONAL_EXIT_STATUS
- Returns a tuple of options, args from optparse"""
+def parse_args(args):
option_group_definitions = []
option_group_definitions.append(("Platform options", platform_options()))
option_group_definitions.append(("Configuration options", configuration_options()))
option_group_definitions.append(("Printing Options", printing.print_options()))
- # FIXME: These options should move onto the ChromiumPort.
- option_group_definitions.append(("Chromium-specific Options", [
- optparse.make_option("--nocheck-sys-deps", action="store_true",
- default=False,
- help="Don't check the system dependencies (themes)"),
- optparse.make_option("--adb-device",
- action="append", default=[],
- help="Run Android layout tests on these devices."),
- ]))
-
option_group_definitions.append(("EFL-specific Options", [
optparse.make_option("--webprocess-cmd-prefix", type="string",
default=False, help="Prefix used when spawning the Web process (Debug mode only)"),
@@ -238,16 +122,10 @@ def parse_args(args=None):
help="Path to a directory containing the executables needed to run tests."),
]))
- option_group_definitions.append(("ORWT Compatibility Options", [
- # FIXME: Remove this option once the bots don't refer to it.
- # results.html is smart enough to figure this out itself.
- _compat_shim_option("--use-remote-links-to-tests"),
- ]))
-
option_group_definitions.append(("Results Options", [
- optparse.make_option("-p", "--pixel-tests", action="store_true",
+ optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_true",
dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"),
- optparse.make_option("--no-pixel-tests", action="store_false",
+ optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_false",
dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"),
optparse.make_option("--no-sample-on-timeout", action="store_false",
dest="sample_on_timeout", help="Don't run sample on timeout (Mac OS X only)"),
@@ -303,18 +181,11 @@ def parse_args(args=None):
default=True, dest="show_results",
help="Don't launch a browser with results after the tests "
"are done"),
- # FIXME: We should have a helper function to do this sort of
- # deprecated mapping and automatically log, etc.
- optparse.make_option("--noshow-results", action="store_false", dest="show_results", help="Deprecated, same as --no-show-results."),
- optparse.make_option("--no-launch-safari", action="store_false", dest="show_results", help="Deprecated, same as --no-show-results."),
optparse.make_option("--full-results-html", action="store_true",
default=False,
help="Show all failures in results.html, rather than only regressions"),
optparse.make_option("--clobber-old-results", action="store_true",
default=False, help="Clobbers test results from previous runs."),
- optparse.make_option("--no-record-results", action="store_false",
- default=True, dest="record_results",
- help="Don't record the results."),
optparse.make_option("--http", action="store_true", dest="http",
default=True, help="Run HTTP and WebSocket tests (default)"),
optparse.make_option("--no-http", action="store_false", dest="http",
@@ -322,6 +193,12 @@ def parse_args(args=None):
optparse.make_option("--ignore-metrics", action="store_true", dest="ignore_metrics",
default=False, help="Ignore rendering metrics related information from test "
"output, only compare the structure of the rendertree."),
+ optparse.make_option("--nocheck-sys-deps", action="store_true",
+ default=False,
+ help="Don't check the system dependencies (themes)"),
+ optparse.make_option("--nojava", action="store_true",
+ default=False,
+ help="Don't build java support files"),
]))
option_group_definitions.append(("Testing Options", [
@@ -391,6 +268,10 @@ def parse_args(args=None):
help="Set the maximum number of locked shards"),
optparse.make_option("--additional-env-var", type="string", action="append", default=[],
help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
+ optparse.make_option("--profile", action="store_true",
+ help="Output per-test profile information."),
+ optparse.make_option("--profiler", action="store",
+ help="Output per-test profile information, using the specified profiler."),
]))
option_group_definitions.append(("Miscellaneous Options", [
@@ -425,37 +306,79 @@ def parse_args(args=None):
return option_parser.parse_args(args)
-def main(argv=None):
- try:
- options, args = parse_args(argv)
- if options.platform and 'test' in options.platform:
- # It's a bit lame to import mocks into real code, but this allows the user
- # to run tests against the test platform interactively, which is useful for
- # debugging test failures.
- from webkitpy.common.host_mock import MockHost
- host = MockHost()
- else:
- host = Host()
- port = host.port_factory.get(options.platform, options)
- except NotImplementedError, e:
- # FIXME: is this the best way to handle unsupported port names?
- print >> sys.stderr, str(e)
- return EXCEPTIONAL_EXIT_STATUS
- except Exception, e:
- print >> sys.stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
- traceback.print_exc(file=sys.stderr)
- raise
+def _set_up_derived_options(port, options):
+ """Sets the options values that depend on other options values."""
+ if not options.child_processes:
+ options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES",
+ str(port.default_child_processes()))
+ if not options.max_locked_shards:
+ options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS",
+ str(port.default_max_locked_shards())))
+
+ if not options.configuration:
+ options.configuration = port.default_configuration()
+
+ if options.pixel_tests is None:
+ options.pixel_tests = port.default_pixel_tests()
+
+ if not options.time_out_ms:
+ options.time_out_ms = str(port.default_timeout_ms())
+
+ options.slow_time_out_ms = str(5 * int(options.time_out_ms))
+
+ if options.additional_platform_directory:
+ additional_platform_directories = []
+ for path in options.additional_platform_directory:
+ additional_platform_directories.append(port.host.filesystem.abspath(path))
+ options.additional_platform_directory = additional_platform_directories
+
+ if not options.http and options.skipped in ('ignore', 'only'):
+ _log.warning("--force/--skipped=%s overrides --no-http." % (options.skipped))
+ options.http = True
+
+ if options.ignore_metrics and (options.new_baseline or options.reset_results):
+ _log.warning("--ignore-metrics has no effect with --new-baselines or with --reset-results")
+
+ if options.new_baseline:
+ options.reset_results = True
+ options.add_platform_exceptions = True
+
+ if options.pixel_test_directories:
+ options.pixel_tests = True
+ varified_dirs = set()
+ pixel_test_directories = options.pixel_test_directories
+ for directory in pixel_test_directories:
+ # FIXME: we should support specifying the directories all the ways we support it for additional
+ # arguments specifying which tests and directories to run. We should also move the logic for that
+ # to Port.
+ filesystem = port.host.filesystem
+ if not filesystem.isdir(filesystem.join(port.layout_tests_dir(), directory)):
+ _log.warning("'%s' was passed to --pixel-test-directories, which doesn't seem to be a directory" % str(directory))
+ else:
+ varified_dirs.add(directory)
- logging.getLogger().setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
- return run(port, options, args)
+ options.pixel_test_directories = list(varified_dirs)
+ if options.run_singly:
+ options.verbose = True
+
+
+def run(port, options, args, logging_stream):
+ logger = logging.getLogger()
+ logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
-if '__main__' == __name__:
try:
- return_code = main()
- except BaseException, e:
- if e.__class__ in (KeyboardInterrupt, TestRunInterruptedException):
- sys.exit(INTERRUPTED_EXIT_STATUS)
- sys.exit(EXCEPTIONAL_EXIT_STATUS)
+ printer = printing.Printer(port, options, logging_stream, logger=logger)
+
+ _set_up_derived_options(port, options)
+ manager = Manager(port, options, printer)
+ printer.print_config(port.results_directory())
+
+ run_details = manager.run(args)
+ _log.debug("Testing completed, Exit status: %d" % run_details.exit_code)
+ return run_details
+ finally:
+ printer.cleanup()
- sys.exit(return_code)
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr))
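
run() now configures the root logger, builds the Printer against a single logging_stream, and returns the run_details object produced by Manager.run(), whose exit_code and summarized_results feed BuildBotPrinter back in main(); the printer is always cleaned up in the finally block. The module is also no longer executed through an implicit main() signature; the new __main__ block passes argv and the real streams explicitly. A thin launcher mirroring that block (module path as in the diff):

    import sys

    from webkitpy.layout_tests import run_webkit_tests

    if __name__ == "__main__":
        # main() takes argv plus explicit stdout/stderr streams and returns
        # the process exit code, so the caller owns sys.exit().
        sys.exit(run_webkit_tests.main(sys.argv[1:], sys.stdout, sys.stderr))
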
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index 2bfe904d5..c8d3495e4 100755..100644
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
@@ -30,7 +29,6 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
-import itertools
import json
import logging
import os
@@ -42,7 +40,7 @@ import sys
import thread
import time
import threading
-import unittest
+import unittest2 as unittest
from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
@@ -50,22 +48,19 @@ from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost
-from webkitpy.layout_tests import port
+from webkitpy import port
from webkitpy.layout_tests import run_webkit_tests
-from webkitpy.layout_tests.controllers.manager import WorkerException
-from webkitpy.layout_tests.port import Port
-from webkitpy.layout_tests.port.test import TestPort, TestDriver
+from webkitpy.port import Port
+from webkitpy.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool.mocktool import MockOptions
-def parse_args(extra_args=None, record_results=False, tests_included=False, new_results=False, print_nothing=True):
+def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
extra_args = extra_args or []
args = []
if not '--platform' in extra_args:
args.extend(['--platform', 'test'])
- if not record_results:
- args.append('--no-record-results')
if not new_results:
args.append('--no-new-test-results')
@@ -81,8 +76,8 @@ def parse_args(extra_args=None, record_results=False, tests_included=False, new_
return run_webkit_tests.parse_args(args)
-def passing_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, host=None, shared_port=True):
- options, parsed_args = parse_args(extra_args, record_results, tests_included)
+def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
+ options, parsed_args = parse_args(extra_args, tests_included)
if not port_obj:
host = host or MockHost()
port_obj = host.port_factory.get(port_name=options.platform, options=options)
@@ -90,23 +85,21 @@ def passing_run(extra_args=None, port_obj=None, record_results=False, tests_incl
if shared_port:
port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
- buildbot_output = StringIO.StringIO()
- regular_output = StringIO.StringIO()
- res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
- return res == 0
+ logging_stream = StringIO.StringIO()
+ run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
+ return run_details.exit_code == 0
-def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, host=None, new_results=False, shared_port=True):
+def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
options, parsed_args = parse_args(extra_args=extra_args,
- record_results=record_results,
tests_included=tests_included,
print_nothing=False, new_results=new_results)
host = host or MockHost()
if not port_obj:
port_obj = host.port_factory.get(port_name=options.platform, options=options)
- res, buildbot_output, regular_output = run_and_capture(port_obj, options, parsed_args, shared_port)
- return (res, buildbot_output, regular_output, host.user)
+ run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
+ return (run_details, output, host.user)
def run_and_capture(port_obj, options, parsed_args, shared_port=True):
@@ -115,70 +108,60 @@ def run_and_capture(port_obj, options, parsed_args, shared_port=True):
oc = outputcapture.OutputCapture()
try:
oc.capture_output()
- buildbot_output = StringIO.StringIO()
- regular_output = StringIO.StringIO()
- res = run_webkit_tests.run(port_obj, options, parsed_args,
- buildbot_output=buildbot_output,
- regular_output=regular_output)
+ logging_stream = StringIO.StringIO()
+ run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
finally:
oc.restore_output()
- return (res, buildbot_output, regular_output)
+ return (run_details, logging_stream)
-def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
- host=None, include_reference_html=False):
- extra_args = extra_args or []
- if not tests_included:
- # Not including http tests since they get run out of order (that
- # behavior has its own test, see test_get_test_file_queue)
- extra_args = ['passes', 'failures'] + extra_args
- options, parsed_args = parse_args(extra_args, tests_included=True)
+def get_tests_run(args, host=None):
+ results = get_test_results(args, host)
+ return [result.test_name for result in results]
- host = host or MockHost()
- test_batches = []
- class RecordingTestDriver(TestDriver):
- def __init__(self, port, worker_number):
- TestDriver.__init__(self, port, worker_number, pixel_tests=port.get_option('pixel_test'), no_timeout=False)
- self._current_test_batch = None
+def get_test_batches(args, host=None):
+ results = get_test_results(args, host)
+ batches = []
+ batch = []
+ current_pid = None
+ for result in results:
+ if batch and result.pid != current_pid:
+ batches.append(batch)
+ batch = []
+ batch.append(result.test_name)
+ if batch:
+ batches.append(batch)
+ return batches
- def start(self):
- pass
- def stop(self):
- self._current_test_batch = None
+def get_test_results(args, host=None):
+ options, parsed_args = parse_args(args, tests_included=True)
- def run_test(self, test_input, stop_when_done):
- if self._current_test_batch is None:
- self._current_test_batch = []
- test_batches.append(self._current_test_batch)
- test_name = test_input.test_name
- # In case of reftest, one test calls the driver's run_test() twice.
- # We should not add a reference html used by reftests to tests unless include_reference_html parameter
- # is explicitly given.
- filesystem = self._port.host.filesystem
- dirname, filename = filesystem.split(test_name)
- if include_reference_html or not Port.is_reference_html_file(filesystem, dirname, filename):
- self._current_test_batch.append(test_name)
- return TestDriver.run_test(self, test_input, stop_when_done)
-
- class RecordingTestPort(TestPort):
- def create_driver(self, worker_number):
- return RecordingTestDriver(self, worker_number)
+ host = host or MockHost()
+ port_obj = host.port_factory.get(port_name=options.platform, options=options)
- recording_port = RecordingTestPort(host, options=options)
- run_and_capture(recording_port, options, parsed_args)
+ oc = outputcapture.OutputCapture()
+ oc.capture_output()
+ logging_stream = StringIO.StringIO()
+ try:
+ run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
+ finally:
+ oc.restore_output()
- if flatten_batches:
- return list(itertools.chain(*test_batches))
+ all_results = []
+ if run_details.initial_results:
+ all_results.extend(run_details.initial_results.all_results)
- return test_batches
+ if run_details.retry_results:
+ all_results.extend(run_details.retry_results.all_results)
+ return all_results
-# Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test
-# FIXME: It's nice to have a routine in port/test.py that returns this number.
-unexpected_failures = 12
-unexpected_tests_count = unexpected_failures + 4
+def parse_full_results(full_results_text):
+ json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
+ compressed_results = json.loads(json_to_eval)
+ return compressed_results
class StreamTestingMixin(object):
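
The RecordingTestDriver/RecordingTestPort machinery is replaced by helpers that post-process the TestResult objects reachable through run_details: get_tests_run() keeps only the test names, get_test_batches() groups names by the worker that produced them, and parse_full_results() strips the JSONP-style ADD_RESULTS(...) wrapper before calling json.loads(). A condensed sketch of that grouping, assuming each result exposes .pid and .test_name as in the hunk above; note that this sketch assigns current_pid inside the loop, which the version above appears to omit (as written there, every batch ends up holding a single test):

    import json

    def group_results_by_worker(results):
        # Start a new batch whenever the producing worker (result.pid) changes.
        batches, batch, current_pid = [], [], None
        for result in results:
            if batch and result.pid != current_pid:
                batches.append(batch)
                batch = []
            batch.append(result.test_name)
            current_pid = result.pid
        if batch:
            batches.append(batch)
        return batches

    def parse_full_results(full_results_text):
        # The on-disk file wraps the JSON payload as ADD_RESULTS(...); for
        # consumption by results.html, so strip that wrapper before parsing.
        return json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
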
@@ -192,91 +175,7 @@ class StreamTestingMixin(object):
self.assertTrue(stream.getvalue())
-class LintTest(unittest.TestCase, StreamTestingMixin):
- def test_all_configurations(self):
-
- class FakePort(object):
- def __init__(self, host, name, path):
- self.host = host
- self.name = name
- self.path = path
-
- def test_configuration(self):
- return None
-
- def expectations_dict(self):
- self.host.ports_parsed.append(self.name)
- return {self.path: ''}
-
- def skipped_layout_tests(self, tests):
- return set([])
-
- def all_test_configurations(self):
- return []
-
- def configuration_specifier_macros(self):
- return []
-
- def path_from_webkit_base(self):
- return ''
-
- def get_option(self, name, val):
- return val
-
- class FakeFactory(object):
- def __init__(self, host, ports):
- self.host = host
- self.ports = {}
- for port in ports:
- self.ports[port.name] = port
-
- def get(self, port_name, *args, **kwargs):
- return self.ports[port_name]
-
- def all_port_names(self):
- return sorted(self.ports.keys())
-
- host = MockHost()
- host.ports_parsed = []
- host.port_factory = FakeFactory(host, (FakePort(host, 'a', 'path-to-a'),
- FakePort(host, 'b', 'path-to-b'),
- FakePort(host, 'b-win', 'path-to-b')))
-
- self.assertEqual(run_webkit_tests.lint(host.port_factory.ports['a'], MockOptions(platform=None)), 0)
- self.assertEqual(host.ports_parsed, ['a', 'b', 'b-win'])
-
- host.ports_parsed = []
- self.assertEqual(run_webkit_tests.lint(host.port_factory.ports['a'], MockOptions(platform='a')), 0)
- self.assertEqual(host.ports_parsed, ['a'])
-
- def test_lint_test_files(self):
- res, out, err, user = logging_run(['--lint-test-files'])
- self.assertEqual(res, 0)
- self.assertEmpty(out)
- self.assertContains(err, 'Lint succeeded')
-
- def test_lint_test_files__errors(self):
- options, parsed_args = parse_args(['--lint-test-files'])
- host = MockHost()
- port_obj = host.port_factory.get(options.platform, options=options)
- port_obj.expectations_dict = lambda: {'': '-- syntax error'}
- res, out, err = run_and_capture(port_obj, options, parsed_args)
-
- self.assertEqual(res, -1)
- self.assertEmpty(out)
- self.assertTrue(any(['Lint failed' in msg for msg in err.buflist]))
-
- # ensure we lint *all* of the files in the cascade.
- port_obj.expectations_dict = lambda: {'foo': '-- syntax error1', 'bar': '-- syntax error2'}
- res, out, err = run_and_capture(port_obj, options, parsed_args)
-
- self.assertEqual(res, -1)
- self.assertEmpty(out)
- self.assertTrue(any(['foo:1' in msg for msg in err.buflist]))
- self.assertTrue(any(['bar:1' in msg for msg in err.buflist]))
-
-
-class MainTest(unittest.TestCase, StreamTestingMixin):
+class RunTest(unittest.TestCase, StreamTestingMixin):
def setUp(self):
# A real PlatformInfo object is used here instead of a
# MockPlatformInfo because we need to actually check for
@@ -287,15 +186,41 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
# properly on cygwin (bug 63846).
self.should_test_processes = not self._platform.is_win()
- def test_all(self):
- res, out, err, user = logging_run([], tests_included=True)
- self.assertEqual(res, unexpected_tests_count)
-
def test_basic(self):
- self.assertTrue(passing_run())
+ options, args = parse_args(tests_included=True)
+ logging_stream = StringIO.StringIO()
+ host = MockHost()
+ port_obj = host.port_factory.get(options.platform, options)
+ details = run_webkit_tests.run(port_obj, options, args, logging_stream)
+
+ # These numbers will need to be updated whenever we add new tests.
+ self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
+ self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
+ self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
+ self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
+ self.assertEqual(details.retry_results.total, test.TOTAL_RETRIES)
+
+ one_line_summary = "%d tests ran as expected, %d didn't:\n" % (
+ details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name),
+ len(details.initial_results.unexpected_results_by_name))
+ self.assertTrue(one_line_summary in logging_stream.buflist)
+
+ # Ensure the results were summarized properly.
+ self.assertEqual(details.summarized_results['num_regressions'], details.exit_code)
+
+ # Ensure the image diff percentage is in the results.
+ self.assertEqual(details.summarized_results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)
+
+ # Ensure the results were written out and displayed.
+ full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+ json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
+ self.assertEqual(json.loads(json_to_eval), details.summarized_results)
+
+ self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
+
def test_batch_size(self):
- batch_tests_run = get_tests_run(['--batch-size', '2'])
+ batch_tests_run = get_test_batches(['--batch-size', '2'])
for batch in batch_tests_run:
self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))
@@ -307,32 +232,32 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
- _, _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
+ _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
try:
- self.assertTrue(any(['(1 locked)' in line for line in regular_output.buflist]))
+ self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
finally:
if save_env_webkit_test_max_locked_shards:
os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards
def test_child_processes_2(self):
if self.should_test_processes:
- _, _, regular_output, _ = logging_run(
+ _, regular_output, _ = logging_run(
['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
def test_child_processes_min(self):
if self.should_test_processes:
- _, _, regular_output, _ = logging_run(
+ _, regular_output, _ = logging_run(
['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
tests_included=True, shared_port=False)
self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
def test_dryrun(self):
- batch_tests_run = get_tests_run(['--dry-run'])
- self.assertEqual(batch_tests_run, [])
+ tests_run = get_tests_run(['--dry-run'])
+ self.assertEqual(tests_run, [])
- batch_tests_run = get_tests_run(['-n'])
- self.assertEqual(batch_tests_run, [])
+ tests_run = get_tests_run(['-n'])
+ self.assertEqual(tests_run, [])
def test_exception_raised(self):
# Exceptions raised by a worker are treated differently depending on
@@ -342,87 +267,80 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
# is actually useful in testing.
#
# Exceptions raised in a separate process are re-packaged into
- # WorkerExceptions, which have a string capture of the stack which can
+ # WorkerExceptions (a subclass of BaseException), which have a string capture of the stack which can
# be printed, but don't display properly in the unit test exception handlers.
- self.assertRaises(ValueError, logging_run,
+ self.assertRaises(BaseException, logging_run,
['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)
if self.should_test_processes:
- self.assertRaises(WorkerException, logging_run,
+ self.assertRaises(BaseException, logging_run,
['--child-processes', '2', '--force', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
def test_full_results_html(self):
# FIXME: verify html?
- res, out, err, user = logging_run(['--full-results-html'])
- self.assertEqual(res, 0)
+ details, _, _ = logging_run(['--full-results-html'])
+ self.assertEqual(details.exit_code, 0)
def test_hung_thread(self):
- res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
- 'failures/expected/hang.html'],
- tests_included=True)
+ details, err, _ = logging_run(['--run-singly', '--time-out-ms=50', 'failures/expected/hang.html'], tests_included=True)
# Note that hang.html is marked as WontFix and all WontFix tests are
# expected to Pass, so that actually running them generates an "unexpected" error.
- self.assertEqual(res, 1)
- self.assertNotEmpty(out)
+ self.assertEqual(details.exit_code, 1)
self.assertNotEmpty(err)
def test_keyboard_interrupt(self):
# Note that this also tests running a test marked as SKIP if
# you specify it explicitly.
- self.assertRaises(KeyboardInterrupt, logging_run,
- ['failures/expected/keyboard.html', '--child-processes', '1'],
- tests_included=True)
+ self.assertRaises(KeyboardInterrupt, logging_run, ['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
if self.should_test_processes:
self.assertRaises(KeyboardInterrupt, logging_run,
['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--force'], tests_included=True, shared_port=False)
def test_no_tests_found(self):
- res, out, err, user = logging_run(['resources'], tests_included=True)
- self.assertEqual(res, -1)
- self.assertEmpty(out)
+ details, err, _ = logging_run(['resources'], tests_included=True)
+ self.assertEqual(details.exit_code, -1)
self.assertContains(err, 'No tests to run.\n')
def test_no_tests_found_2(self):
- res, out, err, user = logging_run(['foo'], tests_included=True)
- self.assertEqual(res, -1)
- self.assertEmpty(out)
+ details, err, _ = logging_run(['foo'], tests_included=True)
+ self.assertEqual(details.exit_code, -1)
self.assertContains(err, 'No tests to run.\n')
def test_natural_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
- tests_run = get_tests_run(['--order=natural'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--order=natural'] + tests_to_run)
self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)
def test_natural_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
- tests_run = get_tests_run(['--order=natural'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--order=natural'] + tests_to_run)
self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)
def test_random_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
- tests_run = get_tests_run(['--order=random'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--order=random'] + tests_to_run)
self.assertEqual(sorted(tests_to_run), sorted(tests_run))
def test_random_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
- tests_run = get_tests_run(['--order=random'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--order=random'] + tests_to_run)
self.assertEqual(tests_run.count('passes/audio.html'), 2)
self.assertEqual(tests_run.count('passes/args.html'), 2)
def test_no_order(self):
tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
- tests_run = get_tests_run(['--order=none'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_to_run, tests_run)
def test_no_order_test_specified_multiple_times(self):
tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
- tests_run = get_tests_run(['--order=none'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_to_run, tests_run)
def test_no_order_with_directory_entries_in_natural_order(self):
tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
- tests_run = get_tests_run(['--order=none'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--order=none'] + tests_to_run)
self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])
def test_gc_between_tests(self):
@@ -436,86 +354,80 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
def test_repeat_each(self):
tests_to_run = ['passes/image.html', 'passes/text.html']
- tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])
def test_ignore_flag(self):
# Note that passes/image.html is expected to be run since we specified it directly.
- tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'], flatten_batches=True, tests_included=True)
+ tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
self.assertFalse('passes/text.html' in tests_run)
self.assertTrue('passes/image.html' in tests_run)
def test_skipped_flag(self):
- tests_run = get_tests_run(['passes'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['passes'])
self.assertFalse('passes/skipped/skip.html' in tests_run)
num_tests_run_by_default = len(tests_run)
# Check that nothing changes when we specify skipped=default.
- self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'], tests_included=True, flatten_batches=True)),
+ self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
num_tests_run_by_default)
# Now check that we run one more test (the skipped one).
- tests_run = get_tests_run(['--skipped=ignore', 'passes'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--skipped=ignore', 'passes'])
self.assertTrue('passes/skipped/skip.html' in tests_run)
self.assertEqual(len(tests_run), num_tests_run_by_default + 1)
# Now check that we only run the skipped test.
- self.assertEqual(get_tests_run(['--skipped=only', 'passes'], tests_included=True, flatten_batches=True),
- ['passes/skipped/skip.html'])
+ self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])
# Now check that we don't run anything.
- self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html'], tests_included=True, flatten_batches=True),
- [])
+ self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])
def test_iterations(self):
tests_to_run = ['passes/image.html', 'passes/text.html']
- tests_run = get_tests_run(['--iterations', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])
def test_repeat_each_iterations_num_tests(self):
# The total number of tests should be: number_of_tests *
# repeat_each * iterations
host = MockHost()
- res, out, err, _ = logging_run(['--iterations', '2',
- '--repeat-each', '4',
- '--debug-rwt-logging',
- 'passes/text.html', 'failures/expected/text.html'],
- tests_included=True, host=host, record_results=True)
- self.assertContains(out, "=> Results: 8/16 tests passed (50.0%)\n")
+ _, err, _ = logging_run(
+ ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
+ tests_included=True, host=host)
self.assertContains(err, "All 16 tests ran as expected.\n")
def test_run_chunk(self):
# Test that we actually select the right chunk
- all_tests_run = get_tests_run(flatten_batches=True)
- chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
+ all_tests_run = get_tests_run(['passes', 'failures'])
+ chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
self.assertEqual(all_tests_run[4:8], chunk_tests_run)
# Test that we wrap around if the number of tests is not evenly divisible by the chunk size
tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
- chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
+ chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)
def test_run_force(self):
# This raises an exception because we run
# failures/expected/exception.html, which is normally SKIPped.
- # See also the comments in test_exception_raised() about ValueError vs. WorkerException.
self.assertRaises(ValueError, logging_run, ['--force'])
def test_run_part(self):
# Test that we actually select the right part
tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
- tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)
# Test that we wrap around if the number of tests is not evenly divisible by the chunk size
# (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
# last part repeats the first two tests).
- chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
+ chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)
def test_run_singly(self):
- batch_tests_run = get_tests_run(['--run-singly'])
+ batch_tests_run = get_test_batches(['--run-singly'])
for batch in batch_tests_run:
self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))
@@ -525,7 +437,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
host = MockHost()
host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')
- batches = get_tests_run(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
+ batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
has_passes_text = False
for batch in batches:
self.assertFalse('failures/expected/text.html' in batch)
@@ -534,23 +446,19 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertTrue(has_passes_text)
def test_run_singly_actually_runs_tests(self):
- res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
- self.assertEqual(res, unexpected_failures)
+ details, _, _ = logging_run(['--run-singly'], tests_included=True)
+ self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 1) # failures/expected/hang.html actually passes w/ --run-singly.
def test_single_file(self):
- # FIXME: We should consider replacing more of the get_tests_run()-style tests
- # with tests that read the tests_run* files, like this one.
- host = MockHost()
- tests_run = passing_run(['passes/text.html'], tests_included=True, host=host)
- self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/tests_run0.txt'),
- 'passes/text.html\n')
+ tests_run = get_tests_run(['passes/text.html'])
+ self.assertEqual(tests_run, ['passes/text.html'])
def test_single_file_with_prefix(self):
- tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['LayoutTests/passes/text.html'])
self.assertEqual(['passes/text.html'], tests_run)
def test_single_skipped_file(self):
- tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['failures/expected/keybaord.html'])
self.assertEqual([], tests_run)
def test_stderr_is_saved(self):
@@ -563,43 +471,31 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
host = MockHost()
filename = '/tmp/foo.txt'
host.filesystem.write_text_file(filename, 'passes/text.html')
- tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, host=host)
+ tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
self.assertEqual(['passes/text.html'], tests_run)
host.filesystem.remove(filename)
- res, out, err, user = logging_run(['--test-list=%s' % filename],
- tests_included=True, host=host)
- self.assertEqual(res, -1)
+ details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, -1)
self.assertNotEmpty(err)
def test_test_list_with_prefix(self):
host = MockHost()
filename = '/tmp/foo.txt'
host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
- tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, host=host)
+ tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
self.assertEqual(['passes/text.html'], tests_run)
- def test_unexpected_failures(self):
- # Run tests including the unexpected failures.
- self._url_opened = None
- res, out, err, user = logging_run(tests_included=True)
-
- self.assertEqual(res, unexpected_tests_count)
- self.assertNotEmpty(out)
- self.assertNotEmpty(err)
- self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
-
def test_missing_and_unexpected_results(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
- res, out, err, _ = logging_run(['--no-show-results',
+ details, err, _ = logging_run(['--no-show-results',
'failures/expected/missing_image.html',
'failures/unexpected/missing_text.html',
'failures/unexpected/text-image-checksum.html'],
- tests_included=True, host=host, record_results=True)
+ tests_included=True, host=host)
file_list = host.filesystem.written_files.keys()
- file_list.remove('/tmp/layout-test-results/tests_run0.txt')
- self.assertEqual(res, 1)
+ self.assertEqual(details.exit_code, 1)
expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","image_diff_percent":1},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
self.assertTrue(json_string.find(expected_token) != -1)
@@ -614,9 +510,9 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
args = ['--pixel-tests', '--pixel-test-directory', 'failures/unexpected/pixeldir',
'failures/unexpected/pixeldir/image_in_pixeldir.html',
'failures/unexpected/image_not_in_pixeldir.html']
- res, out, err, _ = logging_run(extra_args=args, host=host, record_results=True, tests_included=True)
+ details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)
- self.assertEqual(res, 1)
+ self.assertEqual(details.exit_code, 1)
expected_token = '"unexpected":{"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE"'
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
self.assertTrue(json_string.find(expected_token) != -1)
@@ -624,38 +520,28 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
def test_missing_and_unexpected_results_with_custom_exit_code(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
- class CustomExitCodePort(TestPort):
+ class CustomExitCodePort(test.TestPort):
def exit_code_from_summarized_results(self, unexpected_results):
return unexpected_results['num_regressions'] + unexpected_results['num_missing']
host = MockHost()
options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
test_port = CustomExitCodePort(host, options=options)
- res, out, err, _ = logging_run(['--no-show-results',
+ details, err, _ = logging_run(['--no-show-results',
'failures/expected/missing_image.html',
'failures/unexpected/missing_text.html',
'failures/unexpected/text-image-checksum.html'],
- tests_included=True, host=host, record_results=True, port_obj=test_port)
- self.assertEqual(res, 2)
+ tests_included=True, host=host, port_obj=test_port)
+ self.assertEqual(details.exit_code, 2)
def test_crash_with_stderr(self):
host = MockHost()
- res, buildbot_output, regular_output, user = logging_run([
- 'failures/unexpected/crash-with-stderr.html',
- ],
- tests_included=True,
- record_results=True,
- host=host)
+ _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)
def test_no_image_failure_with_image_diff(self):
host = MockHost()
- res, buildbot_output, regular_output, user = logging_run([
- 'failures/unexpected/checksum-with-matching-image.html',
- ],
- tests_included=True,
- record_results=True,
- host=host)
+ _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)
def test_crash_log(self):
@@ -666,12 +552,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
mock_crash_report = make_mock_crash_report_darwin('DumpRenderTree', 12345)
host = MockHost()
host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
- res, buildbot_output, regular_output, user = logging_run([
- 'failures/unexpected/crash-with-stderr.html',
- ],
- tests_included=True,
- record_results=True,
- host=host)
+ _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
expected_crash_log = mock_crash_report
self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)
@@ -683,30 +564,20 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
mock_crash_report = make_mock_crash_report_darwin('WebProcess', 12345)
host = MockHost()
host.filesystem.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
- res, buildbot_output, regular_output, user = logging_run([
- 'failures/unexpected/web-process-crash-with-stderr.html',
- ],
- tests_included=True,
- record_results=True,
- host=host)
+ logging_run(['failures/unexpected/web-process-crash-with-stderr.html'], tests_included=True, host=host)
self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), mock_crash_report)
def test_exit_after_n_failures_upload(self):
host = MockHost()
- res, buildbot_output, regular_output, user = logging_run([
- 'failures/unexpected/text-image-checksum.html',
- 'passes/text.html',
- '--exit-after-n-failures', '1',
- ],
- tests_included=True,
- record_results=True,
- host=host)
+ details, regular_output, user = logging_run(
+ ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
+ tests_included=True, host=host)
# By returning False, we know that the incremental results were generated and then deleted.
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))
# This checks that we report only the number of tests that actually failed.
- self.assertEqual(res, 1)
+ self.assertEqual(details.exit_code, 1)
# This checks that passes/text.html is considered SKIPped.
self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
@@ -720,54 +591,24 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
def test_exit_after_n_failures(self):
# Unexpected failures should result in tests stopping.
- tests_run = get_tests_run([
- 'failures/unexpected/text-image-checksum.html',
- 'passes/text.html',
- '--exit-after-n-failures', '1',
- ],
- tests_included=True,
- flatten_batches=True)
+ tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)
# But we'll keep going for expected ones.
- tests_run = get_tests_run([
- 'failures/expected/text.html',
- 'passes/text.html',
- '--exit-after-n-failures', '1',
- ],
- tests_included=True,
- flatten_batches=True)
+ tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)
def test_exit_after_n_crashes(self):
# Unexpected crashes should result in tests stopping.
- tests_run = get_tests_run([
- 'failures/unexpected/crash.html',
- 'passes/text.html',
- '--exit-after-n-crashes-or-timeouts', '1',
- ],
- tests_included=True,
- flatten_batches=True)
+ tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
self.assertEqual(['failures/unexpected/crash.html'], tests_run)
# Same with timeouts.
- tests_run = get_tests_run([
- 'failures/unexpected/timeout.html',
- 'passes/text.html',
- '--exit-after-n-crashes-or-timeouts', '1',
- ],
- tests_included=True,
- flatten_batches=True)
+ tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
self.assertEqual(['failures/unexpected/timeout.html'], tests_run)
# But we'll keep going for expected ones.
- tests_run = get_tests_run([
- 'failures/expected/crash.html',
- 'passes/text.html',
- '--exit-after-n-crashes-or-timeouts', '1',
- ],
- tests_included=True,
- flatten_batches=True)
+ tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)
def test_results_directory_absolute(self):
@@ -776,8 +617,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
host = MockHost()
with host.filesystem.mkdtemp() as tmpdir:
- res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
- tests_included=True, host=host)
+ _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])
def test_results_directory_default(self):
@@ -785,7 +625,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
# look for what the output results url was.
# This is the default location.
- res, out, err, user = logging_run(tests_included=True)
+ _, _, user = logging_run(tests_included=True)
self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])
def test_results_directory_relative(self):
@@ -794,44 +634,62 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
host = MockHost()
host.filesystem.maybe_make_directory('/tmp/cwd')
host.filesystem.chdir('/tmp/cwd')
- res, out, err, user = logging_run(['--results-directory=foo'],
- tests_included=True, host=host)
+ _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])
def test_retrying_and_flaky_tests(self):
host = MockHost()
- res, out, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
- self.assertEqual(res, 0)
+ details, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 0)
self.assertTrue('Retrying' in err.getvalue())
- self.assertTrue('Unexpected flakiness' in out.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
- self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/tests_run0.txt'))
self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
# Now we test that --clobber-old-results does remove the old entries and the old retries,
# and that we don't retry again.
host = MockHost()
- res, out, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
- self.assertEqual(res, 1)
+ details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 1)
self.assertTrue('Clobbering old results' in err.getvalue())
self.assertTrue('flaky/text.html' in err.getvalue())
- self.assertTrue('Unexpected text-only failures' in out.getvalue())
- self.assertFalse('Unexpected flakiness' in out.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
self.assertFalse(host.filesystem.exists('retries'))
+ def test_retrying_force_pixel_tests(self):
+ host = MockHost()
+ details, err, _ = logging_run(['--no-pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 1)
+ self.assertTrue('Retrying' in err.getvalue())
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
+ json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
+ json = parse_full_results(json_string)
+ self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
+ {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "image_diff_percent": 1})
+ self.assertFalse(json["pixel_tests_enabled"])
+ self.assertEqual(details.enabled_pixel_tests_in_retry, True)
+
+ def test_retrying_uses_retries_directory(self):
+ host = MockHost()
+ details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
+ self.assertEqual(details.exit_code, 1)
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
+ self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
+
def test_run_order__inline(self):
# These next tests test that we run the tests in ascending alphabetical
# order per directory. HTTP tests are sharded separately from other tests,
# so we have to test both.
- tests_run = get_tests_run(['-i', 'passes/passes', 'passes'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
self.assertEqual(tests_run, sorted(tests_run))
- tests_run = get_tests_run(['http/tests/passes'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['http/tests/passes'])
self.assertEqual(tests_run, sorted(tests_run))
def test_tolerance(self):
- class ImageDiffTestPort(TestPort):
+ class ImageDiffTestPort(test.TestPort):
def diff_image(self, expected_contents, actual_contents, tolerance=None):
self.tolerance_used_for_diff_image = self._options.tolerance
return (True, 1, None)
@@ -862,34 +720,34 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
'virtual/passes/text.html', 'virtual/passes/args.html']))
def test_reftest_run(self):
- tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['passes/reftest.html'])
self.assertEqual(['passes/reftest.html'], tests_run)
def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
- tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
self.assertEqual(['passes/reftest.html'], tests_run)
def test_reftest_skip_reftests_if_no_ref_tests(self):
- tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'])
self.assertEqual([], tests_run)
- tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'])
self.assertEqual([], tests_run)
def test_reftest_expected_html_should_be_ignored(self):
- tests_run = get_tests_run(['passes/reftest-expected.html'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['passes/reftest-expected.html'])
self.assertEqual([], tests_run)
def test_reftest_driver_should_run_expected_html(self):
- tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
- self.assertEqual(['passes/reftest.html', 'passes/reftest-expected.html'], tests_run)
+ tests_run = get_test_results(['passes/reftest.html'])
+ self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])
def test_reftest_driver_should_run_expected_mismatch_html(self):
- tests_run = get_tests_run(['passes/mismatch.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
- self.assertEqual(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run)
+ tests_run = get_test_results(['passes/mismatch.html'])
+ self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])
def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
host = MockHost()
- res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host, record_results=True)
+ _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
self.assertTrue(json_string.find('"unlistedtest.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_missing_image":true}') != -1)
self.assertTrue(json_string.find('"num_regressions":4') != -1)
@@ -906,7 +764,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
host = MockHost()
host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
- tests_included=True, host=host))
+ tests_included=True, host=host))
def test_no_http_and_force(self):
# See test_run_force, using --force raises an exception.
@@ -918,20 +776,20 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
return [test for test in tests if type in test]
def test_no_http_tests(self):
- batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'], flatten_batches=True)
- self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'http'))
- self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'websocket'))
+ batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'])
+ self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'http'))
+ self.assertTrue(RunTest.has_test_of_type(batch_tests_dryrun, 'websocket'))
- batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
- self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'http'))
- self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))
+ batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'])
+ self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'http'))
+ self.assertFalse(RunTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))
- batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
- self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'http'))
- self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'websocket'))
+ batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'])
+ self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'http'))
+ self.assertTrue(RunTest.has_test_of_type(batch_tests_run_http, 'websocket'))
def test_platform_tests_are_found(self):
- tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'], tests_included=True, flatten_batches=True)
+ tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
@@ -939,8 +797,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
# Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
# aren't available.
host = MockHost()
- res, out, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'],
- tests_included=True, record_results=True, host=host)
+ _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
written_files = host.filesystem.written_files
self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
@@ -952,19 +809,13 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertEqual(full_results['has_pretty_patch'], False)
def test_unsupported_platform(self):
- oc = outputcapture.OutputCapture()
- try:
- oc.capture_output()
- res = run_webkit_tests.main(['--platform', 'foo'])
- finally:
- stdout, stderr, logs = oc.restore_output()
+ stdout = StringIO.StringIO()
+ stderr = StringIO.StringIO()
+ res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)
self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
- self.assertEqual(stdout, '')
- self.assertTrue('unsupported platform' in stderr)
-
- # This is empty because we don't even get a chance to configure the logger before failing.
- self.assertEqual(logs, '')
+ self.assertEqual(stdout.getvalue(), '')
+ self.assertTrue('unsupported platform' in stderr.getvalue())
def test_verbose_in_child_processes(self):
# When we actually run multiple processes, we may have to reconfigure logging in the
@@ -979,41 +830,22 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
host = MockHost()
port_obj = host.port_factory.get(port_name=options.platform, options=options)
- buildbot_output = StringIO.StringIO()
- regular_output = StringIO.StringIO()
- res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
- self.assertTrue('text.html passed' in regular_output.getvalue())
- self.assertTrue('image.html passed' in regular_output.getvalue())
+ logging_stream = StringIO.StringIO()
+ run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
+ self.assertTrue('text.html passed' in logging_stream.getvalue())
+ self.assertTrue('image.html passed' in logging_stream.getvalue())
class EndToEndTest(unittest.TestCase):
- def parse_full_results(self, full_results_text):
- json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
- compressed_results = json.loads(json_to_eval)
- return compressed_results
-
- def test_end_to_end(self):
- host = MockHost()
- res, out, err, user = logging_run(record_results=True, tests_included=True, host=host)
-
- self.assertEqual(res, unexpected_tests_count)
- results = self.parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))
-
- # Check to ensure we're passing back image diff %age correctly.
- self.assertEqual(results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)
-
- # Check that we attempted to display the results page in a browser.
- self.assertTrue(user.opened_urls)
-
def test_reftest_with_two_notrefs(self):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
- res, out, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host, record_results=True)
+ _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
file_list = host.filesystem.written_files.keys()
- file_list.remove('/tmp/layout-test-results/tests_run0.txt')
+
json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
- json = self.parse_full_results(json_string)
+ json = parse_full_results(json_string)
self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])
@@ -1041,16 +873,12 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
- res, out, err, _ = logging_run(['--pixel-tests',
- '--reset-results',
- 'passes/image.html',
- 'failures/expected/missing_image.html'],
- tests_included=True, host=host, new_results=True)
+ details, err, _ = logging_run(
+ ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
+ tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
- file_list.remove('/tmp/layout-test-results/tests_run0.txt')
- self.assertEqual(res, 0)
- self.assertEmpty(out)
- self.assertEqual(len(file_list), 4)
+ self.assertEqual(details.exit_code, 0)
+ self.assertEqual(len(file_list), 8)
self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)
@@ -1058,17 +886,15 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
# Test that we update expectations in place. If the expectation
# is missing, update the expected generic location.
host = MockHost()
- res, out, err, _ = logging_run(['--no-show-results',
- 'failures/unexpected/missing_text.html',
- 'failures/unexpected/missing_image.html',
- 'failures/unexpected/missing_audio.html',
- 'failures/unexpected/missing_render_tree_dump.html'],
- tests_included=True, host=host, new_results=True)
+ details, err, _ = logging_run(['--no-show-results',
+ 'failures/unexpected/missing_text.html',
+ 'failures/unexpected/missing_image.html',
+ 'failures/unexpected/missing_audio.html',
+ 'failures/unexpected/missing_render_tree_dump.html'],
+ tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
- file_list.remove('/tmp/layout-test-results/tests_run0.txt')
- self.assertEqual(res, 0)
- self.assertNotEmpty(out)
- self.assertEqual(len(file_list), 6)
+ self.assertEqual(details.exit_code, 0)
+ self.assertEqual(len(file_list), 10)
self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)
@@ -1077,16 +903,12 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
# Test that we update the platform expectations in the version-specific directories
# for both existing and new baselines.
host = MockHost()
- res, out, err, _ = logging_run(['--pixel-tests',
- '--new-baseline',
- 'passes/image.html',
- 'failures/expected/missing_image.html'],
- tests_included=True, host=host, new_results=True)
+ details, err, _ = logging_run(
+ ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
+ tests_included=True, host=host, new_results=True)
file_list = host.filesystem.written_files.keys()
- file_list.remove('/tmp/layout-test-results/tests_run0.txt')
- self.assertEqual(res, 0)
- self.assertEmpty(out)
- self.assertEqual(len(file_list), 4)
+ self.assertEqual(details.exit_code, 0)
+ self.assertEqual(len(file_list), 8)
self.assertBaselines(file_list,
"platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
self.assertBaselines(file_list,
@@ -1109,5 +931,38 @@ class PortTest(unittest.TestCase):
def disabled_test_mac_lion(self):
self.assert_mock_port_works('mac-lion')
-if __name__ == '__main__':
- unittest.main()
+
+class MainTest(unittest.TestCase):
+ def test_exception_handling(self):
+ orig_run_fn = run_webkit_tests.run
+
+ # unused args pylint: disable=W0613
+ def interrupting_run(port, options, args, stderr):
+ raise KeyboardInterrupt
+
+ def successful_run(port, options, args, stderr):
+
+ class FakeRunDetails(object):
+ exit_code = -1
+
+ return FakeRunDetails()
+
+ def exception_raising_run(port, options, args, stderr):
+ assert False
+
+ stdout = StringIO.StringIO()
+ stderr = StringIO.StringIO()
+ try:
+ run_webkit_tests.run = interrupting_run
+ res = run_webkit_tests.main([], stdout, stderr)
+ self.assertEqual(res, run_webkit_tests.INTERRUPTED_EXIT_STATUS)
+
+ run_webkit_tests.run = successful_run
+ res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
+ self.assertEqual(res, -1)
+
+ run_webkit_tests.run = exception_raising_run
+ res = run_webkit_tests.main([], stdout, stderr)
+ self.assertEqual(res, run_webkit_tests.EXCEPTIONAL_EXIT_STATUS)
+ finally:
+ run_webkit_tests.run = orig_run_fn
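The new MainTest above checks that main() converts exceptions escaping run() into the documented exit codes, by temporarily swapping run_webkit_tests.run and restoring it in a finally block. A minimal, self-contained sketch of the same pattern follows; "runner" and its exit-code constants are placeholders, not webkitpy APIs.

    import unittest

    class runner(object):
        """Stand-in for the run_webkit_tests module; not the real webkitpy API."""
        INTERRUPTED_EXIT_STATUS = -2
        EXCEPTIONAL_EXIT_STATUS = -1

        run = staticmethod(lambda port, options, args, stderr: 0)

        @classmethod
        def main(cls, argv):
            # main() maps exceptions raised by run() onto well-known exit codes.
            try:
                return cls.run(None, None, argv, None)
            except KeyboardInterrupt:
                return cls.INTERRUPTED_EXIT_STATUS
            except Exception:
                return cls.EXCEPTIONAL_EXIT_STATUS

    class ExitCodeTest(unittest.TestCase):
        def test_exception_handling(self):
            orig_run = runner.run

            def interrupting_run(port, options, args, stderr):
                raise KeyboardInterrupt

            try:
                # Swap in a run() that raises, then restore the original in finally.
                runner.run = staticmethod(interrupting_run)
                self.assertEqual(runner.main([]), runner.INTERRUPTED_EXIT_STATUS)
            finally:
                runner.run = staticmethod(orig_run)

    if __name__ == '__main__':
        unittest.main()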
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
index 7dede92a6..eb64d8299 100644
--- a/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server_unittest.py b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server_unittest.py
index f3ca6a1a8..5b4ffa79a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/apache_http_server_unittest.py
@@ -28,12 +28,12 @@
import re
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.host_mock import MockHost
-from webkitpy.layout_tests.port import test
+from webkitpy.port import test
from webkitpy.layout_tests.servers.apache_http_server import LayoutTestApacheHttpd
from webkitpy.layout_tests.servers.http_server_base import ServerError
@@ -64,7 +64,7 @@ class TestLayoutTestApacheHttpd(unittest.TestCase):
server.stop()
finally:
_, _, logs = oc.restore_output()
- self.assertTrue("StartServers 4" in logs)
- self.assertTrue("MinSpareServers 4" in logs)
- self.assertTrue("MaxSpareServers 4" in logs)
+ self.assertIn("StartServers 4", logs)
+ self.assertIn("MinSpareServers 4", logs)
+ self.assertIn("MaxSpareServers 4", logs)
self.assertTrue(host.filesystem.exists("/mock/output_dir/httpd.conf"))
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py
index 107c242c6..604f76b89 100755..100644
--- a/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -215,6 +214,15 @@ class Lighttpd(http_server_base.HttpServerBase):
def _check_and_kill(self):
if self._executive.check_running_pid(self._pid):
- self._executive.kill_process(self._pid)
+ host = self._port_obj.host
+ if host.platform.is_win() and not host.platform.is_cygwin():
+ # FIXME: https://bugs.webkit.org/show_bug.cgi?id=106838
+ # We need to kill all of the child processes as well as the
+ # parent, so we can't use executive.kill_process().
+ #
+ # If this is actually working, we should figure out a clean API.
+ self._executive.run_command(["taskkill.exe", "/f", "/t", "/pid", self._pid], error_handler=self._executive.ignore_error)
+ else:
+ self._executive.kill_process(self._pid)
return False
return True
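For reference, here is a standalone sketch of what the taskkill fallback above does, using plain subprocess instead of webkitpy's Executive (the patch routes the call through Executive so it can be mocked in tests); kill_process_tree is a hypothetical helper name.

    import os
    import signal
    import subprocess
    import sys

    def kill_process_tree(pid):
        if sys.platform == 'win32':
            # /f forces termination, /t also terminates child processes, /pid selects
            # the target; failures (e.g. the pid is already gone) are ignored, matching
            # error_handler=self._executive.ignore_error in the patch.
            subprocess.call(['taskkill.exe', '/f', '/t', '/pid', str(pid)])
        else:
            # Elsewhere, killing the single pid is enough.
            os.kill(pid, signal.SIGKILL)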
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py
index 6a0ce58b0..3ce15a568 100755..100644
--- a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -76,8 +75,12 @@ class HttpServerBase(object):
# Stop any stale servers left over from previous instances.
if self._filesystem.exists(self._pid_file):
- self._pid = int(self._filesystem.read_text_file(self._pid_file))
- self._stop_running_server()
+ try:
+ self._pid = int(self._filesystem.read_text_file(self._pid_file))
+ self._stop_running_server()
+ except (ValueError, UnicodeDecodeError):
+ # These could be raised if the pid file is corrupt.
+ self._remove_pid_file()
self._pid = None
self._remove_stale_logs()
@@ -95,29 +98,37 @@ class HttpServerBase(object):
def stop(self):
"""Stops the server. Stopping a server that isn't started is harmless."""
actual_pid = None
- if self._filesystem.exists(self._pid_file):
- actual_pid = int(self._filesystem.read_text_file(self._pid_file))
+ try:
+ if self._filesystem.exists(self._pid_file):
+ try:
+ actual_pid = int(self._filesystem.read_text_file(self._pid_file))
+ except (ValueError, UnicodeDecodeError):
+ # These could be raised if the pid file is corrupt.
+ pass
+ if not self._pid:
+ self._pid = actual_pid
+
if not self._pid:
- self._pid = actual_pid
-
- if not self._pid:
- return
-
- if not actual_pid:
- _log.warning('Failed to stop %s: pid file is missing' % self._name)
- return
- if self._pid != actual_pid:
- _log.warning('Failed to stop %s: pid file contains %d, not %d' %
- (self._name, actual_pid, self._pid))
- # Try to kill the existing pid, anyway, in case it got orphaned.
- self._executive.kill_process(self._pid)
+ return
+
+ if not actual_pid:
+ _log.warning('Failed to stop %s: pid file is missing' % self._name)
+ return
+ if self._pid != actual_pid:
+ _log.warning('Failed to stop %s: pid file contains %d, not %d' %
+ (self._name, actual_pid, self._pid))
+ # Try to kill the existing pid, anyway, in case it got orphaned.
+ self._executive.kill_process(self._pid)
+ self._pid = None
+ return
+
+ _log.debug("Attempting to shut down %s server at pid %d" % (self._name, self._pid))
+ self._stop_running_server()
+ _log.debug("%s server at pid %d stopped" % (self._name, self._pid))
self._pid = None
- return
-
- _log.debug("Attempting to shut down %s server at pid %d" % (self._name, self._pid))
- self._stop_running_server()
- _log.debug("%s server at pid %d stopped" % (self._name, self._pid))
- self._pid = None
+ finally:
+ # Make sure we delete the pid file no matter what happens.
+ self._remove_pid_file()
def _prepare_config(self):
"""This routine can be overridden by subclasses to do any sort
@@ -144,6 +155,10 @@ class HttpServerBase(object):
# Utility routines.
+ def _remove_pid_file(self):
+ if self._filesystem.exists(self._pid_file):
+ self._filesystem.remove(self._pid_file)
+
def _remove_log_files(self, folder, starts_with):
files = self._filesystem.listdir(folder)
for file in files:
@@ -194,7 +209,7 @@ class HttpServerBase(object):
except IOError, e:
if e.errno in (errno.EALREADY, errno.EADDRINUSE):
raise ServerError('Port %d is already in use.' % port)
- elif sys.platform == 'win32' and e.errno in (errno.WSAEACCES,): # pylint: disable-msg=E1101
+ elif sys.platform == 'win32' and e.errno in (errno.WSAEACCES,): # pylint: disable=E1101
raise ServerError('Port %d is already in use.' % port)
else:
raise
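The start()/stop() changes above amount to one rule: a pid file that cannot be parsed is treated like a missing one and removed, rather than aborting the shutdown. A self-contained sketch of that defensive read, against the plain filesystem instead of webkitpy's FileSystem wrapper (read_pid_file is a hypothetical helper name):

    import os

    def read_pid_file(path):
        """Return the pid stored at |path|, or None if the file is missing or corrupt."""
        if not os.path.exists(path):
            return None
        try:
            with open(path) as f:
                return int(f.read())
        except (ValueError, UnicodeDecodeError):
            # Same exceptions the patch guards against; the caller deletes the
            # corrupt file and carries on instead of failing to stop the server.
            return None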
diff --git a/Tools/Scripts/make-gypi b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base_unittest.py
index cebac2745..2cf1d2cf0 100755..100644
--- a/Tools/Scripts/make-gypi
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_base_unittest.py
@@ -1,5 +1,4 @@
-#!/usr/bin/env python
-# Copyright (c) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -27,46 +26,33 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import os
-import re
-import sys
+import unittest2 as unittest
-GYPI_TEMPLATE = """{
- 'variables': {
- '%s': [
-%s
- ]
- }
-}"""
+from webkitpy.common.host_mock import MockHost
+from webkitpy.port import test
+from webkitpy.layout_tests.servers.http_server_base import HttpServerBase
-def find_source_code():
- source_code_regexp = re.compile(r'\.(cpp|h|m|mm)$')
- collected_files = []
- for directory_path, directory_names, file_names in os.walk('.'):
- for file_name in file_names:
- if source_code_regexp.search(file_name):
- relative_path = os.path.join(directory_path, file_name)
- collected_files.append(os.path.relpath(relative_path, '.'))
+class TestHttpServerBase(unittest.TestCase):
+ def test_corrupt_pid_file(self):
+ # This tests that if the pid file is corrupt or invalid,
+ # both start() and stop() deal with it correctly and delete the file.
+ host = MockHost()
+ test_port = test.TestPort(host)
- return collected_files
+ server = HttpServerBase(test_port)
+ server._pid_file = '/tmp/pidfile'
+ server._spawn_process = lambda: 4
+ server._is_server_running_on_all_ports = lambda: True
-def build_file_line(file_name, indent):
- return indent + "'%s'," % file_name
+ host.filesystem.write_text_file(server._pid_file, 'foo')
+ server.stop()
+ self.assertEqual(host.filesystem.files[server._pid_file], None)
-def build_file_list(source_code, indent):
- return '\n'.join([build_file_line(file_name, indent) for file_name in sorted(source_code)])
+ host.filesystem.write_text_file(server._pid_file, 'foo')
+ server.start()
+ self.assertEqual(server._pid, 4)
-def build_gypi(project):
- key = project.lower() + '_files'
- value = build_file_list(find_source_code(), ' ' * 3)
- return GYPI_TEMPLATE % (key, value)
-
-def main():
- if len(sys.argv) < 2:
- print 'Usage: %s project_name' % sys.argv[0]
- return
- print build_gypi(sys.argv[1])
-
-if __name__ == "__main__":
- main()
+ # Note that the pid file would not be None if _spawn_process()
+ # was actually a real implementation.
+ self.assertEqual(host.filesystem.files[server._pid_file], None)
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py
index 37faa6429..aa6e59dc5 100755..100644
--- a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py
@@ -36,7 +36,7 @@ import socket
import subprocess
import sys
import tempfile
-import unittest
+import unittest2 as unittest
class BaseTest(unittest.TestCase):
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_unittest.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_unittest.py
index 5cc4a6ee0..621838f00 100644
--- a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_unittest.py
@@ -26,12 +26,12 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
import re
import sys
+import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
-from webkitpy.layout_tests.port import test
+from webkitpy.port import test
from webkitpy.layout_tests.servers.http_server import Lighttpd
from webkitpy.layout_tests.servers.http_server_base import ServerError
@@ -62,3 +62,40 @@ class TestHttpServer(unittest.TestCase):
'alias.url += ( "/mock/another-additional-dir" => "/mock-checkout/one-additional-dir" )',
'alias.url += ( "/media-resources" => "/test.checkout/LayoutTests/media" )',
])
+
+ def test_win32_start_and_stop(self):
+ host = MockHost()
+ test_port = test.TestPort(host)
+ host.filesystem.write_text_file(
+ "/mock-checkout/Tools/Scripts/webkitpy/layout_tests/servers/lighttpd.conf", "Mock Config\n")
+ host.filesystem.write_text_file(
+ "/usr/lib/lighttpd/liblightcomp.dylib", "Mock dylib")
+
+ host.platform.is_win = lambda: True
+ host.platform.is_cygwin = lambda: False
+
+ server = Lighttpd(test_port, "/mock/output_dir",
+ additional_dirs={
+ "/mock/one-additional-dir": "/mock-checkout/one-additional-dir",
+ "/mock/another-additional-dir": "/mock-checkout/one-additional-dir"})
+ server._check_that_all_ports_are_available = lambda: True
+ server._is_server_running_on_all_ports = lambda: True
+
+ server.start()
+ self.assertNotEquals(host.executive.calls, [])
+
+ def wait_for_action(action):
+ if action():
+ return True
+ return action()
+
+ def mock_returns(return_values):
+ def return_value_thunk(*args, **kwargs):
+ return return_values.pop(0)
+ return return_value_thunk
+
+ host.executive.check_running_pid = mock_returns([True, False])
+ server._wait_for_action = wait_for_action
+
+ server.stop()
+ self.assertEqual(['taskkill.exe', '/f', '/t', '/pid', 42], host.executive.calls[1])
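The mock_returns helper in the test above is a hand-rolled way to return one queued value per call, the same behaviour mock.Mock(side_effect=[...]) provides; presumably the test keeps its own thunk so MockExecutive needs no extra dependency (my assumption). Standalone, it behaves like this:

    def mock_returns(return_values):
        def return_value_thunk(*args, **kwargs):
            return return_values.pop(0)
        return return_value_thunk

    check_running_pid = mock_returns([True, False])
    assert check_running_pid(42) is True    # first poll: the server still looks alive
    assert check_running_pid(42) is False   # second poll: it has exited, so stop() returns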
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/websocket_server.py b/Tools/Scripts/webkitpy/layout_tests/servers/websocket_server.py
index 93747f690..2ffdc321d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/servers/websocket_server.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/websocket_server.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
new file mode 100644
index 000000000..3191b84ea
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+from webkitpy.layout_tests.models import test_expectations
+
+from webkitpy.common.net import resultsjsonparser
+
+
+TestExpectations = test_expectations.TestExpectations
+TestExpectationParser = test_expectations.TestExpectationParser
+
+
+class BuildBotPrinter(object):
+ # This output is parsed by buildbots and must only be changed in coordination with buildbot scripts (see webkit.org's
+ # Tools/BuildSlaveSupport/build.webkit.org-config/master.cfg: RunWebKitTests._parseNewRunWebKitTestsOutput
+ # and chromium.org's buildbot/master.chromium/scripts/master/log_parser/webkit_test_command.py).
+
+ def __init__(self, stream, debug_logging):
+ self.stream = stream
+ self.debug_logging = debug_logging
+
+ def print_results(self, run_details):
+ if self.debug_logging:
+ self.print_run_results(run_details.initial_results)
+ self.print_unexpected_results(run_details.summarized_results, run_details.enabled_pixel_tests_in_retry)
+
+ def _print(self, msg):
+ self.stream.write(msg + '\n')
+
+ def print_run_results(self, run_results):
+ failed = run_results.total_failures
+ total = run_results.total
+ passed = total - failed - run_results.remaining
+ percent_passed = 0.0
+ if total > 0:
+ percent_passed = float(passed) * 100 / total
+
+ self._print("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, percent_passed))
+ self._print("")
+ self._print_run_results_entry(run_results, test_expectations.NOW, "Tests to be fixed")
+
+ self._print("")
+ # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
+ self._print_run_results_entry(run_results, test_expectations.WONTFIX,
+ "Tests that will only be fixed if they crash (WONTFIX)")
+ self._print("")
+
+ def _print_run_results_entry(self, run_results, timeline, heading):
+ total = len(run_results.tests_by_timeline[timeline])
+ not_passing = (total -
+ len(run_results.tests_by_expectation[test_expectations.PASS] &
+ run_results.tests_by_timeline[timeline]))
+ self._print("=> %s (%d):" % (heading, not_passing))
+
+ for result in TestExpectations.EXPECTATION_ORDER:
+ if result in (test_expectations.PASS, test_expectations.SKIP):
+ continue
+ results = (run_results.tests_by_expectation[result] & run_results.tests_by_timeline[timeline])
+ desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
+ if not_passing and len(results):
+ pct = len(results) * 100.0 / not_passing
+ self._print(" %5d %-24s (%4.1f%%)" % (len(results), desc, pct))
+
+ def print_unexpected_results(self, summarized_results, enabled_pixel_tests_in_retry=False):
+ passes = {}
+ flaky = {}
+ regressions = {}
+
+ def add_to_dict_of_lists(dict, key, value):
+ dict.setdefault(key, []).append(value)
+
+ def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
+ actual = results['actual'].split(" ")
+ expected = results['expected'].split(" ")
+
+ def is_expected(result):
+ return (result in expected) or (result in ('AUDIO', 'TEXT', 'IMAGE+TEXT') and 'FAIL' in expected)
+
+ if all(is_expected(actual_result) for actual_result in actual):
+ # Don't print anything for tests that ran as expected.
+ return
+
+ if actual == ['PASS']:
+ if 'CRASH' in expected:
+ add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
+ elif 'TIMEOUT' in expected:
+ add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
+ else:
+ add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
+ elif enabled_pixel_tests_in_retry and actual == ['TEXT', 'IMAGE+TEXT']:
+ add_to_dict_of_lists(regressions, actual[0], test)
+ elif len(actual) > 1:
+ # We group flaky tests by the first actual result we got.
+ add_to_dict_of_lists(flaky, actual[0], test)
+ else:
+ add_to_dict_of_lists(regressions, results['actual'], test)
+
+ resultsjsonparser.for_each_test(summarized_results['tests'], add_result)
+
+ if len(passes) or len(flaky) or len(regressions):
+ self._print("")
+ if len(passes):
+ for key, tests in passes.iteritems():
+ self._print("%s: (%d)" % (key, len(tests)))
+ tests.sort()
+ for test in tests:
+ self._print(" %s" % test)
+ self._print("")
+ self._print("")
+
+ if len(flaky):
+ descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
+ for key, tests in flaky.iteritems():
+ result = TestExpectations.EXPECTATIONS[key.lower()]
+ self._print("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
+ tests.sort()
+
+ for test in tests:
+ result = resultsjsonparser.result_for_test(summarized_results['tests'], test)
+ actual = result['actual'].split(" ")
+ expected = result['expected'].split(" ")
+ result = TestExpectations.EXPECTATIONS[key.lower()]
+ # FIXME: clean this up once the old syntax is gone
+ new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
+ self._print(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
+ self._print("")
+ self._print("")
+
+ if len(regressions):
+ descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
+ for key, tests in regressions.iteritems():
+ result = TestExpectations.EXPECTATIONS[key.lower()]
+ self._print("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
+ tests.sort()
+ for test in tests:
+ self._print(" %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
+ self._print("")
+
+ if len(summarized_results['tests']) and self.debug_logging:
+ self._print("%s" % ("-" * 78))
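The heart of print_unexpected_results() above is bucketing each test's "actual"/"expected" result strings into unexpected passes, flaky tests, and regressions. A reduced, standalone version of that decision is sketched below; classify is a hypothetical name and it omits the FAIL-aliasing and pixel-retry special cases.

    def classify(actual, expected):
        actual = actual.split(" ")
        expected = expected.split(" ")
        if all(result in expected for result in actual):
            return 'as expected'          # nothing to report
        if actual == ['PASS']:
            return 'unexpected pass'      # expected to fail/crash/timeout, but passed
        if len(actual) > 1:
            return 'flaky'                # several results: the retry changed the outcome
        return 'regression'

    assert classify('PASS', 'PASS') == 'as expected'
    assert classify('PASS', 'CRASH') == 'unexpected pass'
    assert classify('TEXT PASS', 'PASS') == 'flaky'
    assert classify('CRASH', 'PASS') == 'regression'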
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
new file mode 100644
index 000000000..5ce15c1dc
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/views/buildbot_results_unittest.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import StringIO
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_run_results
+from webkitpy.layout_tests.models import test_run_results_unittest
+from webkitpy.layout_tests.views import buildbot_results
+
+
+class BuildBotPrinterTests(unittest.TestCase):
+ def assertEmpty(self, stream):
+ self.assertFalse(stream.getvalue())
+
+ def assertNotEmpty(self, stream):
+ self.assertTrue(stream.getvalue())
+
+ def get_printer(self):
+ stream = StringIO.StringIO()
+ printer = buildbot_results.BuildBotPrinter(stream, debug_logging=True)
+ return printer, stream
+
+ def test_print_unexpected_results(self):
+ port = MockHost().port_factory.get('test')
+ printer, out = self.get_printer()
+
+ # test everything running as expected
+ DASHED_LINE = "-" * 78 + "\n"
+ summary = test_run_results_unittest.summarized_results(port, expected=True, passing=False, flaky=False)
+ printer.print_unexpected_results(summary)
+ self.assertEqual(out.getvalue(), DASHED_LINE)
+
+ # test failures
+ printer, out = self.get_printer()
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(summary)
+ self.assertNotEmpty(out)
+
+ # test unexpected flaky
+ printer, out = self.get_printer()
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=True)
+ printer.print_unexpected_results(summary)
+ self.assertNotEmpty(out)
+
+ printer, out = self.get_printer()
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(summary)
+ self.assertNotEmpty(out)
+
+ printer, out = self.get_printer()
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=False, flaky=False)
+ printer.print_unexpected_results(summary)
+ self.assertNotEmpty(out)
+
+ printer, out = self.get_printer()
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
+ printer.print_unexpected_results(summary)
+ self.assertNotEmpty(out)
+
+ def test_print_results(self):
+ port = MockHost().port_factory.get('test')
+ printer, out = self.get_printer()
+ initial_results = test_run_results_unittest.run_results(port)
+ summary = test_run_results_unittest.summarized_results(port, expected=False, passing=True, flaky=False)
+ details = test_run_results.RunDetails(summary['num_regressions'], summary, initial_results, None)
+ printer.print_results(details)
+ self.assertNotEmpty(out)
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
index acea93ea4..fd04ad868 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
index 6620cbb04..0eaec2db9 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/metered_stream_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -30,7 +29,7 @@
import logging
import re
import StringIO
-import unittest
+import unittest2 as unittest
from webkitpy.layout_tests.views.metered_stream import MeteredStream
@@ -152,7 +151,3 @@ class VerboseTest(RegularTest):
self.logger.info('foo %s %d', 'bar', 2)
self.assertEqual(len(self.buflist), 1)
self.assertTrue(self.buflist[0].endswith('foo bar 2\n'))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing.py b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
index b7a9195a8..9a9caa589 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/printing.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -33,7 +32,6 @@ import math
import optparse
from webkitpy.tool import grammar
-from webkitpy.common.net import resultsjsonparser
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
from webkitpy.layout_tests.views.metered_stream import MeteredStream
@@ -56,23 +54,13 @@ def print_options():
class Printer(object):
- """Class handling all non-debug-logging printing done by run-webkit-tests.
-
- Printing from run-webkit-tests falls into two buckets: general or
- regular output that is read only by humans and can be changed at any
- time, and output that is parsed by buildbots (and humans) and hence
- must be changed more carefully and in coordination with the buildbot
- parsing code (in chromium.org's buildbot/master.chromium/scripts/master/
- log_parser/webkit_test_command.py script).
-
- By default the buildbot-parsed code gets logged to stdout, and regular
- output gets logged to stderr."""
- def __init__(self, port, options, regular_output, buildbot_output, logger=None):
- self.num_completed = 0
+ """Class handling all non-debug-logging printing done by run-webkit-tests."""
+
+ def __init__(self, port, options, regular_output, logger=None):
+ self.num_started = 0
self.num_tests = 0
self._port = port
self._options = options
- self._buildbot_stream = buildbot_output
self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
number_of_columns=self._port.host.platform.terminal_width())
self._running_tests = []
@@ -110,32 +98,32 @@ class Printer(object):
self._print_default('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
- num_unique_tests = num_to_run / (repeat_each * iterations)
- found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_unique_tests)
+ found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
if repeat_each * iterations > 1:
found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
- found_str += ', skipping %d' % (num_all_test_files - num_unique_tests)
+ found_str += ', skipping %d' % (num_all_test_files - num_to_run)
self._print_default(found_str + '.')
- def print_expected(self, result_summary, tests_with_result_type_callback):
- self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes", tests_with_result_type_callback)
- self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures", tests_with_result_type_callback)
- self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
+ def print_expected(self, run_results, tests_with_result_type_callback):
+ self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
+ self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
+ self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
driver_name = self._port.driver_name()
if num_workers == 1:
- self._print_default("Running 1 %s over %s." % (driver_name, grammar.pluralize('shard', num_shards)))
+ self._print_default("Running 1 %s." % driver_name)
+ self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
else:
- self._print_default("Running %d %ss in parallel over %d shards (%d locked)." %
- (num_workers, driver_name, num_shards, num_locked_shards))
+ self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
+ self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
self._print_default('')
- def _print_expected_results_of_type(self, result_summary, result_type, result_type_str, tests_with_result_type_callback):
+ def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
tests = tests_with_result_type_callback(result_type)
- now = result_summary.tests_by_timeline[test_expectations.NOW]
- wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
+ now = run_results.tests_by_timeline[test_expectations.NOW]
+ wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]
# We use a fancy format string in order to print the data out in a
# nicely-aligned table.
@@ -149,39 +137,45 @@ class Printer(object):
ndigits = int(math.log10(len(num))) + 1
return ndigits
- def print_results(self, run_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results):
- self._print_timing_statistics(run_time, thread_timings, test_timings, individual_test_timings, result_summary)
- self._print_result_summary(result_summary)
- self._print_one_line_summary(result_summary.total - result_summary.expected_skips,
- result_summary.expected - result_summary.expected_skips,
- result_summary.unexpected)
- self._print_unexpected_results(unexpected_results)
-
- def _print_timing_statistics(self, total_time, thread_timings,
- directory_test_timings, individual_test_timings,
- result_summary):
+ def print_results(self, run_time, run_results, summarized_results):
+ self._print_timing_statistics(run_time, run_results)
+ self._print_one_line_summary(run_results.total - run_results.expected_skips,
+ run_results.expected - run_results.expected_skips,
+ run_results.unexpected)
+
+ def _print_timing_statistics(self, total_time, run_results):
self._print_debug("Test timing:")
self._print_debug(" %6.2f total testing time" % total_time)
self._print_debug("")
+
+ self._print_worker_statistics(run_results, int(self._options.child_processes))
+ self._print_aggregate_test_statistics(run_results)
+ self._print_individual_test_times(run_results)
+ self._print_directory_timings(run_results)
+
+ def _print_worker_statistics(self, run_results, num_workers):
self._print_debug("Thread timing:")
+ stats = {}
cuml_time = 0
- for t in thread_timings:
- self._print_debug(" %10s: %5d tests, %6.2f secs" % (t['name'], t['num_tests'], t['total_time']))
- cuml_time += t['total_time']
- self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / int(self._options.child_processes)))
+ for result in run_results.results_by_name.values():
+ stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
+ stats[result.worker_name]['num_tests'] += 1
+ stats[result.worker_name]['total_time'] += result.total_run_time
+ cuml_time += result.total_run_time
+
+ for worker_name in stats:
+ self._print_debug(" %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
+ self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
self._print_debug("")
- self._print_aggregate_test_statistics(individual_test_timings)
- self._print_individual_test_times(individual_test_timings, result_summary)
- self._print_directory_timings(directory_test_timings)
-
- def _print_aggregate_test_statistics(self, individual_test_timings):
- times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings]
+ def _print_aggregate_test_statistics(self, run_results):
+ times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
- def _print_individual_test_times(self, individual_test_timings, result_summary):
+ def _print_individual_test_times(self, run_results):
# Reverse-sort by the time spent in DumpRenderTree.
- individual_test_timings.sort(lambda a, b: cmp(b.test_run_time, a.test_run_time))
+
+ individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)
num_printed = 0
slow_tests = []
timeout_or_crash_tests = []
@@ -189,12 +183,12 @@ class Printer(object):
for test_tuple in individual_test_timings:
test_name = test_tuple.test_name
is_timeout_crash_or_slow = False
- if test_name in result_summary.slow_tests:
+ if test_name in run_results.slow_tests:
is_timeout_crash_or_slow = True
slow_tests.append(test_tuple)
- if test_name in result_summary.failures:
- result = result_summary.results[test_name].type
+ if test_name in run_results.failures_by_name:
+ result = run_results.results_by_name[test_name].type
if (result == test_expectations.TIMEOUT or
result == test_expectations.CRASH):
is_timeout_crash_or_slow = True
@@ -219,18 +213,23 @@ class Printer(object):
test_run_time = round(test_tuple.test_run_time, 1)
self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
- def _print_directory_timings(self, directory_test_timings):
+ def _print_directory_timings(self, run_results):
+ stats = {}
+ for result in run_results.results_by_name.values():
+ stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
+ stats[result.shard_name]['num_tests'] += 1
+ stats[result.shard_name]['total_time'] += result.total_run_time
+
timings = []
- for directory in directory_test_timings:
- num_tests, time_for_directory = directory_test_timings[directory]
- timings.append((round(time_for_directory, 1), directory, num_tests))
+ for directory in stats:
+ timings.append((directory, round(stats[directory]['total_time'], 1), stats[directory]['num_tests']))
timings.sort()
self._print_debug("Time to process slowest subdirectories:")
min_seconds_to_print = 10
for timing in timings:
if timing[0] > min_seconds_to_print:
- self._print_debug(" %s took %s seconds to run %s tests." % (timing[1], timing[0], timing[2]))
+ self._print_debug(" %s took %s seconds to run %s tests." % timing)
self._print_debug("")
def _print_statistics_for_test_timings(self, title, timings):
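The reworked _print_worker_statistics() and _print_directory_timings() above share one aggregation idiom: group per-test results by an attribute with dict.setdefault and sum their run times. A tiny standalone example, where FakeResult stands in for webkitpy's result objects:

    import collections

    FakeResult = collections.namedtuple('FakeResult', ['shard_name', 'total_run_time'])

    results = [FakeResult('passes', 1.5), FakeResult('passes', 0.5), FakeResult('http', 2.0)]

    stats = {}
    for result in results:
        stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
        stats[result.shard_name]['num_tests'] += 1
        stats[result.shard_name]['total_time'] += result.total_run_time

    assert stats == {'passes': {'num_tests': 2, 'total_time': 2.0},
                     'http': {'num_tests': 1, 'total_time': 2.0}}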
@@ -263,44 +262,6 @@ class Printer(object):
self._print_debug(" Standard dev: %6.3f" % std_deviation)
self._print_debug("")
- def _print_result_summary(self, result_summary):
- if not self._options.debug_rwt_logging:
- return
-
- failed = result_summary.total_failures
- total = result_summary.total - result_summary.expected_skips
- passed = total - failed - result_summary.remaining
- pct_passed = 0.0
- if total > 0:
- pct_passed = float(passed) * 100 / total
-
- self._print_for_bot("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, pct_passed))
- self._print_for_bot("")
- self._print_result_summary_entry(result_summary, test_expectations.NOW, "Tests to be fixed")
-
- self._print_for_bot("")
- # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
- self._print_result_summary_entry(result_summary, test_expectations.WONTFIX,
- "Tests that will only be fixed if they crash (WONTFIX)")
- self._print_for_bot("")
-
- def _print_result_summary_entry(self, result_summary, timeline, heading):
- total = len(result_summary.tests_by_timeline[timeline])
- not_passing = (total -
- len(result_summary.tests_by_expectation[test_expectations.PASS] &
- result_summary.tests_by_timeline[timeline]))
- self._print_for_bot("=> %s (%d):" % (heading, not_passing))
-
- for result in TestExpectations.EXPECTATION_ORDER:
- if result in (test_expectations.PASS, test_expectations.SKIP):
- continue
- results = (result_summary.tests_by_expectation[result] &
- result_summary.tests_by_timeline[timeline])
- desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
- if not_passing and len(results):
- pct = len(results) * 100.0 / not_passing
- self._print_for_bot(" %5d %-24s (%4.1f%%)" % (len(results), desc, pct))
-
def _print_one_line_summary(self, total, expected, unexpected):
incomplete = total - expected - unexpected
incomplete_str = ''
@@ -328,7 +289,7 @@ class Printer(object):
def _test_status_line(self, test_name, suffix):
format_string = '[%d/%d] %s%s'
- status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
+ status_line = format_string % (self.num_started, self.num_tests, test_name, suffix)
if len(status_line) > self._meter.number_of_columns():
overflow_columns = len(status_line) - self._meter.number_of_columns()
ellipsis = '...'
@@ -340,9 +301,10 @@ class Printer(object):
new_length = len(test_name) - overflow_columns - len(ellipsis)
prefix = int(new_length / 2)
test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
- return format_string % (self.num_completed, self.num_tests, test_name, suffix)
+ return format_string % (self.num_started, self.num_tests, test_name, suffix)
def print_started_test(self, test_name):
+ self.num_started += 1
self._running_tests.append(test_name)
if len(self._running_tests) > 1:
suffix = ' (+%d)' % (len(self._running_tests) - 1)
@@ -355,7 +317,6 @@ class Printer(object):
write(self._test_status_line(test_name, suffix))
def print_finished_test(self, result, expected, exp_str, got_str):
- self.num_completed += 1
test_name = result.test_name
result_message = self._result_message(result.type, result.failures, expected, self._options.verbose)
@@ -364,7 +325,7 @@ class Printer(object):
self._print_test_trace(result, exp_str, got_str)
elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected:
self.writeln(self._test_status_line(test_name, result_message))
- elif self.num_completed == self.num_tests:
+ elif self.num_started == self.num_tests:
self._meter.write_update('')
else:
if test_name == self._running_tests[0]:
@@ -411,75 +372,6 @@ class Printer(object):
relpath = '<none>'
self._print_default(' %s: %s' % (extension[1:], relpath))
- def _print_unexpected_results(self, unexpected_results):
- # Prints to the buildbot stream
- passes = {}
- flaky = {}
- regressions = {}
-
- def add_to_dict_of_lists(dict, key, value):
- dict.setdefault(key, []).append(value)
-
- def add_result(test, results, passes=passes, flaky=flaky, regressions=regressions):
- actual = results['actual'].split(" ")
- expected = results['expected'].split(" ")
- if actual == ['PASS']:
- if 'CRASH' in expected:
- add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
- elif 'TIMEOUT' in expected:
- add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
- else:
- add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
- elif len(actual) > 1:
- # We group flaky tests by the first actual result we got.
- add_to_dict_of_lists(flaky, actual[0], test)
- else:
- add_to_dict_of_lists(regressions, results['actual'], test)
-
- resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)
-
- if len(passes) or len(flaky) or len(regressions):
- self._print_for_bot("")
- if len(passes):
- for key, tests in passes.iteritems():
- self._print_for_bot("%s: (%d)" % (key, len(tests)))
- tests.sort()
- for test in tests:
- self._print_for_bot(" %s" % test)
- self._print_for_bot("")
- self._print_for_bot("")
-
- if len(flaky):
- descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
- for key, tests in flaky.iteritems():
- result = TestExpectations.EXPECTATIONS[key.lower()]
- self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result], len(tests)))
- tests.sort()
-
- for test in tests:
- result = resultsjsonparser.result_for_test(unexpected_results['tests'], test)
- actual = result['actual'].split(" ")
- expected = result['expected'].split(" ")
- result = TestExpectations.EXPECTATIONS[key.lower()]
- # FIXME: clean this up once the old syntax is gone
- new_expectations_list = [TestExpectationParser._inverted_expectation_tokens[exp] for exp in list(set(actual) | set(expected))]
- self._print_for_bot(" %s [ %s ]" % (test, " ".join(new_expectations_list)))
- self._print_for_bot("")
- self._print_for_bot("")
-
- if len(regressions):
- descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
- for key, tests in regressions.iteritems():
- result = TestExpectations.EXPECTATIONS[key.lower()]
- self._print_for_bot("Regressions: Unexpected %s (%d)" % (descriptions[result], len(tests)))
- tests.sort()
- for test in tests:
- self._print_for_bot(" %s [ %s ]" % (test, TestExpectationParser._inverted_expectation_tokens[key]))
- self._print_for_bot("")
-
- if len(unexpected_results['tests']) and self._options.debug_rwt_logging:
- self._print_for_bot("%s" % ("-" * 78))
-
def _print_quiet(self, msg):
self.writeln(msg)
@@ -491,9 +383,6 @@ class Printer(object):
if self._options.debug_rwt_logging:
self.writeln(msg)
- def _print_for_bot(self, msg):
- self._buildbot_stream.write(msg + "\n")
-
def write_update(self, msg):
self._meter.write_update(msg)
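
Note on the status-line handling above: _test_status_line keeps the "[started/total] name suffix" line within the meter's column count by dropping characters from the middle of the test name. A minimal standalone sketch of that middle-truncation idea (the helper name and the 30-column figure are illustrative, not taken from printing.py):

    def truncate_middle(name, max_columns, ellipsis='...'):
        # Keep the head and tail of the name and drop the middle, so the
        # whole name fits in max_columns characters.
        if len(name) <= max_columns:
            return name
        keep = max_columns - len(ellipsis)
        prefix = keep // 2
        return name[:prefix] + ellipsis + name[-(keep - prefix):]

    # truncate_middle('fast/dom/HTMLFormElement/associated-elements.html', 30)
    # returns 'fast/dom/HTML...-elements.html' (exactly 30 characters).
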
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
index c0b928b0c..d65cc5e12 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -33,14 +32,13 @@ import optparse
import StringIO
import sys
import time
-import unittest
+import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system import logtesting
-from webkitpy.layout_tests import port
+from webkitpy import port
from webkitpy.layout_tests.controllers import manager
-from webkitpy.layout_tests.models import result_summary
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
@@ -56,7 +54,7 @@ def get_options(args):
class TestUtilityFunctions(unittest.TestCase):
def test_print_options(self):
options, args = get_options([])
- self.assertTrue(options is not None)
+ self.assertIsNotNone(options)
class Testprinter(unittest.TestCase):
@@ -83,9 +81,8 @@ class Testprinter(unittest.TestCase):
nproc = 2
regular_output = StringIO.StringIO()
- buildbot_output = StringIO.StringIO()
- printer = printing.Printer(self._port, options, regular_output, buildbot_output)
- return printer, regular_output, buildbot_output
+ printer = printing.Printer(self._port, options, regular_output)
+ return printer, regular_output
def get_result(self, test_name, result_type=test_expectations.PASS, run_time=0):
failures = []
@@ -95,168 +92,59 @@ class Testprinter(unittest.TestCase):
failures = [test_failures.FailureCrash()]
return test_results.TestResult(test_name, failures=failures, test_run_time=run_time)
- def get_result_summary(self, test_names, expectations_str):
- port.test_expectations = lambda: expectations_str
- port.test_expectations_overrides = lambda: None
- expectations = test_expectations.TestExpectations(self._port, test_names)
-
- rs = result_summary.ResultSummary(expectations, test_names, 1, set())
- return test_names, rs, expectations
-
def test_configure_and_cleanup(self):
# This test verifies that calling cleanup repeatedly and deleting
# the object is safe.
- printer, err, out = self.get_printer()
+ printer, err = self.get_printer()
printer.cleanup()
printer.cleanup()
printer = None
def test_print_config(self):
- printer, err, out = self.get_printer()
+ printer, err = self.get_printer()
# FIXME: it's lame that i have to set these options directly.
printer._options.pixel_tests = True
printer._options.new_baseline = True
printer._options.time_out_ms = 6000
printer._options.slow_time_out_ms = 12000
printer.print_config('/tmp')
- self.assertTrue("Using port 'test-mac-leopard'" in err.getvalue())
- self.assertTrue('Test configuration: <leopard, x86, release>' in err.getvalue())
- self.assertTrue('Placing test results in /tmp' in err.getvalue())
- self.assertTrue('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in err.getvalue())
- self.assertTrue('Using Release build' in err.getvalue())
- self.assertTrue('Pixel tests enabled' in err.getvalue())
- self.assertTrue('Command line:' in err.getvalue())
- self.assertTrue('Regular timeout: ' in err.getvalue())
+ self.assertIn("Using port 'test-mac-leopard'", err.getvalue())
+ self.assertIn('Test configuration: <leopard, x86, release>', err.getvalue())
+ self.assertIn('Placing test results in /tmp', err.getvalue())
+ self.assertIn('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic', err.getvalue())
+ self.assertIn('Using Release build', err.getvalue())
+ self.assertIn('Pixel tests enabled', err.getvalue())
+ self.assertIn('Command line:', err.getvalue())
+ self.assertIn('Regular timeout: ', err.getvalue())
self.reset(err)
printer._options.quiet = True
printer.print_config('/tmp')
- self.assertFalse('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in err.getvalue())
+ self.assertNotIn('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic', err.getvalue())
def test_print_one_line_summary(self):
- printer, err, out = self.get_printer()
+ printer, err = self.get_printer()
printer._print_one_line_summary(1, 1, 0)
self.assertWritten(err, ["The test ran as expected.\n", "\n"])
- printer, err, out = self.get_printer()
+ printer, err = self.get_printer()
printer._print_one_line_summary(1, 1, 0)
self.assertWritten(err, ["The test ran as expected.\n", "\n"])
- printer, err, out = self.get_printer()
+ printer, err = self.get_printer()
printer._print_one_line_summary(2, 1, 1)
self.assertWritten(err, ["\n", "1 test ran as expected, 1 didn't:\n", "\n"])
- printer, err, out = self.get_printer()
+ printer, err = self.get_printer()
printer._print_one_line_summary(3, 2, 1)
self.assertWritten(err, ["\n", "2 tests ran as expected, 1 didn't:\n", "\n"])
- printer, err, out = self.get_printer()
+ printer, err = self.get_printer()
printer._print_one_line_summary(3, 2, 0)
self.assertWritten(err, ['\n', "2 tests ran as expected (1 didn't run).\n", '\n'])
- def test_print_unexpected_results(self):
- # This routine is the only one that prints stuff that the bots
- # care about.
- #
- # FIXME: there's some weird layering going on here. It seems
- # like we shouldn't be both using an expectations string and
- # having to specify whether or not the result was expected.
- # This whole set of tests should probably be rewritten.
- #
- # FIXME: Plus, the fact that we're having to call into
- # run_webkit_tests is clearly a layering inversion.
- def get_unexpected_results(expected, passing, flaky):
- """Return an unexpected results summary matching the input description.
-
- There are a lot of different combinations of test results that
- can be tested; this routine produces various combinations based
- on the values of the input flags.
-
- Args
- expected: whether the tests ran as expected
- passing: whether the tests should all pass
- flaky: whether the tests should be flaky (if False, they
- produce the same results on both runs; if True, they
- all pass on the second run).
-
- """
- test_is_slow = False
- paths, rs, exp = self.get_result_summary(tests, expectations)
- if expected:
- rs.add(self.get_result('passes/text.html', test_expectations.PASS), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/timeout.html', test_expectations.TIMEOUT), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/crash.html', test_expectations.CRASH), expected, test_is_slow)
- elif passing:
- rs.add(self.get_result('passes/text.html'), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/timeout.html'), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/crash.html'), expected, test_is_slow)
- else:
- rs.add(self.get_result('passes/text.html', test_expectations.TIMEOUT), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/timeout.html', test_expectations.CRASH), expected, test_is_slow)
- rs.add(self.get_result('failures/expected/crash.html', test_expectations.TIMEOUT), expected, test_is_slow)
- retry = rs
- if flaky:
- paths, retry, exp = self.get_result_summary(tests, expectations)
- retry.add(self.get_result('passes/text.html'), True, test_is_slow)
- retry.add(self.get_result('failures/expected/timeout.html'), True, test_is_slow)
- retry.add(self.get_result('failures/expected/crash.html'), True, test_is_slow)
- unexpected_results = manager.summarize_results(self._port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
- return unexpected_results
-
- tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html']
- expectations = ''
-
- printer, err, out = self.get_printer()
-
- # test everything running as expected
- ur = get_unexpected_results(expected=True, passing=False, flaky=False)
- printer._print_unexpected_results(ur)
- self.assertEmpty(err)
- self.assertEmpty(out)
-
- # test failures
- printer, err, out = self.get_printer()
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer._print_unexpected_results(ur)
- self.assertEmpty(err)
- self.assertNotEmpty(out)
-
- # test unexpected flaky
- printer, err, out = self.get_printer()
- ur = get_unexpected_results(expected=False, passing=False, flaky=True)
- printer._print_unexpected_results(ur)
- self.assertEmpty(err)
- self.assertNotEmpty(out)
-
- printer, err, out = self.get_printer()
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer._print_unexpected_results(ur)
- self.assertEmpty(err)
- self.assertNotEmpty(out)
-
- expectations = """
-BUGX : failures/expected/crash.html = CRASH
-BUGX : failures/expected/timeout.html = TIMEOUT
-"""
- printer, err, out = self.get_printer()
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer._print_unexpected_results(ur)
- self.assertEmpty(err)
- self.assertNotEmpty(out)
-
- printer, err, out = self.get_printer()
- ur = get_unexpected_results(expected=False, passing=True, flaky=False)
- printer._print_unexpected_results(ur)
- self.assertEmpty(err)
- self.assertNotEmpty(out)
-
- def test_print_unexpected_results_buildbot(self):
- # FIXME: Test that print_unexpected_results() produces the printer the
- # buildbot is expecting.
- pass
-
def test_test_status_line(self):
- printer, _, _ = self.get_printer()
+ printer, _ = self.get_printer()
printer._meter.number_of_columns = lambda: 80
actual = printer._test_status_line('fast/dom/HTMLFormElement/associated-elements-after-index-assertion-fail1.html', ' passed')
self.assertEqual(80, len(actual))
@@ -282,12 +170,18 @@ BUGX : failures/expected/timeout.html = TIMEOUT
self.assertEqual(actual, '[0/0] associated-elements-after-index-assertion-fail1.html passed')
def test_details(self):
- printer, err, _ = self.get_printer(['--details'])
+ printer, err = self.get_printer(['--details'])
result = self.get_result('passes/image.html')
printer.print_started_test('passes/image.html')
printer.print_finished_test(result, expected=False, exp_str='', got_str='')
self.assertNotEmpty(err)
+ def test_print_found(self):
+ printer, err = self.get_printer()
-if __name__ == '__main__':
- unittest.main()
+ printer.print_found(100, 10, 1, 1)
+ self.assertWritten(err, ["Found 100 tests; running 10, skipping 90.\n"])
+
+ self.reset(err)
+ printer.print_found(100, 10, 2, 3)
+ self.assertWritten(err, ["Found 100 tests; running 10 (6 times each: --repeat-each=2 --iterations=3), skipping 90.\n"])
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index 41115e41d..0df3cc0ea 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
# Copyright (C) 2012 Zoltan Horvath, Adobe Systems Incorporated. All rights reserved.
#
@@ -46,37 +45,166 @@ if sys.platform not in ('cygwin', 'win32'):
import webkitpy.thirdparty.autoinstalled.webpagereplay.replay
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
-from webkitpy.layout_tests.port.driver import DriverInput
-from webkitpy.layout_tests.port.driver import DriverOutput
+from webkitpy.port.driver import DriverInput
+from webkitpy.port.driver import DriverOutput
+DEFAULT_TEST_RUNNER_COUNT = 4
_log = logging.getLogger(__name__)
+class PerfTestMetric(object):
+ def __init__(self, metric, unit=None, iterations=None):
+ # FIXME: Fix runner.js to report correct metric names
+ self._iterations = iterations or []
+ self._unit = unit or self.metric_to_unit(metric)
+ self._metric = self.time_unit_to_metric(self._unit) if metric == 'Time' else metric
+
+ def name(self):
+ return self._metric
+
+ def has_values(self):
+ return bool(self._iterations)
+
+ def append_group(self, group_values):
+ assert isinstance(group_values, list)
+ self._iterations.append(group_values)
+
+ def grouped_iteration_values(self):
+ return self._iterations
+
+ def flattened_iteration_values(self):
+ return [value for group_values in self._iterations for value in group_values]
+
+ def unit(self):
+ return self._unit
+
+ @staticmethod
+ def metric_to_unit(metric):
+ assert metric in ('Time', 'Malloc', 'JSHeap')
+ return 'ms' if metric == 'Time' else 'bytes'
+
+ @staticmethod
+ def time_unit_to_metric(unit):
+ return {'fps': 'FrameRate', 'runs/s': 'Runs', 'ms': 'Time'}[unit]
+
+
class PerfTest(object):
- def __init__(self, port, test_name, path_or_url):
+
+ def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
self._port = port
self._test_name = test_name
- self._path_or_url = path_or_url
+ self._test_path = test_path
+ self._description = None
+ self._metrics = {}
+ self._ordered_metrics_name = []
+ self._test_runner_count = test_runner_count
def test_name(self):
return self._test_name
- def path_or_url(self):
- return self._path_or_url
+ def test_name_without_file_extension(self):
+ return re.sub(r'\.\w+$', '', self.test_name())
+
+ def test_path(self):
+ return self._test_path
+
+ def description(self):
+ return self._description
def prepare(self, time_out_ms):
return True
- def run(self, driver, time_out_ms):
- output = self.run_single(driver, self.path_or_url(), time_out_ms)
- self._filter_stderr(output)
+ def _create_driver(self):
+ return self._port.create_driver(worker_number=0, no_timeout=True)
+
+ def run(self, time_out_ms):
+ for _ in xrange(self._test_runner_count):
+ driver = self._create_driver()
+ try:
+ if not self._run_with_driver(driver, time_out_ms):
+ return None
+ finally:
+ driver.stop()
+
+ should_log = not self._port.get_option('profile')
+ if should_log and self._description:
+ _log.info('DESCRIPTION: %s' % self._description)
+
+ results = {}
+ for metric_name in self._ordered_metrics_name:
+ metric = self._metrics[metric_name]
+ results[metric.name()] = metric.grouped_iteration_values()
+ if should_log:
+ legacy_chromium_bot_compatible_name = self.test_name_without_file_extension().replace('/', ': ')
+ self.log_statistics(legacy_chromium_bot_compatible_name + ': ' + metric.name(),
+ metric.flattened_iteration_values(), metric.unit())
+
+ return results
+
+ @staticmethod
+ def log_statistics(test_name, values, unit):
+ sorted_values = sorted(values)
+
+ # Compute the mean and variance using Knuth's online algorithm (has good numerical stability).
+ square_sum = 0
+ mean = 0
+ for i, time in enumerate(sorted_values):
+ delta = time - mean
+ sweep = i + 1.0
+ mean += delta / sweep
+ square_sum += delta * (time - mean)
+
+ middle = int(len(sorted_values) / 2)
+ mean = sum(sorted_values) / len(values)
+ median = sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2
+ stdev = math.sqrt(square_sum / (len(sorted_values) - 1)) if len(sorted_values) > 1 else 0
+
+ _log.info('RESULT %s= %s %s' % (test_name, mean, unit))
+ _log.info('median= %s %s, stdev= %s %s, min= %s %s, max= %s %s' %
+ (median, unit, stdev, unit, sorted_values[0], unit, sorted_values[-1], unit))
+
+ _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
+ _metrics_regex = re.compile(r'^(?P<metric>Time|Malloc|JS Heap):')
+ _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
+ _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
+
+ def _run_with_driver(self, driver, time_out_ms):
+ output = self.run_single(driver, self.test_path(), time_out_ms)
+ self._filter_output(output)
if self.run_failed(output):
- return None
- return self.parse_output(output)
+ return False
+
+ current_metric = None
+ for line in re.split('\n', output.text):
+ description_match = self._description_regex.match(line)
+ metric_match = self._metrics_regex.match(line)
+ score = self._score_regex.match(line)
+
+ if description_match:
+ self._description = description_match.group('description')
+ elif metric_match:
+ current_metric = metric_match.group('metric').replace(' ', '')
+ elif score:
+ if score.group('key') != 'values':
+ continue
+
+ metric = self._ensure_metrics(current_metric, score.group('unit'))
+ metric.append_group(map(lambda value: float(value), score.group('value').split(', ')))
+ else:
+ _log.error('ERROR: ' + line)
+ return False
+
+ return True
+
+ def _ensure_metrics(self, metric_name, unit=None):
+ if metric_name not in self._metrics:
+ self._metrics[metric_name] = PerfTestMetric(metric_name, unit)
+ self._ordered_metrics_name.append(metric_name)
+ return self._metrics[metric_name]
- def run_single(self, driver, path_or_url, time_out_ms, should_run_pixel_test=False):
- return driver.run_test(DriverInput(path_or_url, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test), stop_when_done=False)
+ def run_single(self, driver, test_path, time_out_ms, should_run_pixel_test=False):
+ return driver.run_test(DriverInput(test_path, time_out_ms, image_hash=None, should_run_pixel_test=should_run_pixel_test), stop_when_done=False)
def run_failed(self, output):
if output.text == None or output.error:
@@ -93,7 +221,8 @@ class PerfTest(object):
return True
- def _should_ignore_line(self, regexps, line):
+ @staticmethod
+ def _should_ignore_line(regexps, line):
if not line:
return True
for regexp in regexps:
@@ -105,17 +234,11 @@ class PerfTest(object):
re.compile(r'^Unknown option:'),
re.compile(r'^\[WARNING:proxy_service.cc'),
re.compile(r'^\[INFO:'),
+ # These stderr messages come from content_shell on chromium-linux.
+ re.compile(r'INFO:SkFontHost_fontconfig.cpp'),
+ re.compile(r'Running without the SUID sandbox'),
]
- def _should_ignore_line_in_stderr(self, line):
- return self._should_ignore_line(self._lines_to_ignore_in_stderr, line)
-
- def _filter_stderr(self, output):
- if not output.error:
- return
- filtered_error = '\n'.join([line for line in re.split('\n', output.error) if not self._should_ignore_line_in_stderr(line)])
- output.error = filtered_error if filtered_error else None
-
_lines_to_ignore_in_parser_result = [
re.compile(r'^Running \d+ times$'),
re.compile(r'^Ignoring warm-up '),
@@ -126,163 +249,23 @@ class PerfTest(object):
re.compile(re.escape("""frame "<!--framePath //<!--frame0-->-->" - has 1 onunload handler(s)""")),
re.compile(re.escape("""frame "<!--framePath //<!--frame0-->/<!--frame0-->-->" - has 1 onunload handler(s)""")),
# Following is for html5.html
- re.compile(re.escape("""Blocked access to external URL http://www.whatwg.org/specs/web-apps/current-work/"""))]
-
- def _should_ignore_line_in_parser_test_result(self, line):
- return self._should_ignore_line(self._lines_to_ignore_in_parser_result, line)
-
- _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
- _result_classes = ['Time', 'JS Heap', 'Malloc']
- _result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):')
- _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
- _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')
-
- def parse_output(self, output):
- test_failed = False
- results = {}
- ordered_results_keys = []
- test_name = re.sub(r'\.\w+$', '', self._test_name)
- description_string = ""
- result_class = ""
- for line in re.split('\n', output.text):
- description = self._description_regex.match(line)
- if description:
- description_string = description.group('description')
- continue
-
- result_class_match = self._result_class_regex.match(line)
- if result_class_match:
- result_class = result_class_match.group('resultclass')
- continue
-
- score = self._score_regex.match(line)
- if score:
- key = score.group('key')
- if key == 'values':
- value = [float(number) for number in score.group('value').split(', ')]
- else:
- value = float(score.group('value'))
- unit = score.group('unit')
- name = test_name
- if result_class != 'Time':
- name += ':' + result_class.replace(' ', '')
- if name not in ordered_results_keys:
- ordered_results_keys.append(name)
- results.setdefault(name, {})
- results[name]['unit'] = unit
- results[name][key] = value
- continue
-
- if not self._should_ignore_line_in_parser_test_result(line):
- test_failed = True
- _log.error(line)
-
- if test_failed:
- return None
-
- if set(self._statistics_keys) != set(results[test_name].keys() + ['values']):
- # values is not provided by Dromaeo tests.
- _log.error("The test didn't report all statistics.")
- return None
-
- if not self._port.get_option('profile'):
- for result_name in ordered_results_keys:
- if result_name == test_name:
- self.output_statistics(result_name, results[result_name], description_string)
- else:
- self.output_statistics(result_name, results[result_name])
- return results
-
- def output_statistics(self, test_name, results, description_string=None):
- unit = results['unit']
- if description_string:
- _log.info('DESCRIPTION: %s' % description_string)
- _log.info('RESULT %s= %s %s' % (test_name.replace(':', ': ').replace('/', ': '), results['avg'], unit))
- _log.info(', '.join(['%s= %s %s' % (key, results[key], unit) for key in self._statistics_keys[1:5]]))
-
-
-class ChromiumStylePerfTest(PerfTest):
- _chromium_style_result_regex = re.compile(r'^RESULT\s+(?P<name>[^=]+)\s*=\s+(?P<value>\d+(\.\d+)?)\s*(?P<unit>\w+)$')
-
- def __init__(self, port, test_name, path_or_url):
- super(ChromiumStylePerfTest, self).__init__(port, test_name, path_or_url)
-
- def parse_output(self, output):
- test_failed = False
- results = {}
- for line in re.split('\n', output.text):
- resultLine = ChromiumStylePerfTest._chromium_style_result_regex.match(line)
- if resultLine:
- # FIXME: Store the unit
- results[self.test_name() + ':' + resultLine.group('name').replace(' ', '')] = float(resultLine.group('value'))
- _log.info(line)
- elif not len(line) == 0:
- test_failed = True
- _log.error(line)
- return results if results and not test_failed else None
-
-
-class PageLoadingPerfTest(PerfTest):
- _FORCE_GC_FILE = 'resources/force-gc.html'
-
- def __init__(self, port, test_name, path_or_url):
- super(PageLoadingPerfTest, self).__init__(port, test_name, path_or_url)
- self.force_gc_test = self._port.host.filesystem.join(self._port.perf_tests_dir(), self._FORCE_GC_FILE)
-
- def run_single(self, driver, path_or_url, time_out_ms, should_run_pixel_test=False):
- # Force GC to prevent pageload noise. See https://bugs.webkit.org/show_bug.cgi?id=98203
- super(PageLoadingPerfTest, self).run_single(driver, self.force_gc_test, time_out_ms, False)
- return super(PageLoadingPerfTest, self).run_single(driver, path_or_url, time_out_ms, should_run_pixel_test)
-
- def calculate_statistics(self, values):
- sorted_values = sorted(values)
-
- # Compute the mean and variance using Knuth's online algorithm (has good numerical stability).
- squareSum = 0
- mean = 0
- for i, time in enumerate(sorted_values):
- delta = time - mean
- sweep = i + 1.0
- mean += delta / sweep
- squareSum += delta * (time - mean)
-
- middle = int(len(sorted_values) / 2)
- result = {'avg': mean,
- 'min': sorted_values[0],
- 'max': sorted_values[-1],
- 'median': sorted_values[middle] if len(sorted_values) % 2 else (sorted_values[middle - 1] + sorted_values[middle]) / 2,
- 'stdev': math.sqrt(squareSum / (len(sorted_values) - 1))}
- return result
-
- def run(self, driver, time_out_ms):
- results = {}
- results.setdefault(self.test_name(), {'unit': 'ms', 'values': []})
-
- for i in range(0, 20):
- output = self.run_single(driver, self.path_or_url(), time_out_ms)
- if not output or self.run_failed(output):
- return None
- if i == 0:
- continue
-
- results[self.test_name()]['values'].append(output.test_time * 1000)
-
- if not output.measurements:
- continue
+ re.compile(re.escape("""Blocked access to external URL http://www.whatwg.org/specs/web-apps/current-work/""")),
+ re.compile(r"CONSOLE MESSAGE: (line \d+: )?Blocked script execution in '[A-Za-z0-9\-\.:]+' because the document's frame is sandboxed and the 'allow-scripts' permission is not set."),
+ re.compile(r"CONSOLE MESSAGE: (line \d+: )?Not allowed to load local resource"),
+ # Dromaeo reports values for subtests. Ignore them for now.
+ re.compile(r'(?P<name>.+): \[(?P<values>(\d+(.\d+)?,\s+)*\d+(.\d+)?)\]'),
+ ]
- for result_class, result in output.measurements.items():
- name = self.test_name() + ':' + result_class
- if not name in results:
- results.setdefault(name, {'values': []})
- results[name]['values'].append(result)
- if result_class == 'Malloc' or result_class == 'JSHeap':
- results[name]['unit'] = 'bytes'
+ def _filter_output(self, output):
+ if output.error:
+ output.error = '\n'.join([line for line in re.split('\n', output.error) if not self._should_ignore_line(self._lines_to_ignore_in_stderr, line)])
+ if output.text:
+ output.text = '\n'.join([line for line in re.split('\n', output.text) if not self._should_ignore_line(self._lines_to_ignore_in_parser_result, line)])
- for result_class in results.keys():
- results[result_class].update(self.calculate_statistics(results[result_class]['values']))
- self.output_statistics(result_class, results[result_class], '')
- return results
+class SingleProcessPerfTest(PerfTest):
+ def __init__(self, port, test_name, test_path, test_runner_count=1):
+ super(SingleProcessPerfTest, self).__init__(port, test_name, test_path, test_runner_count)
class ReplayServer(object):
@@ -320,9 +303,12 @@ class ReplayServer(object):
self.stop()
-class ReplayPerfTest(PageLoadingPerfTest):
- def __init__(self, port, test_name, path_or_url):
- super(ReplayPerfTest, self).__init__(port, test_name, path_or_url)
+class ReplayPerfTest(PerfTest):
+ _FORCE_GC_FILE = 'resources/force-gc.html'
+
+ def __init__(self, port, test_name, test_path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
+ super(ReplayPerfTest, self).__init__(port, test_name, test_path, test_runner_count)
+ self.force_gc_test = self._port.host.filesystem.join(self._port.perf_tests_dir(), self._FORCE_GC_FILE)
def _start_replay_server(self, archive, record):
try:
@@ -335,11 +321,11 @@ class ReplayPerfTest(PageLoadingPerfTest):
def prepare(self, time_out_ms):
filesystem = self._port.host.filesystem
- path_without_ext = filesystem.splitext(self.path_or_url())[0]
+ path_without_ext = filesystem.splitext(self.test_path())[0]
self._archive_path = filesystem.join(path_without_ext + '.wpr')
self._expected_image_path = filesystem.join(path_without_ext + '-expected.png')
- self._url = filesystem.read_text_file(self.path_or_url()).split('\n')[0]
+ self._url = filesystem.read_text_file(self.test_path()).split('\n')[0]
if filesystem.isfile(self._archive_path) and filesystem.isfile(self._expected_image_path):
_log.info("Replay ready for %s" % self._archive_path)
@@ -361,6 +347,39 @@ class ReplayPerfTest(PageLoadingPerfTest):
return True
+ def _run_with_driver(self, driver, time_out_ms):
+ times = []
+ malloc = []
+ js_heap = []
+
+ for i in range(0, 6):
+ output = self.run_single(driver, self.test_path(), time_out_ms)
+ if not output or self.run_failed(output):
+ return False
+ if i == 0:
+ continue
+
+ times.append(output.test_time * 1000)
+
+ if not output.measurements:
+ continue
+
+ for metric, result in output.measurements.items():
+ assert metric == 'Malloc' or metric == 'JSHeap'
+ if metric == 'Malloc':
+ malloc.append(result)
+ else:
+ js_heap.append(result)
+
+ if times:
+ self._ensure_metrics('Time').append_group(times)
+ if malloc:
+ self._ensure_metrics('Malloc').append_group(malloc)
+ if js_heap:
+ self._ensure_metrics('JSHeap').append_group(js_heap)
+
+ return True
+
def run_single(self, driver, url, time_out_ms, record=False):
server = self._start_replay_server(self._archive_path, record)
if not server:
@@ -374,6 +393,8 @@ class ReplayPerfTest(PageLoadingPerfTest):
return None
_log.debug("Web page replay started. Loading the page.")
+ # Force GC to prevent pageload noise. See https://bugs.webkit.org/show_bug.cgi?id=98203
+ super(ReplayPerfTest, self).run_single(driver, self.force_gc_test, time_out_ms, False)
output = super(ReplayPerfTest, self).run_single(driver, self._url, time_out_ms, should_run_pixel_test=True)
if self.run_failed(output):
return None
@@ -400,13 +421,13 @@ class ReplayPerfTest(PageLoadingPerfTest):
class PerfTestFactory(object):
_pattern_map = [
- (re.compile(r'^inspector/'), ChromiumStylePerfTest),
+ (re.compile(r'^Dromaeo/'), SingleProcessPerfTest),
(re.compile(r'(.+)\.replay$'), ReplayPerfTest),
]
@classmethod
- def create_perf_test(cls, port, test_name, path):
+ def create_perf_test(cls, port, test_name, path, test_runner_count=DEFAULT_TEST_RUNNER_COUNT):
for (pattern, test_class) in cls._pattern_map:
if pattern.match(test_name):
- return test_class(port, test_name, path)
- return PerfTest(port, test_name, path)
+ return test_class(port, test_name, path, test_runner_count)
+ return PerfTest(port, test_name, path, test_runner_count)
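
The log_statistics helper above computes the mean and sample standard deviation with Knuth's single-pass update (often called Welford's algorithm), which stays numerically stable where the naive sum-of-squares formula can lose precision. A self-contained sketch of the same recurrence, with an illustrative function name:

    import math

    def online_mean_and_stdev(values):
        # After processing each value, 'mean' is the running mean and
        # 'square_sum' is the running sum of squared deviations.
        mean = 0.0
        square_sum = 0.0
        for i, x in enumerate(values):
            delta = x - mean
            mean += delta / (i + 1.0)
            square_sum += delta * (x - mean)
        stdev = math.sqrt(square_sum / (len(values) - 1)) if len(values) > 1 else 0.0
        return mean, stdev

    # online_mean_and_stdev([1080, 1120, 1095, 1101, 1104]) gives (1100.0, ~14.509),
    # matching the avg/stdev lines used in the unit-test fixtures below.
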
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
index 741e8b644..b904b9760 100755..100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -28,171 +27,211 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
+import json
import math
-import unittest
+import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.layout_tests.port.driver import DriverOutput
-from webkitpy.layout_tests.port.test import TestDriver
-from webkitpy.layout_tests.port.test import TestPort
-from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
-from webkitpy.performance_tests.perftest import PageLoadingPerfTest
+from webkitpy.port.driver import DriverOutput
+from webkitpy.port.test import TestDriver
+from webkitpy.port.test import TestPort
from webkitpy.performance_tests.perftest import PerfTest
+from webkitpy.performance_tests.perftest import PerfTestMetric
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import ReplayPerfTest
+from webkitpy.performance_tests.perftest import SingleProcessPerfTest
class MockPort(TestPort):
def __init__(self, custom_run_test=None):
super(MockPort, self).__init__(host=MockHost(), custom_run_test=custom_run_test)
-class MainTest(unittest.TestCase):
+
+class TestPerfTestMetric(unittest.TestCase):
+ def test_init_set_missing_unit(self):
+ self.assertEqual(PerfTestMetric('Time', iterations=[1, 2, 3, 4, 5]).unit(), 'ms')
+ self.assertEqual(PerfTestMetric('Malloc', iterations=[1, 2, 3, 4, 5]).unit(), 'bytes')
+ self.assertEqual(PerfTestMetric('JSHeap', iterations=[1, 2, 3, 4, 5]).unit(), 'bytes')
+
+ def test_init_set_time_metric(self):
+ self.assertEqual(PerfTestMetric('Time', 'ms').name(), 'Time')
+ self.assertEqual(PerfTestMetric('Time', 'fps').name(), 'FrameRate')
+ self.assertEqual(PerfTestMetric('Time', 'runs/s').name(), 'Runs')
+
+ def test_has_values(self):
+ self.assertFalse(PerfTestMetric('Time').has_values())
+ self.assertTrue(PerfTestMetric('Time', iterations=[1]).has_values())
+
+ def test_append(self):
+ metric = PerfTestMetric('Time')
+ metric2 = PerfTestMetric('Time')
+ self.assertFalse(metric.has_values())
+ self.assertFalse(metric2.has_values())
+
+ metric.append_group([1])
+ self.assertTrue(metric.has_values())
+ self.assertFalse(metric2.has_values())
+ self.assertEqual(metric.grouped_iteration_values(), [[1]])
+ self.assertEqual(metric.flattened_iteration_values(), [1])
+
+ metric.append_group([2])
+ self.assertEqual(metric.grouped_iteration_values(), [[1], [2]])
+ self.assertEqual(metric.flattened_iteration_values(), [1, 2])
+
+ metric2.append_group([3])
+ self.assertTrue(metric2.has_values())
+ self.assertEqual(metric.flattened_iteration_values(), [1, 2])
+ self.assertEqual(metric2.flattened_iteration_values(), [3])
+
+ metric.append_group([4, 5])
+ self.assertEqual(metric.grouped_iteration_values(), [[1], [2], [4, 5]])
+ self.assertEqual(metric.flattened_iteration_values(), [1, 2, 4, 5])
+
+
+class TestPerfTest(unittest.TestCase):
+ def _assert_results_are_correct(self, test, output):
+ test.run_single = lambda driver, path, time_out_ms: output
+ self.assertTrue(test._run_with_driver(None, None))
+ self.assertEqual(test._metrics.keys(), ['Time'])
+ self.assertEqual(test._metrics['Time'].flattened_iteration_values(), [1080, 1120, 1095, 1101, 1104])
+
def test_parse_output(self):
- output = DriverOutput('\n'.join([
- 'Running 20 times',
- 'Ignoring warm-up run (1115)',
- '',
- 'Time:',
- 'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
- 'avg 1100 ms',
- 'median 1101 ms',
- 'stdev 11 ms',
- 'min 1080 ms',
- 'max 1120 ms']), image=None, image_hash=None, audio=None)
+ output = DriverOutput("""
+Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms
+""", image=None, image_hash=None, audio=None)
output_capture = OutputCapture()
output_capture.capture_output()
try:
test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
- self.assertEqual(test.parse_output(output),
- {'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms',
- 'values': [i for i in range(1, 20)]}})
+ self._assert_results_are_correct(test, output)
finally:
- pass
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
- self.assertEqual(actual_logs, 'RESULT some-test= 1100.0 ms\nmedian= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms\n')
+ self.assertEqual(actual_logs, '')
def test_parse_output_with_failing_line(self):
- output = DriverOutput('\n'.join([
- 'Running 20 times',
- 'Ignoring warm-up run (1115)',
- '',
- 'some-unrecognizable-line',
- '',
- 'Time:'
- 'values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms',
- 'avg 1100 ms',
- 'median 1101 ms',
- 'stdev 11 ms',
- 'min 1080 ms',
- 'max 1120 ms']), image=None, image_hash=None, audio=None)
+ output = DriverOutput("""
+Running 20 times
+Ignoring warm-up run (1115)
+
+some-unrecognizable-line
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms
+""", image=None, image_hash=None, audio=None)
output_capture = OutputCapture()
output_capture.capture_output()
try:
test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
- self.assertEqual(test.parse_output(output), None)
+ test.run_single = lambda driver, path, time_out_ms: output
+ self.assertFalse(test._run_with_driver(None, None))
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
- self.assertEqual(actual_logs, 'some-unrecognizable-line\n')
+ self.assertEqual(actual_logs, 'ERROR: some-unrecognizable-line\n')
+
+ def test_parse_output_with_description(self):
+ output = DriverOutput("""
+Description: this is a test description.
+
+Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms""", image=None, image_hash=None, audio=None)
+ test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
+ self._assert_results_are_correct(test, output)
+ self.assertEqual(test.description(), 'this is a test description.')
def test_ignored_stderr_lines(self):
test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
- ignored_lines = [
- "Unknown option: --foo-bar",
- "[WARNING:proxy_service.cc] bad moon a-rising",
- "[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/",
- ]
- for line in ignored_lines:
- self.assertTrue(test._should_ignore_line_in_stderr(line))
-
- non_ignored_lines = [
- "Should not be ignored",
- "[WARNING:chrome.cc] Something went wrong",
- "[ERROR:main.cc] The sky has fallen",
- ]
- for line in non_ignored_lines:
- self.assertFalse(test._should_ignore_line_in_stderr(line))
-
-
-class TestPageLoadingPerfTest(unittest.TestCase):
- class MockDriver(object):
- def __init__(self, values, test, measurements=None):
- self._values = values
- self._index = 0
- self._test = test
- self._measurements = measurements
-
- def run_test(self, input, stop_when_done):
- if input.test_name == self._test.force_gc_test:
- return
- value = self._values[self._index]
- self._index += 1
- if isinstance(value, str):
- return DriverOutput('some output', image=None, image_hash=None, audio=None, error=value)
- else:
- return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=self._values[self._index - 1], measurements=self._measurements)
-
- def test_run(self):
- port = MockPort()
- test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
- driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test)
+ output_with_lines_to_ignore = DriverOutput('', image=None, image_hash=None, audio=None, error="""
+Unknown option: --foo-bar
+Should not be ignored
+[WARNING:proxy_service.cc] bad moon a-rising
+[WARNING:chrome.cc] Something went wrong
+[INFO:SkFontHost_android.cpp(1158)] Use Test Config File Main /data/local/tmp/drt/android_main_fonts.xml, Fallback /data/local/tmp/drt/android_fallback_fonts.xml, Font Dir /data/local/tmp/drt/fonts/
+[ERROR:main.cc] The sky has fallen""")
+ test._filter_output(output_with_lines_to_ignore)
+ self.assertEqual(output_with_lines_to_ignore.error,
+ "Should not be ignored\n"
+ "[WARNING:chrome.cc] Something went wrong\n"
+ "[ERROR:main.cc] The sky has fallen")
+
+ def test_parse_output_with_subtests(self):
+ output = DriverOutput("""
+Running 20 times
+some test: [1, 2, 3, 4, 5]
+other test = else: [6, 7, 8, 9, 10]
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms
+""", image=None, image_hash=None, audio=None)
output_capture = OutputCapture()
output_capture.capture_output()
try:
- self.assertEqual(test.run(driver, None),
- {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
- 'values': [i * 1000 for i in range(2, 21)]}})
+ test = PerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
+ self._assert_results_are_correct(test, output)
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
- self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n')
+ self.assertEqual(actual_logs, '')
- def test_run_with_memory_output(self):
- port = MockPort()
- test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
- memory_results = {'Malloc': 10, 'JSHeap': 5}
- self.maxDiff = None
- driver = TestPageLoadingPerfTest.MockDriver(range(1, 21), test, memory_results)
- output_capture = OutputCapture()
- output_capture.capture_output()
- try:
- self.assertEqual(test.run(driver, None),
- {'some-test': {'max': 20000, 'avg': 11000.0, 'median': 11000, 'stdev': 5627.314338711378, 'min': 2000, 'unit': 'ms',
- 'values': [i * 1000 for i in range(2, 21)]},
- 'some-test:Malloc': {'max': 10, 'avg': 10.0, 'median': 10, 'min': 10, 'stdev': 0.0, 'unit': 'bytes',
- 'values': [10] * 19},
- 'some-test:JSHeap': {'max': 5, 'avg': 5.0, 'median': 5, 'min': 5, 'stdev': 0.0, 'unit': 'bytes',
- 'values': [5] * 19}})
- finally:
- actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
- self.assertEqual(actual_stdout, '')
- self.assertEqual(actual_stderr, '')
- self.assertEqual(actual_logs, 'RESULT some-test= 11000.0 ms\nmedian= 11000 ms, stdev= 5627.31433871 ms, min= 2000 ms, max= 20000 ms\n'
- + 'RESULT some-test: Malloc= 10.0 bytes\nmedian= 10 bytes, stdev= 0.0 bytes, min= 10 bytes, max= 10 bytes\n'
- + 'RESULT some-test: JSHeap= 5.0 bytes\nmedian= 5 bytes, stdev= 0.0 bytes, min= 5 bytes, max= 5 bytes\n')
- def test_run_with_bad_output(self):
- output_capture = OutputCapture()
- output_capture.capture_output()
- try:
- port = MockPort()
- test = PageLoadingPerfTest(port, 'some-test', '/path/some-dir/some-test')
- driver = TestPageLoadingPerfTest.MockDriver([1, 2, 3, 4, 5, 6, 7, 'some error', 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20], test)
- self.assertEqual(test.run(driver, None), None)
- finally:
- actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
- self.assertEqual(actual_stdout, '')
- self.assertEqual(actual_stderr, '')
- self.assertEqual(actual_logs, 'error: some-test\nsome error\n')
+class TestSingleProcessPerfTest(unittest.TestCase):
+ def test_use_only_one_process(self):
+ called = [0]
+ def run_single(driver, path, time_out_ms):
+ called[0] += 1
+ return DriverOutput("""
+Running 20 times
+Ignoring warm-up run (1115)
-class TestReplayPerfTest(unittest.TestCase):
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50862 ms
+min 1080 ms
+max 1120 ms""", image=None, image_hash=None, audio=None)
+ test = SingleProcessPerfTest(MockPort(), 'some-test', '/path/some-dir/some-test')
+ test.run_single = run_single
+ self.assertTrue(test.run(0))
+ self.assertEqual(called[0], 1)
+
+
+class TestReplayPerfTest(unittest.TestCase):
class ReplayTestPort(MockPort):
def __init__(self, custom_run_test=None):
@@ -239,7 +278,7 @@ class TestReplayPerfTest(unittest.TestCase):
loaded_pages.append(test_input)
self._add_file(port, '/path/some-dir', 'some-test.wpr', 'wpr content')
return DriverOutput('actual text', 'actual image', 'actual checksum',
- audio=None, crash=False, timeout=False, error=False)
+ audio=None, crash=False, timeout=False, error=False, test_time=12345)
test, port = self._setup_test(run_test)
test._archive_path = '/path/some-dir/some-test.wpr'
@@ -247,7 +286,8 @@ class TestReplayPerfTest(unittest.TestCase):
try:
driver = port.create_driver(worker_number=1, no_timeout=True)
- self.assertTrue(test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100))
+ output = test.run_single(driver, '/path/some-dir/some-test.replay', time_out_ms=100)
+ self.assertTrue(output)
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
@@ -258,6 +298,7 @@ class TestReplayPerfTest(unittest.TestCase):
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, '')
self.assertEqual(port.host.filesystem.read_binary_file('/path/some-dir/some-test-actual.png'), 'actual image')
+ self.assertEqual(output.test_time, 12345)
def test_run_single_fails_without_webpagereplay(self):
output_capture = OutputCapture()
@@ -277,6 +318,59 @@ class TestReplayPerfTest(unittest.TestCase):
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, "Web page replay didn't start.\n")
+ def test_run_with_driver_accumulates_results(self):
+ port = MockPort()
+ test, port = self._setup_test()
+ counter = [0]
+
+        def mock_run_single(driver, path, timeout):
+ counter[0] += 1
+ return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=counter[0], measurements={})
+
+        test.run_single = mock_run_single
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ driver = port.create_driver(worker_number=1, no_timeout=True)
+ self.assertTrue(test._run_with_driver(driver, None))
+ finally:
+ actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+ self.assertEqual(actual_stdout, '')
+ self.assertEqual(actual_stderr, '')
+ self.assertEqual(actual_logs, '')
+
+ self.assertEqual(test._metrics.keys(), ['Time'])
+ self.assertEqual(test._metrics['Time'].flattened_iteration_values(), [float(i * 1000) for i in range(2, 7)])
+
+ def test_run_with_driver_accumulates_memory_results(self):
+ port = MockPort()
+ test, port = self._setup_test()
+ counter = [0]
+
+        def mock_run_single(driver, path, timeout):
+ counter[0] += 1
+ return DriverOutput('some output', image=None, image_hash=None, audio=None, test_time=counter[0], measurements={'Malloc': 10, 'JSHeap': 5})
+
+        test.run_single = mock_run_single
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ driver = port.create_driver(worker_number=1, no_timeout=True)
+ self.assertTrue(test._run_with_driver(driver, None))
+ finally:
+ actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
+
+ self.assertEqual(actual_stdout, '')
+ self.assertEqual(actual_stderr, '')
+ self.assertEqual(actual_logs, '')
+
+ metrics = test._metrics
+ self.assertEqual(sorted(metrics.keys()), ['JSHeap', 'Malloc', 'Time'])
+ self.assertEqual(metrics['Time'].flattened_iteration_values(), [float(i * 1000) for i in range(2, 7)])
+ self.assertEqual(metrics['Malloc'].flattened_iteration_values(), [float(10)] * 5)
+ self.assertEqual(metrics['JSHeap'].flattened_iteration_values(), [float(5)] * 5)
+
def test_prepare_fails_when_wait_until_ready_fails(self):
output_capture = OutputCapture()
output_capture.capture_output()
@@ -337,7 +431,7 @@ class TestReplayPerfTest(unittest.TestCase):
test, port = self._setup_test(run_test)
try:
- self.assertEqual(test.prepare(time_out_ms=100), True)
+ self.assertTrue(test.prepare(time_out_ms=100))
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
@@ -361,7 +455,7 @@ class TestReplayPerfTest(unittest.TestCase):
test.run_single = run_single
try:
- self.assertEqual(test.prepare(time_out_ms=100), False)
+ self.assertFalse(test.prepare(time_out_ms=100))
finally:
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertTrue(called[0])
@@ -371,15 +465,8 @@ class TestReplayPerfTest(unittest.TestCase):
self.assertEqual(actual_stderr, '')
self.assertEqual(actual_logs, "Preparing replay for some-test.replay\nFailed to prepare a replay for some-test.replay\n")
+
class TestPerfTestFactory(unittest.TestCase):
def test_regular_test(self):
test = PerfTestFactory.create_perf_test(MockPort(), 'some-dir/some-test', '/path/some-dir/some-test')
self.assertEqual(test.__class__, PerfTest)
-
- def test_inspector_test(self):
- test = PerfTestFactory.create_perf_test(MockPort(), 'inspector/some-test', '/path/inspector/some-test')
- self.assertEqual(test.__class__, ChromiumStylePerfTest)
-
-
-if __name__ == '__main__':
- unittest.main()
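
All of the fixtures above report their raw samples on a "values ..." line; PerfTest._run_with_driver keeps only that statistic and recomputes avg/median/stdev itself. A rough sketch of how such a line is turned into floats, reusing the _score_regex pattern defined in perftest.py (the surrounding driver code is illustrative):

    import re

    _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit', 'values']
    _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) +
                              r')\s+(?P<value>([0-9\.]+(,\s+)?)+)\s*(?P<unit>.*)')

    match = _score_regex.match('values 1080, 1120, 1095, 1101, 1104 ms')
    if match and match.group('key') == 'values':
        group = [float(v) for v in match.group('value').split(', ')]
        # group is [1080.0, 1120.0, 1095.0, 1101.0, 1104.0] and
        # match.group('unit') is 'ms'; the group is appended to a PerfTestMetric.
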
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index 6dc4742b7..adfabffbd 100755..100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -34,12 +33,15 @@ import json
import logging
import optparse
import time
+import datetime
from webkitpy.common import find_files
from webkitpy.common.checkout.scm.detection import SCMDetector
+from webkitpy.common.config.urls import view_source_url
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.performance_tests.perftest import PerfTestFactory
+from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
_log = logging.getLogger(__name__)
@@ -66,10 +68,9 @@ class PerfTestsRunner(object):
self._host.initialize_scm()
self._webkit_base_dir_len = len(self._port.webkit_base())
self._base_path = self._port.perf_tests_dir()
- self._results = {}
self._timestamp = time.time()
- self._needs_http = None
- self._has_http_lock = False
+ self._utc_timestamp = datetime.datetime.utcnow()
+
@staticmethod
def _parse_args(args=None):
@@ -83,10 +84,6 @@ class PerfTestsRunner(object):
help='Set the configuration to Release'),
optparse.make_option("--platform",
help="Specify port/platform being tested (i.e. chromium-mac)"),
- optparse.make_option("--chromium",
- action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
- optparse.make_option("--chromium-android",
- action="store_const", const='chromium-android', dest='platform', help='Alias for --platform=chromium-android'),
optparse.make_option("--builder-name",
help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
optparse.make_option("--build-number",
@@ -99,8 +96,6 @@ class PerfTestsRunner(object):
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--time-out-ms", default=600 * 1000,
help="Set the timeout for each test"),
- optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
- help="Pause before running the tests to let user attach a performance monitor."),
optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
help="Do no generate results JSON and results page."),
optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
@@ -119,16 +114,25 @@ class PerfTestsRunner(object):
help="Use WebKitTestRunner rather than DumpRenderTree."),
optparse.make_option("--replay", dest="replay", action="store_true", default=False,
help="Run replay tests."),
- optparse.make_option("--force", dest="skipped", action="store_true", default=False,
+ optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
help="Run all tests, including the ones in the Skipped list."),
optparse.make_option("--profile", action="store_true",
help="Output per-test profile information."),
+ optparse.make_option("--profiler", action="store",
+ help="Output per-test profile information, using the specified profiler."),
+ optparse.make_option("--additional-drt-flag", action="append",
+ default=[], help="Additional command line flag to pass to DumpRenderTree "
+ "Specify multiple times to add multiple flags."),
+ optparse.make_option("--driver-name", type="string",
+ help="Alternative DumpRenderTree binary to use"),
+ optparse.make_option("--repeat", default=1, type="int",
+ help="Specify number of times to run test set (default: 1)."),
+ optparse.make_option("--test-runner-count", default=DEFAULT_TEST_RUNNER_COUNT, type="int",
+ help="Specify number of times to invoke test runner for each performance test."),
]
return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
def _collect_tests(self):
- """Return the list of tests found."""
-
test_extensions = ['.html', '.svg']
if self._options.replay:
test_extensions.append('.replay')
@@ -140,59 +144,59 @@ class PerfTestsRunner(object):
paths = []
for arg in self._args:
- paths.append(arg)
- relpath = filesystem.relpath(arg, self._base_path)
- if relpath:
- paths.append(relpath)
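+            # Try each argument as a path under the perf tests directory first, then as a path that can be re-expressed relative to it; warn if neither exists.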
+ if filesystem.exists(filesystem.join(self._base_path, arg)):
+ paths.append(arg)
+ else:
+ relpath = filesystem.relpath(arg, self._base_path)
+ if filesystem.exists(filesystem.join(self._base_path, relpath)):
+ paths.append(filesystem.normpath(relpath))
+ else:
+                    _log.warn('Path was not found: ' + arg)
skipped_directories = set(['.svn', 'resources'])
test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
tests = []
for path in test_files:
- relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
- if self._port.skips_perf_test(relative_path) and not self._options.skipped:
+ relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
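+            # Honor the Skipped list unless --force was given or the test was named explicitly on the command line.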
+ if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
continue
- test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
+ test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=self._options.test_runner_count)
tests.append(test)
return tests
- def _start_servers(self):
- if self._needs_http:
- self._port.acquire_http_lock()
- self._port.start_http_server(number_of_servers=2)
- self._has_http_lock = True
-
- def _stop_servers(self):
- if self._has_http_lock:
- self._port.stop_http_server()
- self._port.release_http_lock()
-
def run(self):
- self._needs_http = self._port.requires_http_server()
-
- if not self._port.check_build(needs_http=self._needs_http):
+ if not self._port.check_build(needs_http=False):
_log.error("Build not up to date for %s" % self._port._path_to_driver())
return self.EXIT_CODE_BAD_BUILD
- tests = self._collect_tests()
- _log.info("Running %d tests" % len(tests))
+ run_count = 0
+ repeat = self._options.repeat
+ while (run_count < repeat):
+ run_count += 1
- for test in tests:
- if not test.prepare(self._options.time_out_ms):
- return self.EXIT_CODE_BAD_PREPARATION
+ tests = self._collect_tests()
+ runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
+ _log.info("Running %d tests%s" % (len(tests), runs))
- try:
- self._start_servers()
- unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
+ for test in tests:
+ if not test.prepare(self._options.time_out_ms):
+ return self.EXIT_CODE_BAD_PREPARATION
+
+ unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))
- finally:
- self._stop_servers()
+ if self._options.generate_results and not self._options.profile:
+ exit_code = self._generate_results()
+ if exit_code:
+ return exit_code
if self._options.generate_results and not self._options.profile:
- exit_code = self._generate_and_show_results()
- if exit_code:
- return exit_code
+ test_results_server = self._options.test_results_server
+ if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
+ return self.EXIT_CODE_FAILED_UPLOADING
+
+ if self._options.show_results:
+ self._port.show_results_html_file(self._results_page_path())
return unexpected
@@ -202,7 +206,10 @@ class PerfTestsRunner(object):
return output_json_path
return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)
- def _generate_and_show_results(self):
+ def _results_page_path(self):
+ return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'
+
+ def _generate_results(self):
options = self._options
output_json_path = self._output_json_path()
output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)
@@ -216,33 +223,66 @@ class PerfTestsRunner(object):
if not output:
return self.EXIT_CODE_BAD_MERGE
- results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
- self._generate_output_files(output_json_path, results_page_path, output)
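+        # Write the results JSON, then build the results page from resources/results-template.html next to it.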
+ filesystem = self._host.filesystem
+ json_output = json.dumps(output)
+ filesystem.write_text_file(output_json_path, json_output)
- if options.test_results_server:
- if not self._upload_json(options.test_results_server, output_json_path):
- return self.EXIT_CODE_FAILED_UPLOADING
+ template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
+ template = filesystem.read_text_file(template_path)
- if options.show_results:
- self._port.show_results_html_file(results_page_path)
+ absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
+ results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
+ results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)
+
+ filesystem.write_text_file(self._results_page_path(), results_page)
def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
- contents = {'results': self._results}
- if description:
- contents['description'] = description
+ revisions = {}
for (name, path) in self._port.repository_paths():
scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
- contents[name + '-revision'] = scm.svn_revision(path)
-
- # FIXME: Add --branch or auto-detect the branch we're in
- for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
- 'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
+ revision = scm.svn_revision(path)
+ revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}
+
+ meta_info = {
+ 'description': description,
+ 'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
+ 'platform': platform,
+ 'revisions': revisions,
+ 'builderName': builder_name,
+ 'buildNumber': int(build_number) if build_number else None}
+
+ contents = {'tests': {}}
+ for key, value in meta_info.items():
if value:
contents[key] = value
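+        # Build the nested 'tests' tree: one node per path component, with leaves carrying the 'metrics' dictionary.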
+ for test, metrics in self._results:
+ for metric_name, iteration_values in metrics.iteritems():
+                if not isinstance(iteration_values, list): # We can't report results without individual measurements.
+ continue
+
+ tests = contents['tests']
+ path = test.test_name_without_file_extension().split('/')
+ for i in range(0, len(path)):
+ is_last_token = i + 1 == len(path)
+ url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
+ tests.setdefault(path[i], {'url': url})
+ current_test = tests[path[i]]
+ if is_last_token:
+ current_test.setdefault('metrics', {})
+ assert metric_name not in current_test['metrics']
+ current_test['metrics'][metric_name] = {'current': iteration_values}
+ else:
+ current_test.setdefault('tests', {})
+ tests = current_test['tests']
+
return contents
- def _merge_slave_config_json(self, slave_config_json_path, output):
+ @staticmethod
+ def _datetime_in_ES5_compatible_iso_format(datetime):
+ return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')
+
+ def _merge_slave_config_json(self, slave_config_json_path, contents):
if not self._host.filesystem.isfile(slave_config_json_path):
_log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
return None
@@ -250,7 +290,9 @@ class PerfTestsRunner(object):
try:
slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
slave_config = json.load(slave_config_json)
- return dict(slave_config.items() + output.items())
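+            # Namespace each slave config key under 'builder' (e.g. 'key' becomes 'builderKey') before merging.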
+ for key in slave_config:
+ contents['builder' + key.capitalize()] = slave_config[key]
+ return contents
except Exception, error:
_log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
return None
@@ -265,85 +307,48 @@ class PerfTestsRunner(object):
_log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
return None
- def _generate_output_files(self, output_json_path, results_page_path, output):
- filesystem = self._host.filesystem
-
- json_output = json.dumps(output)
- filesystem.write_text_file(output_json_path, json_output)
-
- if results_page_path:
- template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
- template = filesystem.read_text_file(template_path)
-
- absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
- results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
- results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)
-
- filesystem.write_text_file(results_page_path, results_page)
-
- def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
- uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
+ def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
+ url = "https://%s%s" % (test_results_server, host_path)
+ uploader = file_uploader(url, 120)
try:
response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
except Exception, error:
- _log.error("Failed to upload JSON file in 120s: %s" % error)
+ _log.error("Failed to upload JSON file to %s in 120s: %s" % (url, error))
return False
response_body = [line.strip('\n') for line in response]
if response_body != ['OK']:
- _log.error("Uploaded JSON but got a bad response:")
- for line in response_body:
- _log.error(line)
- return False
-
- _log.info("JSON file uploaded.")
+ try:
+ parsed_response = json.loads('\n'.join(response_body))
+ except:
+ _log.error("Uploaded JSON to %s but got a bad response:" % url)
+ for line in response_body:
+ _log.error(line)
+ return False
+ if parsed_response.get('status') != 'OK':
+ _log.error("Uploaded JSON to %s but got an error:" % url)
+ _log.error(json.dumps(parsed_response, indent=4))
+ return False
+
+ _log.info("JSON file uploaded to %s." % url)
return True
- def _print_status(self, tests, expected, unexpected):
- if len(tests) == expected + unexpected:
- status = "Ran %d tests" % len(tests)
- else:
- status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
- if unexpected:
- status += " (%d didn't run)" % unexpected
- _log.info(status)
-
- def _run_tests_set(self, tests, port):
+ def _run_tests_set(self, tests):
result_count = len(tests)
- expected = 0
- unexpected = 0
- driver = None
-
- for test in tests:
- driver = port.create_driver(worker_number=0, no_timeout=True)
-
- if self._options.pause_before_testing:
- driver.start()
- if not self._host.user.confirm("Ready to run test?"):
- driver.stop()
- return unexpected
-
- _log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
- if self._run_single_test(test, driver):
- expected = expected + 1
+ failures = 0
+ self._results = []
+
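+        # Run each test in turn, collecting (test, metrics) pairs in self._results and counting failures.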
+ for i, test in enumerate(tests):
+ _log.info('Running %s (%d of %d)' % (test.test_name(), i + 1, len(tests)))
+ start_time = time.time()
+ metrics = test.run(self._options.time_out_ms)
+ if metrics:
+ self._results.append((test, metrics))
else:
- unexpected = unexpected + 1
+ failures += 1
+ _log.error('FAILED')
+ _log.info('Finished: %f s' % (time.time() - start_time))
_log.info('')
- driver.stop()
-
- return unexpected
-
- def _run_single_test(self, test, driver):
- start_time = time.time()
-
- new_results = test.run(driver, self._options.time_out_ms)
- if new_results:
- self._results.update(new_results)
- else:
- _log.error('FAILED')
-
- _log.info("Finished: %f s" % (time.time() - start_time))
-
- return new_results != None
+ return failures
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py
new file mode 100644
index 000000000..a0832a9cd
--- /dev/null
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py
@@ -0,0 +1,562 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Integration tests for run_perf_tests."""
+
+import StringIO
+import datetime
+import json
+import re
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.port.driver import DriverOutput
+from webkitpy.port.test import TestPort
+from webkitpy.performance_tests.perftest import PerfTest
+from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
+
+
+class EventTargetWrapperTestData:
+ text = """Running 20 times
+Ignoring warm-up run (1502)
+1504
+1505
+1510
+1504
+1507
+1509
+1510
+1487
+1488
+1472
+1472
+1488
+1473
+1472
+1475
+1487
+1486
+1486
+1475
+1471
+
+Time:
+values 1486, 1471, 1510, 1505, 1478, 1490 ms
+avg 1490 ms
+median 1488 ms
+stdev 15.13935 ms
+min 1471 ms
+max 1510 ms
+"""
+
+ output = """Running Bindings/event-target-wrapper.html (1 of 2)
+RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
+median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
+Finished: 0.1 s
+
+"""
+
+ results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
+ 'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}
+
+
+class SomeParserTestData:
+ text = """Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50861 ms
+min 1080 ms
+max 1120 ms
+"""
+
+ output = """Running Parser/some-parser.html (2 of 2)
+RESULT Parser: some-parser: Time= 1100.0 ms
+median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
+Finished: 0.1 s
+
+"""
+
+ results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/some-parser.html',
+ 'metrics': {'Time': {'current': [[1080.0, 1120.0, 1095.0, 1101.0, 1104.0]] * 4}}}
+
+
+class MemoryTestData:
+ text = """Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+values 1080, 1120, 1095, 1101, 1104 ms
+avg 1100 ms
+median 1101 ms
+stdev 14.50861 ms
+min 1080 ms
+max 1120 ms
+
+JS Heap:
+values 825000, 811000, 848000, 837000, 829000 bytes
+avg 830000 bytes
+median 829000 bytes
+stdev 13784.04875 bytes
+min 811000 bytes
+max 848000 bytes
+
+Malloc:
+values 529000, 511000, 548000, 536000, 521000 bytes
+avg 529000 bytes
+median 529000 bytes
+stdev 14124.44689 bytes
+min 511000 bytes
+max 548000 bytes
+"""
+
+ output = """Running 1 tests
+Running Parser/memory-test.html (1 of 1)
+RESULT Parser: memory-test: Time= 1100.0 ms
+median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
+RESULT Parser: memory-test: JSHeap= 830000.0 bytes
+median= 829000.0 bytes, stdev= 12649.11064 bytes, min= 811000.0 bytes, max= 848000.0 bytes
+RESULT Parser: memory-test: Malloc= 529000.0 bytes
+median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
+Finished: 0.1 s
+"""
+
+ results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
+ js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
+ malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}
+
+
+class TestDriver:
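+    # Minimal stand-in for a real driver: returns canned DriverOutput chosen by the test file name.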
+ def run_test(self, driver_input, stop_when_done):
+ text = ''
+ timeout = False
+ crash = False
+ if driver_input.test_name.endswith('pass.html'):
+ text = SomeParserTestData.text
+ elif driver_input.test_name.endswith('timeout.html'):
+ timeout = True
+ elif driver_input.test_name.endswith('failed.html'):
+ text = None
+ elif driver_input.test_name.endswith('tonguey.html'):
+ text = 'we are not expecting an output from perf tests but RESULT blablabla'
+ elif driver_input.test_name.endswith('crash.html'):
+ crash = True
+ elif driver_input.test_name.endswith('event-target-wrapper.html'):
+ text = EventTargetWrapperTestData.text
+ elif driver_input.test_name.endswith('some-parser.html'):
+ text = SomeParserTestData.text
+ elif driver_input.test_name.endswith('memory-test.html'):
+ text = MemoryTestData.text
+ return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
+
+ def start(self):
+ """do nothing"""
+
+ def stop(self):
+ """do nothing"""
+
+
+class MainTest(unittest.TestCase):
+ def _normalize_output(self, log):
+ return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))
+
+ def _load_output_json(self, runner):
+ json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
+ return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))
+
+ def create_runner(self, args=[], driver_class=TestDriver):
+ options, parsed_args = PerfTestsRunner._parse_args(args)
+ test_port = TestPort(host=MockHost(), options=options)
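+        # Substitute the stub driver so the runner never launches a real DumpRenderTree process.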
+ test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
+
+ runner = PerfTestsRunner(args=args, port=test_port)
+ runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
+ runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
+ runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
+
+ return runner, test_port
+
+ def run_test(self, test_name):
+ runner, port = self.create_runner()
+ tests = [PerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name))]
+ return runner._run_tests_set(tests) == 0
+
+ def test_run_passing_test(self):
+ self.assertTrue(self.run_test('pass.html'))
+
+ def test_run_silent_test(self):
+ self.assertFalse(self.run_test('silent.html'))
+
+ def test_run_failed_test(self):
+ self.assertFalse(self.run_test('failed.html'))
+
+ def test_run_tonguey_test(self):
+ self.assertFalse(self.run_test('tonguey.html'))
+
+ def test_run_timeout_test(self):
+ self.assertFalse(self.run_test('timeout.html'))
+
+ def test_run_crash_test(self):
+ self.assertFalse(self.run_test('crash.html'))
+
+ def _tests_for_runner(self, runner, test_names):
+ filesystem = runner._host.filesystem
+ tests = []
+ for test in test_names:
+ path = filesystem.join(runner._base_path, test)
+ dirname = filesystem.dirname(path)
+ tests.append(PerfTest(runner._port, test, path))
+ return tests
+
+ def test_run_test_set_kills_drt_per_run(self):
+
+ class TestDriverWithStopCount(TestDriver):
+ stop_count = 0
+ def stop(self):
+ TestDriverWithStopCount.stop_count += 1
+
+ runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
+
+ tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
+ 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
+ unexpected_result_count = runner._run_tests_set(tests)
+
+ self.assertEqual(TestDriverWithStopCount.stop_count, 9)
+
+ def test_run_test_set_for_parser_tests(self):
+ runner, port = self.create_runner()
+ tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
+ output = OutputCapture()
+ output.capture_output()
+ try:
+ unexpected_result_count = runner._run_tests_set(tests)
+ finally:
+ stdout, stderr, log = output.restore_output()
+ self.assertEqual(unexpected_result_count, 0)
+ self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)
+
+ def test_run_memory_test(self):
+ runner, port = self.create_runner_and_setup_results_template()
+ runner._timestamp = 123456789
+ port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
+
+ output = OutputCapture()
+ output.capture_output()
+ try:
+ unexpected_result_count = runner.run()
+ finally:
+ stdout, stderr, log = output.restore_output()
+ self.assertEqual(unexpected_result_count, 0)
+ self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
+ parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
+ self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results)
+ self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
+ self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)
+
+ def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):
+ filesystem.write_text_file(runner._base_path + '/Parser/some-parser.html', 'some content')
+ filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
+
+ uploaded = [False]
+
+ def mock_upload_json(hostname, json_path, host_path=None):
+ # FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition.
+ self.assertIn(hostname, ['some.host'])
+ self.assertIn(json_path, ['/mock-checkout/output.json'])
+ self.assertIn(host_path, [None, '/api/report'])
+ uploaded[0] = upload_succeeds
+ return upload_succeeds
+
+ runner._upload_json = mock_upload_json
+ runner._timestamp = 123456789
+ runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000)
+ output_capture = OutputCapture()
+ output_capture.capture_output()
+ try:
+ self.assertEqual(runner.run(), expected_exit_code)
+ finally:
+ stdout, stderr, logs = output_capture.restore_output()
+
+ if not expected_exit_code and compare_logs:
+ expected_logs = ''
+ for i in xrange(repeat):
+ runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
+ expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + SomeParserTestData.output
+ if results_shown:
+ expected_logs += 'MOCK: user.open_url: file://...\n'
+ self.assertEqual(self._normalize_output(logs), expected_logs)
+
+ self.assertEqual(uploaded[0], upload_succeeds)
+
+ return logs
+
+ _event_target_wrapper_and_inspector_results = {
+ "Bindings":
+ {"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings",
+ "tests": {"event-target-wrapper": EventTargetWrapperTestData.results}},
+ "Parser":
+ {"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Parser",
+ "tests": {"some-parser": SomeParserTestData.results}}}
+
+ def test_run_with_json_output(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host'])
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ filesystem = port.host.filesystem
+ self.assertTrue(filesystem.isfile(runner._output_json_path()))
+ self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
+
+ def test_run_with_description(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host', '--description', 'some description'])
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ def create_runner_and_setup_results_template(self, args=[]):
+ runner, port = self.create_runner(args)
+ filesystem = port.host.filesystem
+ filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
+ 'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
+ '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
+ filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
+ return runner, port
+
+ def test_run_respects_no_results(self):
+ runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host', '--no-results'])
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
+ self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
+
+ def test_run_generates_json_by_default(self):
+ runner, port = self.create_runner_and_setup_results_template()
+ filesystem = port.host.filesystem
+ output_json_path = runner._output_json_path()
+ results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
+
+ self.assertFalse(filesystem.isfile(output_json_path))
+ self.assertFalse(filesystem.isfile(results_page_path))
+
+ self._test_run_with_json_output(runner, port.host.filesystem)
+
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ self.assertTrue(filesystem.isfile(output_json_path))
+ self.assertTrue(filesystem.isfile(results_page_path))
+
+ def test_run_merges_output_by_default(self):
+ runner, port = self.create_runner_and_setup_results_template()
+ filesystem = port.host.filesystem
+ output_json_path = runner._output_json_path()
+
+ filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+ self._test_run_with_json_output(runner, port.host.filesystem)
+
+ self.assertEqual(self._load_output_json(runner), [{"previous": "results"}, {
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+ self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+
+ def test_run_respects_reset_results(self):
+ runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
+ filesystem = port.host.filesystem
+ output_json_path = runner._output_json_path()
+
+ filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
+
+ self._test_run_with_json_output(runner, port.host.filesystem)
+
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+ self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
+ pass
+
+ def test_run_generates_and_show_results_page(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+ page_shown = []
+ port.show_results_html_file = lambda path: page_shown.append(path)
+ filesystem = port.host.filesystem
+ self._test_run_with_json_output(runner, filesystem, results_shown=False)
+
+ expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}
+
+ self.maxDiff = None
+ self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
+ self.assertEqual(self._load_output_json(runner), [expected_entry])
+ self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+ 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+ '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
+ self.assertEqual(page_shown[0], '/mock-checkout/output.html')
+
+ self._test_run_with_json_output(runner, filesystem, results_shown=False)
+ self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
+ self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
+ self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
+ 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+ '<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
+
+ def test_run_respects_no_show_results(self):
+ show_results_html_file = lambda path: page_shown.append(path)
+
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+ page_shown = []
+ port.show_results_html_file = show_results_html_file
+ self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
+ self.assertEqual(page_shown[0], '/mock-checkout/output.html')
+
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--no-show-results'])
+ page_shown = []
+ port.show_results_html_file = show_results_html_file
+ self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
+ self.assertEqual(page_shown, [])
+
+ def test_run_with_bad_output_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+ port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+ port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+
+ def test_run_with_slave_config_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+ port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])
+
+ def test_run_with_bad_slave_config_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
+ logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+ self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
+ port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+ port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+
+ def test_run_with_multiple_repositories(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host'])
+ port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ self.assertEqual(self._load_output_json(runner), [{
+ "buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"},
+ "some": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ def test_run_with_upload_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
+
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+ self.assertEqual(generated_json[0]['platform'], 'platform1')
+ self.assertEqual(generated_json[0]['builderName'], 'builder1')
+ self.assertEqual(generated_json[0]['buildNumber'], 123)
+
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
+
+ def test_run_with_upload_json_should_generate_perf_webkit_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123',
+ '--slave-config-json-path=/mock-checkout/slave-config.json'])
+ port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')
+
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
+ generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+ self.assertTrue(isinstance(generated_json, list))
+ self.assertEqual(len(generated_json), 1)
+
+ output = generated_json[0]
+ self.maxDiff = None
+ self.assertEqual(output['platform'], 'platform1')
+ self.assertEqual(output['buildNumber'], 123)
+ self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
+ self.assertEqual(output['builderName'], 'builder1')
+ self.assertEqual(output['builderKey'], 'value1')
+ self.assertEqual(output['revisions'], {'WebKit': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
+ self.assertEqual(output['tests'].keys(), ['Bindings', 'Parser'])
+ self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
+ self.assertEqual(output['tests']['Bindings']['url'], 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings')
+ self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
+ self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
+ 'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
+ 'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})
+
+ def test_run_with_repeat(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host', '--repeat', '5'])
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
+ self.assertEqual(self._load_output_json(runner), [
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
+ {"buildTime": "2013-02-08T15:19:37.460000",
+ "tests": self._event_target_wrapper_and_inspector_results,
+ "revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
+
+ def test_run_with_test_runner_count(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-runner-count=3'])
+ self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
+ generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
+ self.assertTrue(isinstance(generated_json, list))
+ self.assertEqual(len(generated_json), 1)
+
+ output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
+ self.assertEqual(len(output), 3)
+ expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
+ for metrics in output:
+ self.assertEqual(metrics, expectedMetrics)
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index 16a05599c..29bd7a8b1 100755..100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -32,526 +31,25 @@
import StringIO
import json
import re
-import unittest
+import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
-from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput
-from webkitpy.layout_tests.port.test import TestPort
-from webkitpy.layout_tests.views import printing
-from webkitpy.performance_tests.perftest import ChromiumStylePerfTest
-from webkitpy.performance_tests.perftest import PerfTest
+from webkitpy.port.test import TestPort
+from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
class MainTest(unittest.TestCase):
- def assertWritten(self, stream, contents):
- self.assertEqual(stream.buflist, contents)
-
- def normalizeFinishedTime(self, log):
- return re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log)
-
- class TestDriver:
- def run_test(self, driver_input, stop_when_done):
- text = ''
- timeout = False
- crash = False
- if driver_input.test_name.endswith('pass.html'):
- text = 'RESULT group_name: test_name= 42 ms'
- elif driver_input.test_name.endswith('timeout.html'):
- timeout = True
- elif driver_input.test_name.endswith('failed.html'):
- text = None
- elif driver_input.test_name.endswith('tonguey.html'):
- text = 'we are not expecting an output from perf tests but RESULT blablabla'
- elif driver_input.test_name.endswith('crash.html'):
- crash = True
- elif driver_input.test_name.endswith('event-target-wrapper.html'):
- text = """Running 20 times
-Ignoring warm-up run (1502)
-1504
-1505
-1510
-1504
-1507
-1509
-1510
-1487
-1488
-1472
-1472
-1488
-1473
-1472
-1475
-1487
-1486
-1486
-1475
-1471
-
-Time:
-values 1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471 ms
-avg 1489.05 ms
-median 1487 ms
-stdev 14.46 ms
-min 1471 ms
-max 1510 ms
-"""
- elif driver_input.test_name.endswith('some-parser.html'):
- text = """Running 20 times
-Ignoring warm-up run (1115)
-
-Time:
-values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
-avg 1100 ms
-median 1101 ms
-stdev 11 ms
-min 1080 ms
-max 1120 ms
-"""
- elif driver_input.test_name.endswith('memory-test.html'):
- text = """Running 20 times
-Ignoring warm-up run (1115)
-
-Time:
-values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 ms
-avg 1100 ms
-median 1101 ms
-stdev 11 ms
-min 1080 ms
-max 1120 ms
-
-JS Heap:
-values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
-avg 832000 bytes
-median 829000 bytes
-stdev 15000 bytes
-min 811000 bytes
-max 848000 bytes
-
-Malloc:
-values 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19 bytes
-avg 532000 bytes
-median 529000 bytes
-stdev 13000 bytes
-min 511000 bytes
-max 548000 bytes
-"""
- return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
-
- def start(self):
- """do nothing"""
-
- def stop(self):
- """do nothing"""
-
- def create_runner(self, args=[], driver_class=TestDriver):
+ def create_runner(self, args=[]):
options, parsed_args = PerfTestsRunner._parse_args(args)
test_port = TestPort(host=MockHost(), options=options)
- test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
-
runner = PerfTestsRunner(args=args, port=test_port)
runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
-
- filesystem = runner._host.filesystem
- runner.load_output_json = lambda: json.loads(filesystem.read_text_file(runner._output_json_path()))
return runner, test_port
- def run_test(self, test_name):
- runner, port = self.create_runner()
- driver = MainTest.TestDriver()
- return runner._run_single_test(ChromiumStylePerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name)), driver)
-
- def test_run_passing_test(self):
- self.assertTrue(self.run_test('pass.html'))
-
- def test_run_silent_test(self):
- self.assertFalse(self.run_test('silent.html'))
-
- def test_run_failed_test(self):
- self.assertFalse(self.run_test('failed.html'))
-
- def test_run_tonguey_test(self):
- self.assertFalse(self.run_test('tonguey.html'))
-
- def test_run_timeout_test(self):
- self.assertFalse(self.run_test('timeout.html'))
-
- def test_run_crash_test(self):
- self.assertFalse(self.run_test('crash.html'))
-
- def _tests_for_runner(self, runner, test_names):
- filesystem = runner._host.filesystem
- tests = []
- for test in test_names:
- path = filesystem.join(runner._base_path, test)
- dirname = filesystem.dirname(path)
- if test.startswith('inspector/'):
- tests.append(ChromiumStylePerfTest(runner._port, test, path))
- else:
- tests.append(PerfTest(runner._port, test, path))
- return tests
-
- def test_run_test_set(self):
- runner, port = self.create_runner()
- tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
- 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
- output = OutputCapture()
- output.capture_output()
- try:
- unexpected_result_count = runner._run_tests_set(tests, port)
- finally:
- stdout, stderr, log = output.restore_output()
- self.assertEqual(unexpected_result_count, len(tests) - 1)
- self.assertTrue('\nRESULT group_name: test_name= 42 ms\n' in log)
-
- def test_run_test_set_kills_drt_per_run(self):
-
- class TestDriverWithStopCount(MainTest.TestDriver):
- stop_count = 0
-
- def stop(self):
- TestDriverWithStopCount.stop_count += 1
-
- runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
-
- tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
- 'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
- unexpected_result_count = runner._run_tests_set(tests, port)
-
- self.assertEqual(TestDriverWithStopCount.stop_count, 6)
-
- def test_run_test_pause_before_testing(self):
- class TestDriverWithStartCount(MainTest.TestDriver):
- start_count = 0
-
- def start(self):
- TestDriverWithStartCount.start_count += 1
-
- runner, port = self.create_runner(args=["--pause-before-testing"], driver_class=TestDriverWithStartCount)
- tests = self._tests_for_runner(runner, ['inspector/pass.html'])
-
- output = OutputCapture()
- output.capture_output()
- try:
- unexpected_result_count = runner._run_tests_set(tests, port)
- self.assertEqual(TestDriverWithStartCount.start_count, 1)
- finally:
- stdout, stderr, log = output.restore_output()
- self.assertEqual(self.normalizeFinishedTime(log),
- "Ready to run test?\nRunning inspector/pass.html (1 of 1)\nRESULT group_name: test_name= 42 ms\nFinished: 0.1 s\n\n")
-
- def test_run_test_set_for_parser_tests(self):
- runner, port = self.create_runner()
- tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
- output = OutputCapture()
- output.capture_output()
- try:
- unexpected_result_count = runner._run_tests_set(tests, port)
- finally:
- stdout, stderr, log = output.restore_output()
- self.assertEqual(unexpected_result_count, 0)
- self.assertEqual(self.normalizeFinishedTime(log), '\n'.join(['Running Bindings/event-target-wrapper.html (1 of 2)',
- 'RESULT Bindings: event-target-wrapper= 1489.05 ms',
- 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
- 'Finished: 0.1 s',
- '',
- 'Running Parser/some-parser.html (2 of 2)',
- 'RESULT Parser: some-parser= 1100.0 ms',
- 'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
- 'Finished: 0.1 s',
- '', '']))
-
- def test_run_memory_test(self):
- runner, port = self.create_runner_and_setup_results_template()
- runner._timestamp = 123456789
- port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
-
- output = OutputCapture()
- output.capture_output()
- try:
- unexpected_result_count = runner.run()
- finally:
- stdout, stderr, log = output.restore_output()
- self.assertEqual(unexpected_result_count, 0)
- self.assertEqual(self.normalizeFinishedTime(log), '\n'.join([
- 'Running 1 tests',
- 'Running Parser/memory-test.html (1 of 1)',
- 'RESULT Parser: memory-test= 1100.0 ms',
- 'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
- 'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
- 'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
- 'RESULT Parser: memory-test: Malloc= 532000.0 bytes',
- 'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
- 'Finished: 0.1 s',
- '',
- 'MOCK: user.open_url: file://...',
- '']))
- results = runner.load_output_json()[0]['results']
- values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
- self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms', 'values': values})
- self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes', 'values': values})
- self.assertEqual(results['Parser/memory-test:Malloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes', 'values': values})
-
- def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=False, results_shown=True, expected_exit_code=0):
- filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
- filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
-
- uploaded = [False]
-
- def mock_upload_json(hostname, json_path):
- self.assertEqual(hostname, 'some.host')
- self.assertEqual(json_path, '/mock-checkout/output.json')
- uploaded[0] = upload_suceeds
- return upload_suceeds
-
- runner._upload_json = mock_upload_json
- runner._timestamp = 123456789
- output_capture = OutputCapture()
- output_capture.capture_output()
- try:
- self.assertEqual(runner.run(), expected_exit_code)
- finally:
- stdout, stderr, logs = output_capture.restore_output()
-
- if not expected_exit_code:
- expected_logs = '\n'.join(['Running 2 tests',
- 'Running Bindings/event-target-wrapper.html (1 of 2)',
- 'RESULT Bindings: event-target-wrapper= 1489.05 ms',
- 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
- 'Finished: 0.1 s',
- '',
- 'Running inspector/pass.html (2 of 2)',
- 'RESULT group_name: test_name= 42 ms',
- 'Finished: 0.1 s',
- '', ''])
- if results_shown:
- expected_logs += 'MOCK: user.open_url: file://...\n'
- self.assertEqual(self.normalizeFinishedTime(logs), expected_logs)
-
- self.assertEqual(uploaded[0], upload_suceeds)
-
- return logs
-
- _event_target_wrapper_and_inspector_results = {
- "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms",
- "values": [1504, 1505, 1510, 1504, 1507, 1509, 1510, 1487, 1488, 1472, 1472, 1488, 1473, 1472, 1475, 1487, 1486, 1486, 1475, 1471]},
- "inspector/pass.html:group_name:test_name": 42}
-
- def test_run_with_json_output(self):
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
- '--test-results-server=some.host'])
- self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
- self.assertEqual(runner.load_output_json(), [{
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
- "webkit-revision": "5678", "branch": "webkit-trunk"}])
-
- filesystem = port.host.filesystem
- self.assertTrue(filesystem.isfile(runner._output_json_path()))
- self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
-
- def test_run_with_description(self):
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
- '--test-results-server=some.host', '--description', 'some description'])
- self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
- self.assertEqual(runner.load_output_json(), [{
- "timestamp": 123456789, "description": "some description",
- "results": self._event_target_wrapper_and_inspector_results,
- "webkit-revision": "5678", "branch": "webkit-trunk"}])
-
- def create_runner_and_setup_results_template(self, args=[]):
- runner, port = self.create_runner(args)
- filesystem = port.host.filesystem
- filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
- 'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
- '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
- filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
- return runner, port
-
- def test_run_respects_no_results(self):
- runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
- '--test-results-server=some.host', '--no-results'])
- self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, results_shown=False)
- self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
-
- def test_run_generates_json_by_default(self):
- runner, port = self.create_runner_and_setup_results_template()
- filesystem = port.host.filesystem
- output_json_path = runner._output_json_path()
- results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
-
- self.assertFalse(filesystem.isfile(output_json_path))
- self.assertFalse(filesystem.isfile(results_page_path))
-
- self._test_run_with_json_output(runner, port.host.filesystem)
-
- self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
- "webkit-revision": "5678", "branch": "webkit-trunk"}])
-
- self.assertTrue(filesystem.isfile(output_json_path))
- self.assertTrue(filesystem.isfile(results_page_path))
-
- def test_run_merges_output_by_default(self):
- runner, port = self.create_runner_and_setup_results_template()
- filesystem = port.host.filesystem
- output_json_path = runner._output_json_path()
-
- filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
-
- self._test_run_with_json_output(runner, port.host.filesystem)
-
- self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{"previous": "results"}, {
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
- "webkit-revision": "5678", "branch": "webkit-trunk"}])
- self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
-
- def test_run_respects_reset_results(self):
- runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
- filesystem = port.host.filesystem
- output_json_path = runner._output_json_path()
-
- filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
-
- self._test_run_with_json_output(runner, port.host.filesystem)
-
- self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
- "webkit-revision": "5678", "branch": "webkit-trunk"}])
- self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
- pass
-
- def test_run_generates_and_show_results_page(self):
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
- page_shown = []
- port.show_results_html_file = lambda path: page_shown.append(path)
- filesystem = port.host.filesystem
- self._test_run_with_json_output(runner, filesystem, results_shown=False)
-
- expected_entry = {"timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
- "webkit-revision": "5678", "branch": "webkit-trunk"}
-
- self.maxDiff = None
- json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
- self.assertEqual(json.loads(json_output), [expected_entry])
- self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
- 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
- '<script>%s</script>END' % json_output)
- self.assertEqual(page_shown[0], '/mock-checkout/output.html')
-
- self._test_run_with_json_output(runner, filesystem, results_shown=False)
- json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
- self.assertEqual(json.loads(json_output), [expected_entry, expected_entry])
- self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
- 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
- '<script>%s</script>END' % json_output)
-
- def test_run_respects_no_show_results(self):
- show_results_html_file = lambda path: page_shown.append(path)
-
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
- page_shown = []
- port.show_results_html_file = show_results_html_file
- self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
- self.assertEqual(page_shown[0], '/mock-checkout/output.html')
-
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
- '--no-show-results'])
- page_shown = []
- port.show_results_html_file = show_results_html_file
- self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
- self.assertEqual(page_shown, [])
-
- def test_run_with_bad_output_json(self):
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
- port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
- self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
- port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
- self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
-
- def test_run_with_slave_config_json(self):
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
- '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
- port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
- self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
- self.assertEqual(runner.load_output_json(), [{
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
- "webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"}])
-
- def test_run_with_bad_slave_config_json(self):
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
- '--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
- logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
- self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
- port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
- self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
- port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
- self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
-
- def test_run_with_multiple_repositories(self):
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
- '--test-results-server=some.host'])
- port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
- self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
- self.assertEqual(runner.load_output_json(), [{
- "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
- "webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"}])
-
- def test_run_with_upload_json(self):
- runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
- '--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
-
- self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=True)
- generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
- self.assertEqual(generated_json[0]['platform'], 'platform1')
- self.assertEqual(generated_json[0]['builder-name'], 'builder1')
- self.assertEqual(generated_json[0]['build-number'], 123)
-
- self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
-
- def test_upload_json(self):
- runner, port = self.create_runner()
- port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'
-
- called = []
- upload_single_text_file_throws = False
- upload_single_text_file_return_value = StringIO.StringIO('OK')
-
- class MockFileUploader:
- def __init__(mock, url, timeout):
- self.assertEqual(url, 'https://some.host/api/test/report')
- self.assertTrue(isinstance(timeout, int) and timeout)
- called.append('FileUploader')
-
- def upload_single_text_file(mock, filesystem, content_type, filename):
- self.assertEqual(filesystem, port.host.filesystem)
- self.assertEqual(content_type, 'application/json')
- self.assertEqual(filename, 'some.json')
- called.append('upload_single_text_file')
- if upload_single_text_file_throws:
- raise "Some exception"
- return upload_single_text_file_return_value
-
- runner._upload_json('some.host', 'some.json', MockFileUploader)
- self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])
-
- output = OutputCapture()
- output.capture_output()
- upload_single_text_file_return_value = StringIO.StringIO('Some error')
- runner._upload_json('some.host', 'some.json', MockFileUploader)
- _, _, logs = output.restore_output()
- self.assertEqual(logs, 'Uploaded JSON but got a bad response:\nSome error\n')
-
- # Throwing an exception upload_single_text_file shouldn't blow up _upload_json
- called = []
- upload_single_text_file_throws = True
- runner._upload_json('some.host', 'some.json', MockFileUploader)
- self.assertEqual(called, ['FileUploader', 'upload_single_text_file'])
-
def _add_file(self, runner, dirname, filename, content=True):
dirname = runner._host.filesystem.join(runner._base_path, dirname) if dirname else runner._base_path
runner._host.filesystem.maybe_make_directory(dirname)
@@ -576,7 +74,7 @@ max 548000 bytes
add_file('test2.html')
add_file('test3.html')
port.host.filesystem.chdir(runner._port.perf_tests_dir()[:runner._port.perf_tests_dir().rfind(runner._host.filesystem.sep)])
- self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
+ self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['test1.html', 'test2.html'])
def test_collect_tests_with_skipped_list(self):
runner, port = self.create_runner()
@@ -587,9 +85,22 @@ max 548000 bytes
self._add_file(runner, 'inspector/resources', 'resource_file.html')
self._add_file(runner, 'unsupported', 'unsupported_test2.html')
port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
- self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
-
- def test_collect_tests_with_skipped_list(self):
+ self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html'])
+
+ def test_collect_tests_with_skipped_list_and_files(self):
+ runner, port = self.create_runner(args=['Suite/Test1.html', 'Suite/SkippedTest1.html', 'SkippedSuite/Test1.html'])
+
+ self._add_file(runner, 'SkippedSuite', 'Test1.html')
+ self._add_file(runner, 'SkippedSuite', 'Test2.html')
+ self._add_file(runner, 'Suite', 'Test1.html')
+ self._add_file(runner, 'Suite', 'Test2.html')
+ self._add_file(runner, 'Suite', 'SkippedTest1.html')
+ self._add_file(runner, 'Suite', 'SkippedTest2.html')
+ port.skipped_perf_tests = lambda: ['Suite/SkippedTest1.html', 'Suite/SkippedTest1.html', 'SkippedSuite']
+ self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner),
+ ['SkippedSuite/Test1.html', 'Suite/SkippedTest1.html', 'Suite/Test1.html'])
+
+ def test_collect_tests_with_ignored_skipped_list(self):
runner, port = self.create_runner(args=['--force'])
self._add_file(runner, 'inspector', 'test1.html')
@@ -598,12 +109,12 @@ max 548000 bytes
self._add_file(runner, 'inspector/resources', 'resource_file.html')
self._add_file(runner, 'unsupported', 'unsupported_test2.html')
port.skipped_perf_tests = lambda: ['inspector/unsupported_test1.html', 'unsupported']
- self.assertEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])
+ self.assertItemsEqual(self._collect_tests_and_sort_test_name(runner), ['inspector/test1.html', 'inspector/test2.html', 'inspector/unsupported_test1.html', 'unsupported/unsupported_test2.html'])
def test_collect_tests_should_ignore_replay_tests_by_default(self):
runner, port = self.create_runner()
self._add_file(runner, 'Replay', 'www.webkit.org.replay')
- self.assertEqual(runner._collect_tests(), [])
+ self.assertItemsEqual(runner._collect_tests(), [])
def test_collect_tests_with_replay_tests(self):
runner, port = self.create_runner(args=['--replay'])
@@ -612,6 +123,18 @@ max 548000 bytes
self.assertEqual(len(tests), 1)
self.assertEqual(tests[0].__class__.__name__, 'ReplayPerfTest')
+ def test_default_args(self):
+ runner, port = self.create_runner()
+ options, args = PerfTestsRunner._parse_args([])
+ self.assertTrue(options.build)
+ self.assertEqual(options.time_out_ms, 600 * 1000)
+ self.assertTrue(options.generate_results)
+ self.assertTrue(options.show_results)
+ self.assertFalse(options.replay)
+ self.assertTrue(options.use_skipped_list)
+ self.assertEqual(options.repeat, 1)
+ self.assertEqual(options.test_runner_count, DEFAULT_TEST_RUNNER_COUNT)
+
def test_parse_args(self):
runner, port = self.create_runner()
options, args = PerfTestsRunner._parse_args([
@@ -625,20 +148,84 @@ max 548000 bytes
'--output-json-path=a/output.json',
'--slave-config-json-path=a/source.json',
'--test-results-server=somehost',
+ '--additional-drt-flag=--enable-threaded-parser',
+ '--additional-drt-flag=--awesomesauce',
+ '--repeat=5',
+ '--test-runner-count=5',
'--debug'])
- self.assertEqual(options.build, True)
+ self.assertTrue(options.build)
self.assertEqual(options.build_directory, 'folder42')
self.assertEqual(options.platform, 'platform42')
self.assertEqual(options.builder_name, 'webkit-mac-1')
self.assertEqual(options.build_number, '56')
self.assertEqual(options.time_out_ms, '42')
self.assertEqual(options.configuration, 'Debug')
- self.assertEqual(options.show_results, False)
- self.assertEqual(options.reset_results, True)
+ self.assertFalse(options.show_results)
+ self.assertTrue(options.reset_results)
self.assertEqual(options.output_json_path, 'a/output.json')
self.assertEqual(options.slave_config_json_path, 'a/source.json')
self.assertEqual(options.test_results_server, 'somehost')
+ self.assertEqual(options.additional_drt_flag, ['--enable-threaded-parser', '--awesomesauce'])
+ self.assertEqual(options.repeat, 5)
+ self.assertEqual(options.test_runner_count, 5)
+
+ def test_upload_json(self):
+ runner, port = self.create_runner()
+ port.host.filesystem.files['/mock-checkout/some.json'] = 'some content'
+
+ class MockFileUploader:
+ called = []
+ upload_single_text_file_throws = False
+ upload_single_text_file_return_value = None
+ @classmethod
+ def reset(cls):
+ cls.called = []
+ cls.upload_single_text_file_throws = False
+ cls.upload_single_text_file_return_value = None
-if __name__ == '__main__':
- unittest.main()
+ def __init__(mock, url, timeout):
+ self.assertEqual(url, 'https://some.host/some/path')
+ self.assertTrue(isinstance(timeout, int) and timeout)
+ mock.called.append('FileUploader')
+
+ def upload_single_text_file(mock, filesystem, content_type, filename):
+ self.assertEqual(filesystem, port.host.filesystem)
+ self.assertEqual(content_type, 'application/json')
+ self.assertEqual(filename, 'some.json')
+ mock.called.append('upload_single_text_file')
+ if mock.upload_single_text_file_throws:
+ raise Exception
+ return mock.upload_single_text_file_return_value
+
+ MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('OK')
+ self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
+
+ MockFileUploader.reset()
+ MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('Some error')
+ output = OutputCapture()
+ output.capture_output()
+ self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ _, _, logs = output.restore_output()
+ self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got a bad response:\nSome error\n')
+
+ # Throwing an exception in upload_single_text_file shouldn't blow up _upload_json
+ MockFileUploader.reset()
+ MockFileUploader.upload_single_text_file_throws = True
+ self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
+
+ MockFileUploader.reset()
+ MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "OK"}')
+ self.assertTrue(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ self.assertEqual(MockFileUploader.called, ['FileUploader', 'upload_single_text_file'])
+
+ MockFileUploader.reset()
+ MockFileUploader.upload_single_text_file_return_value = StringIO.StringIO('{"status": "SomethingHasFailed", "failureStored": false}')
+ output = OutputCapture()
+ output.capture_output()
+ self.assertFalse(runner._upload_json('some.host', 'some.json', '/some/path', MockFileUploader))
+ _, _, logs = output.restore_output()
+ serialized_json = json.dumps({'status': 'SomethingHasFailed', 'failureStored': False}, indent=4)
+ self.assertEqual(logs, 'Uploaded JSON to https://some.host/some/path but got an error:\n%s\n' % serialized_json)
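The new test_upload_json above pins down how _upload_json is expected to interpret server responses: a plain-text body of 'OK' or a JSON body with "status": "OK" means success, anything else is logged and reported as a failure, and an exception raised by the uploader must not propagate. The following is a minimal, illustrative sketch of that contract; the helper name, parameter order, and exact log wording are assumptions made for the example, not webkitpy's actual implementation in perftestsrunner.py.

    import json
    import logging

    _log = logging.getLogger(__name__)

    def _upload_json_sketch(filesystem, test_results_server, json_path, host_path, file_uploader_class, timeout=120):
        # Illustrative only: mirrors what the unit tests above assert about the behavior.
        url = 'https://%s%s' % (test_results_server, host_path)
        uploader = file_uploader_class(url, timeout)
        try:
            response = uploader.upload_single_text_file(filesystem, 'application/json', json_path)
        except Exception:
            # An uploader failure must not blow up the caller; report failure instead.
            _log.error('Failed to upload JSON to %s' % url)
            return False

        response_body = response.read()
        try:
            parsed = json.loads(response_body)
        except ValueError:
            # Plain-text responses: only a literal 'OK' counts as success.
            if response_body == 'OK':
                return True
            _log.error('Uploaded JSON to %s but got a bad response:\n%s' % (url, response_body))
            return False

        if parsed.get('status') == 'OK':
            return True
        _log.error('Uploaded JSON to %s but got an error:\n%s' % (url, json.dumps(parsed, indent=4)))
        return False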
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/__init__.py b/Tools/Scripts/webkitpy/port/__init__.py
index 6365b4ce8..b2a50844c 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/__init__.py
+++ b/Tools/Scripts/webkitpy/port/__init__.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/apple.py b/Tools/Scripts/webkitpy/port/apple.py
index d434c8da8..966d04a9d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/apple.py
+++ b/Tools/Scripts/webkitpy/port/apple.py
@@ -28,7 +28,7 @@
import logging
-from webkitpy.layout_tests.port.base import Port
+from webkitpy.port.base import Port
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
@@ -55,17 +55,21 @@ class ApplePort(Port):
@classmethod
def determine_full_port_name(cls, host, options, port_name):
- # If the port_name matches the (badly named) cls.port_name, that
- # means that they passed 'mac' or 'win' and didn't specify a version.
- # That convention means that we're supposed to use the version currently
- # being run, so this won't work if you're not on mac or win (respectively).
- # If you're not on the o/s in question, you must specify a full version or -future (cf. above).
- if port_name == cls.port_name:
- assert port_name == host.platform.os_name
- return cls.port_name + '-' + host.platform.os_version
- if port_name == cls.port_name + '-wk2':
- assert port_name == host.platform.os_name + '-wk2'
- return cls.port_name + '-' + host.platform.os_version + '-wk2'
+ options = options or {}
+ if port_name in (cls.port_name, cls.port_name + '-wk2'):
+ # If the port_name matches the (badly named) cls.port_name, that
+ # means that they passed 'mac' or 'win' and didn't specify a version.
+ # That convention means that we're supposed to use the version currently
+ # being run, so this won't work if you're not on mac or win (respectively).
+ # If you're not on the o/s in question, you must specify a full version or -future (cf. above).
+ assert host.platform.os_name in port_name, "%s is not in %s!" % (host.platform.os_name, port_name)
+ if port_name == cls.port_name and not getattr(options, 'webkit_test_runner', False):
+ port_name = cls.port_name + '-' + host.platform.os_version
+ else:
+ port_name = cls.port_name + '-' + host.platform.os_version + '-wk2'
+ elif getattr(options, 'webkit_test_runner', False) and '-wk2' not in port_name:
+ port_name += '-wk2'
+
return port_name
def _strip_port_name_prefix(self, port_name):
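For reference, the rewritten determine_full_port_name above can be restated as a small standalone function. This is only an illustration of the branching (it drops the os_name assertion and the classmethod plumbing), with example names chosen to match the Apple Mac port:

    def resolve_apple_port_name(base_name, os_version, port_name, webkit_test_runner):
        # Mirrors the branching above: bare names pick up the running OS version,
        # and --webkit-test-runner (WebKit2) appends '-wk2' when it is not already present.
        if port_name in (base_name, base_name + '-wk2'):
            if port_name == base_name and not webkit_test_runner:
                return base_name + '-' + os_version
            return base_name + '-' + os_version + '-wk2'
        if webkit_test_runner and '-wk2' not in port_name:
            return port_name + '-wk2'
        return port_name

    assert resolve_apple_port_name('mac', 'mountainlion', 'mac', False) == 'mac-mountainlion'
    assert resolve_apple_port_name('mac', 'mountainlion', 'mac', True) == 'mac-mountainlion-wk2'
    assert resolve_apple_port_name('mac', 'mountainlion', 'mac-wk2', False) == 'mac-mountainlion-wk2'
    assert resolve_apple_port_name('mac', 'mountainlion', 'mac-lion', True) == 'mac-lion-wk2'
    assert resolve_apple_port_name('mac', 'mountainlion', 'mac-lion', False) == 'mac-lion'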
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/port/base.py
index 8c4578dbf..9ded9d193 100755..100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/port/base.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -56,12 +55,12 @@ from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.webkit_finder import WebKitFinder
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port import config as port_config
-from webkitpy.layout_tests.port import driver
-from webkitpy.layout_tests.port import http_lock
-from webkitpy.layout_tests.port import image_diff
-from webkitpy.layout_tests.port import server_process
-from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.port import config as port_config
+from webkitpy.port import driver
+from webkitpy.port import http_lock
+from webkitpy.port import image_diff
+from webkitpy.port import server_process
+from webkitpy.port.factory import PortFactory
from webkitpy.layout_tests.servers import apache_http_server
from webkitpy.layout_tests.servers import http_server
from webkitpy.layout_tests.servers import websocket_server
@@ -69,13 +68,11 @@ from webkitpy.layout_tests.servers import websocket_server
_log = logging.getLogger(__name__)
-# FIXME: This class should merge with WebKitPort now that Chromium behaves mostly like other webkit ports.
class Port(object):
"""Abstract class for Port-specific hooks for the layout_test package."""
# Subclasses override this. This should indicate the basic implementation
- # part of the port name, e.g., 'chromium-mac', 'win', 'gtk'; there is probably (?)
- # one unique value per class.
+ # part of the port name, e.g., 'win', 'gtk'; there is probably (?) one unique value per class.
# FIXME: We should probably rename this to something like 'implementation_name'.
port_name = None
@@ -89,15 +86,17 @@ class Port(object):
def determine_full_port_name(cls, host, options, port_name):
"""Return a fully-specified port name that can be used to construct objects."""
# Subclasses will usually override this.
- return cls.port_name
+ options = options or {}
+ assert port_name.startswith(cls.port_name)
+ if getattr(options, 'webkit_test_runner', False) and not '-wk2' in port_name:
+ return port_name + '-wk2'
+ return port_name
- def __init__(self, host, port_name=None, options=None, **kwargs):
+ def __init__(self, host, port_name, options=None, **kwargs):
# This value may be different from cls.port_name by having version modifiers
# and other fields appended to it (for example, 'qt-arm' or 'mac-wk2').
-
- # FIXME: port_name should be a required parameter. It isn't yet because lots of tests need to be updatd.
- self._name = port_name or self.port_name
+ self._name = port_name
        # These are default values that should be overridden in subclasses.
self._version = ''
@@ -108,6 +107,9 @@ class Port(object):
# options defined on it.
self._options = options or optparse.Values()
+ if self._name and '-wk2' in self._name:
+ self._options.webkit_test_runner = True
+
self.host = host
self._executive = host.executive
self._filesystem = host.filesystem
@@ -150,6 +152,9 @@ class Port(object):
def additional_drt_flag(self):
return []
+ def supports_per_test_timeout(self):
+ return False
+
def default_pixel_tests(self):
# FIXME: Disable until they are run by default on build.webkit.org.
return False
@@ -478,8 +483,7 @@ class Port(object):
suffix: file suffix of the expected results, including dot; e.g. '.txt'
or '.png'. This should not be None, but may be an empty string.
platform: the most-specific directory name to use to build the
- search list of directories, e.g., 'chromium-win', or
- 'chromium-cg-mac-leopard' (we follow the WebKit format)
+ search list of directories; e.g. 'mountainlion-wk2'
return_default: if True, returns the path to the generic expectation if nothing
else is found; if False, returns None.
@@ -567,13 +571,13 @@ class Port(object):
if not reftest_list:
reftest_list = []
for expectation, prefix in (('==', ''), ('!=', '-mismatch')):
- for extention in Port._supported_file_extensions:
+ for extention in Port._supported_reference_extensions:
path = self.expected_filename(test_name, prefix + extention)
if self._filesystem.exists(path):
reftest_list.append((expectation, path))
return reftest_list
- return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), []) # pylint: disable-msg=E1103
+ return reftest_list.get(self._filesystem.join(self.layout_tests_dir(), test_name), []) # pylint: disable=E1103
def tests(self, paths):
"""Return the list of tests found. Both generic and platform-specific tests matching paths should be returned."""
@@ -602,14 +606,17 @@ class Port(object):
return [self.relative_test_filename(f) for f in files]
# When collecting test cases, we include any file with these extensions.
- _supported_file_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl',
- '.htm', '.php', '.svg', '.mht'])
+ _supported_test_extensions = set(['.html', '.shtml', '.xml', '.xhtml', '.pl', '.htm', '.php', '.svg', '.mht', '.xht'])
+ _supported_reference_extensions = set(['.html', '.xml', '.xhtml', '.htm', '.svg', '.xht'])
@staticmethod
+ # If any changes are made here be sure to update the isUsedInReftest method in old-run-webkit-tests as well.
def is_reference_html_file(filesystem, dirname, filename):
- if filename.startswith('ref-') or filename.endswith('notref-'):
+ if filename.startswith('ref-') or filename.startswith('notref-'):
return True
- filename_wihout_ext, unused = filesystem.splitext(filename)
+ filename_wihout_ext, ext = filesystem.splitext(filename)
+ if ext not in Port._supported_reference_extensions:
+ return False
for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
if filename_wihout_ext.endswith(suffix):
return True
@@ -619,7 +626,7 @@ class Port(object):
def _has_supported_extension(filesystem, filename):
"""Return true if filename is one of the file extensions we want to run a test on."""
extension = filesystem.splitext(filename)[1]
- return extension in Port._supported_file_extensions
+ return extension in Port._supported_test_extensions
@staticmethod
def _is_test_file(filesystem, dirname, filename):
@@ -767,9 +774,6 @@ class Port(object):
return True
return False
- def is_chromium(self):
- return False
-
def name(self):
"""Returns a name that uniquely identifies this particular type of port
(e.g., "mac-snowleopard" or "chromium-linux-x86_x64" and can be passed
@@ -798,6 +802,10 @@ class Port(object):
return self._options.ensure_value(name, default_value)
@memoized
+ def path_to_generic_test_expectations_file(self):
+ return self._filesystem.join(self.layout_tests_dir(), 'TestExpectations')
+
+ @memoized
def path_to_test_expectations_file(self):
"""Update the test expectations to the passed-in string.
@@ -807,11 +815,7 @@ class Port(object):
# FIXME: We need to remove this when we make rebaselining work with multiple files and just generalize expectations_files().
# test_expectations are always in mac/ not mac-leopard/ by convention, hence we use port_name instead of name().
- port_name = self.port_name
- if port_name.startswith('chromium'):
- port_name = 'chromium'
-
- return self._filesystem.join(self._webkit_baseline_path(port_name), 'TestExpectations')
+ return self._filesystem.join(self._webkit_baseline_path(self.port_name), 'TestExpectations')
def relative_test_filename(self, filename):
"""Returns a test_name a relative unix-style path for a filename under the LayoutTests
@@ -823,12 +827,6 @@ class Port(object):
else:
return self.host.filesystem.abspath(filename)
- def relative_perf_test_filename(self, filename):
- if filename.startswith(self.perf_tests_dir()):
- return self.host.filesystem.relpath(filename, self.perf_tests_dir())
- else:
- return self.host.filesystem.abspath(filename)
-
@memoized
def abspath_for_test(self, test_name):
"""Returns the full path to the file for a given test name. This is the
@@ -898,7 +896,10 @@ class Port(object):
# Most ports (?):
'WEBKIT_TESTFONTS',
- 'WEBKITOUTPUTDIR',
+ 'WEBKIT_OUTPUTDIR',
+
+ # Chromium:
+ 'CHROME_DEVEL_SANDBOX',
]
for variable in variables_to_copy:
self._copy_value_from_environ_if_set(clean_env, variable)
@@ -927,11 +928,6 @@ class Port(object):
method."""
pass
- def requires_http_server(self):
- """Does the port require an HTTP server for running tests? This could
- be the case when the tests aren't run on the host platform."""
- return False
-
def start_http_server(self, additional_dirs=None, number_of_servers=None):
"""Start a web server. Raise an error if it can't start or is already running.
@@ -1065,29 +1061,34 @@ class Port(object):
_log.warning("additional_expectations path '%s' does not exist" % path)
return expectations
- def expectations_files(self):
+ def _port_specific_expectations_files(self):
# Unlike baseline_search_path, we only want to search [WK2-PORT, PORT-VERSION, PORT] and any directories
        # included via --additional-platform-directory, not the full cascade.
search_paths = [self.port_name]
- if self.name() != self.port_name:
- search_paths.append(self.name())
+
+ non_wk2_name = self.name().replace('-wk2', '')
+ if non_wk2_name != self.port_name:
+ search_paths.append(non_wk2_name)
if self.get_option('webkit_test_runner'):
# Because nearly all of the skipped tests for WebKit 2 are due to cross-platform
# issues, all wk2 ports share a skipped list under platform/wk2.
- search_paths.extend([self._wk2_port_name(), "wk2"])
+ search_paths.extend(["wk2", self._wk2_port_name()])
search_paths.extend(self.get_option("additional_platform_directory", []))
return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in search_paths]
+ def expectations_files(self):
+ return [self.path_to_generic_test_expectations_file()] + self._port_specific_expectations_files()
+
def repository_paths(self):
"""Returns a list of (repository_name, repository_path) tuples of its depending code base.
- By default it returns a list that only contains a ('webkit', <webkitRepossitoryPath>) tuple."""
+ By default it returns a list that only contains a ('WebKit', <webkitRepositoryPath>) tuple."""
- # We use LayoutTest directory here because webkit_base isn't a part webkit repository in Chromium port
+ # We use LayoutTest directory here because webkit_base isn't a part of WebKit repository in Chromium port
        # where trunk isn't checked out as a whole.
- return [('webkit', self.layout_tests_dir())]
+ return [('WebKit', self.layout_tests_dir())]
_WDIFF_DEL = '##WDIFF_DEL##'
_WDIFF_ADD = '##WDIFF_ADD##'
@@ -1205,15 +1206,24 @@ class Port(object):
def _is_debian_based(self):
return self._filesystem.exists('/etc/debian_version')
+ def _is_arch_based(self):
+ return self._filesystem.exists('/etc/arch-release')
+
+ def _apache_version(self):
+ config = self._executive.run_command([self._path_to_apache(), '-v'])
+ return re.sub(r'(?:.|\n)*Server version: Apache/(\d+\.\d+)(?:.|\n)*', r'\1', config)
+
# We pass sys_platform into this method to make it easy to unit test.
def _apache_config_file_name_for_platform(self, sys_platform):
if sys_platform == 'cygwin':
return 'cygwin-httpd.conf' # CYGWIN is the only platform to still use Apache 1.3.
if sys_platform.startswith('linux'):
if self._is_redhat_based():
- return 'fedora-httpd.conf' # This is an Apache 2.x config file despite the naming.
+ return 'fedora-httpd-' + self._apache_version() + '.conf'
if self._is_debian_based():
- return 'apache2-debian-httpd.conf'
+ return 'debian-httpd-' + self._apache_version() + '.conf'
+ if self._is_arch_based():
+ return 'archlinux-httpd.conf'
# All platforms use apache2 except for CYGWIN (and Mac OS X Tiger and prior, which we no longer support).
return "apache2-httpd.conf"
@@ -1252,6 +1262,12 @@ class Port(object):
"""Returns the full path to the test driver (DumpRenderTree)."""
return self._build_path(self.driver_name())
+ def _driver_tempdir(self):
+ return self._filesystem.mkdtemp(prefix='%s-' % self.driver_name())
+
+ def _driver_tempdir_for_environment(self):
+ return self._driver_tempdir()
+
def _path_to_webcore_library(self):
"""Returns the full path to a built copy of WebCore."""
return None
@@ -1325,12 +1341,19 @@ class Port(object):
def look_for_new_crash_logs(self, crashed_processes, start_time):
pass
+ def look_for_new_samples(self, unresponsive_processes, start_time):
+ pass
+
def sample_process(self, name, pid):
pass
def virtual_test_suites(self):
return []
+ def find_system_pid(self, name, pid):
+ # This is only overridden on Windows
+ return pid
+
@memoized
def populated_virtual_test_suites(self):
suites = self.virtual_test_suites()
@@ -1387,7 +1410,6 @@ class Port(object):
def _port_flag_for_scripts(self):
        # This is overridden by ports which need a flag passed to scripts to distinguish the use of that port.
# For example --qt on linux, since a user might have both Gtk and Qt libraries installed.
- # FIXME: Chromium should override this once ChromiumPort is a WebKitPort.
return None
# This is modeled after webkitdirs.pm argumentsForConfiguration() from old-run-webkit-tests
@@ -1432,6 +1454,9 @@ class Port(object):
def _build_driver_flags(self):
return []
+ def test_search_path(self):
+ return self.baseline_search_path()
+
def _tests_for_other_platforms(self):
# By default we will skip any directory under LayoutTests/platform
# that isn't in our baseline search path (this mirrors what
@@ -1440,7 +1465,7 @@ class Port(object):
entries = self._filesystem.glob(self._webkit_baseline_path('*'))
dirs_to_skip = []
for entry in entries:
- if self._filesystem.isdir(entry) and entry not in self.baseline_search_path():
+ if self._filesystem.isdir(entry) and entry not in self.test_search_path():
basename = self._filesystem.basename(entry)
dirs_to_skip.append('platform/%s' % basename)
return dirs_to_skip
@@ -1481,23 +1506,6 @@ class Port(object):
"3D Rendering": ["animations/3d", "transforms/3d"],
}
- # Ports which use compile-time feature detection should define this method and return
- # a dictionary mapping from symbol substrings to possibly disabled test directories.
- # When the symbol substrings are not matched, the directories will be skipped.
- # If ports don't ever enable certain features, then those directories can just be
- # in the Skipped list instead of compile-time-checked here.
- def _missing_symbol_to_skipped_tests(self):
- """Return the supported feature dictionary. The keys are symbol-substrings
- and the values are the lists of directories to skip if that symbol is missing."""
- return {
- "MathMLElement": ["mathml"],
- "GraphicsLayer": ["compositing"],
- "WebCoreHas3DRendering": ["animations/3d", "transforms/3d"],
- "WebGLShader": ["fast/canvas/webgl", "compositing/webgl", "http/tests/canvas/webgl"],
- "MHTMLArchive": ["mhtml"],
- "CSSVariableValue": ["fast/css/variables", "inspector/styles/variables"],
- }
-
def _has_test_in_directories(self, directory_lists, test_list):
if not test_list:
return False
@@ -1520,15 +1528,6 @@ class Port(object):
if supported_feature_list is not None:
return reduce(operator.add, [directories for feature, directories in self._missing_feature_to_skipped_tests().items() if feature not in supported_feature_list])
- # Only check the symbols of there are tests in the test_list that might get skipped.
- # This is a performance optimization to avoid the calling nm.
- # Runtime feature detection not supported, fallback to static dectection:
- # Disable any tests for symbols missing from the executable or libraries.
- if self._has_test_in_directories(self._missing_symbol_to_skipped_tests().values(), test_list):
- symbols_string = self._symbols_string()
- if symbols_string is not None:
- return reduce(operator.add, [directories for symbol_substring, directories in self._missing_symbol_to_skipped_tests().items() if symbol_substring not in symbols_string], [])
-
return []
def _wk2_port_name(self):
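With the symbol-based fallback removed, skipped directories now come only from the runtime feature list. A hedged illustration of the surviving reduce() expression, using the '3D Rendering' entry from _missing_feature_to_skipped_tests above plus one invented entry:

    import operator
    from functools import reduce  # a builtin in the Python 2 that webkitpy targets; imported here for clarity

    missing_feature_to_skipped_tests = {
        "3D Rendering": ["animations/3d", "transforms/3d"],
        "Fake Feature": ["fake/feature/tests"],  # invented for the example
    }
    supported_feature_list = ["Fake Feature"]  # what the driver reported at runtime

    skipped = reduce(operator.add,
                     [directories for feature, directories in missing_feature_to_skipped_tests.items()
                      if feature not in supported_feature_list])
    assert sorted(skipped) == ["animations/3d", "transforms/3d"]
    # Only directories for features the build does not report get skipped.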
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/Tools/Scripts/webkitpy/port/base_unittest.py
index bcc64b601..c04cae0c0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
+++ b/Tools/Scripts/webkitpy/port/base_unittest.py
@@ -30,7 +30,7 @@ import logging
import optparse
import sys
import tempfile
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system import executive_mock
@@ -42,22 +42,22 @@ from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.port import Port, Driver, DriverOutput
-from webkitpy.layout_tests.port.test import add_unit_tests_to_mock_filesystem, TestPort
+from webkitpy.port import Port, Driver, DriverOutput
+from webkitpy.port.test import add_unit_tests_to_mock_filesystem, TestPort
class PortTest(unittest.TestCase):
- def make_port(self, executive=None, with_tests=False, **kwargs):
+ def make_port(self, executive=None, with_tests=False, port_name=None, **kwargs):
host = MockSystemHost()
if executive:
host.executive = executive
if with_tests:
add_unit_tests_to_mock_filesystem(host.filesystem)
return TestPort(host, **kwargs)
- return Port(host, **kwargs)
+ return Port(host, port_name or 'baseport', **kwargs)
def test_default_child_processes(self):
port = self.make_port()
- self.assertNotEquals(port.default_child_processes(), None)
+ self.assertIsNotNone(port.default_child_processes())
def test_format_wdiff_output_as_html(self):
output = "OUTPUT %s %s %s" % (Port._WDIFF_DEL, Port._WDIFF_ADD, Port._WDIFF_END)
@@ -180,11 +180,11 @@ class PortTest(unittest.TestCase):
# And make sure we actually get diff output.
diff = port.diff_text('foo', 'bar', 'exp.txt', 'act.txt')
- self.assertTrue('foo' in diff)
- self.assertTrue('bar' in diff)
- self.assertTrue('exp.txt' in diff)
- self.assertTrue('act.txt' in diff)
- self.assertFalse('nosuchthing' in diff)
+ self.assertIn('foo', diff)
+ self.assertIn('bar', diff)
+ self.assertIn('exp.txt', diff)
+ self.assertIn('act.txt', diff)
+ self.assertNotIn('nosuchthing', diff)
def test_setup_test_run(self):
port = self.make_port()
@@ -196,8 +196,8 @@ class PortTest(unittest.TestCase):
port.host.filesystem.write_text_file(port.layout_tests_dir() + '/canvas/test', '')
port.host.filesystem.write_text_file(port.layout_tests_dir() + '/css2.1/test', '')
dirs = port.test_dirs()
- self.assertTrue('canvas' in dirs)
- self.assertTrue('css2.1' in dirs)
+ self.assertIn('canvas', dirs)
+ self.assertIn('css2.1', dirs)
def test_skipped_perf_tests(self):
port = self.make_port()
@@ -223,7 +223,7 @@ class PortTest(unittest.TestCase):
def test_get_option__unset(self):
port = self.make_port()
- self.assertEqual(port.get_option('foo'), None)
+ self.assertIsNone(port.get_option('foo'))
def test_get_option__default(self):
port = self.make_port()
@@ -320,7 +320,7 @@ class PortTest(unittest.TestCase):
def test_find_with_skipped_directories(self):
port = self.make_port(with_tests=True)
tests = port.tests(['userscripts'])
- self.assertTrue('userscripts/resources/iframe.html' not in tests)
+ self.assertNotIn('userscripts/resources/iframe.html', tests)
def test_find_with_skipped_directories_2(self):
port = self.make_port(with_tests=True)
@@ -347,6 +347,18 @@ class PortTest(unittest.TestCase):
self.assertFalse(Port._is_test_file(filesystem, '', 'ref-foo.html'))
self.assertFalse(Port._is_test_file(filesystem, '', 'notref-foo.xhr'))
+ def test_is_reference_html_file(self):
+ filesystem = MockFileSystem()
+ self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-expected.html'))
+ self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-expected-mismatch.xml'))
+ self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-ref.xhtml'))
+ self.assertTrue(Port.is_reference_html_file(filesystem, '', 'foo-notref.svg'))
+ self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo.html'))
+ self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.txt'))
+ self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.shtml'))
+ self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.php'))
+ self.assertFalse(Port.is_reference_html_file(filesystem, '', 'foo-expected.mht'))
+
def test_parse_reftest_list(self):
port = self.make_port(with_tests=True)
port.host.filesystem.files['bar/reftest.list'] = "\n".join(["== test.html test-ref.html",
@@ -432,29 +444,25 @@ class PortTest(unittest.TestCase):
def test_tests(self):
port = self.make_port(with_tests=True)
tests = port.tests([])
- self.assertTrue('passes/text.html' in tests)
- self.assertTrue('virtual/passes/text.html' in tests)
+ self.assertIn('passes/text.html', tests)
+ self.assertIn('virtual/passes/text.html', tests)
tests = port.tests(['passes'])
- self.assertTrue('passes/text.html' in tests)
- self.assertTrue('passes/passes/test-virtual-passes.html' in tests)
- self.assertFalse('virtual/passes/text.html' in tests)
+ self.assertIn('passes/text.html', tests)
+ self.assertIn('passes/passes/test-virtual-passes.html', tests)
+ self.assertNotIn('virtual/passes/text.html', tests)
tests = port.tests(['virtual/passes'])
- self.assertFalse('passes/text.html' in tests)
- self.assertTrue('virtual/passes/test-virtual-passes.html' in tests)
- self.assertTrue('virtual/passes/passes/test-virtual-passes.html' in tests)
- self.assertFalse('virtual/passes/test-virtual-virtual/passes.html' in tests)
- self.assertFalse('virtual/passes/virtual/passes/test-virtual-passes.html' in tests)
+ self.assertNotIn('passes/text.html', tests)
+ self.assertIn('virtual/passes/test-virtual-passes.html', tests)
+ self.assertIn('virtual/passes/passes/test-virtual-passes.html', tests)
+ self.assertNotIn('virtual/passes/test-virtual-virtual/passes.html', tests)
+ self.assertNotIn('virtual/passes/virtual/passes/test-virtual-passes.html', tests)
def test_build_path(self):
port = self.make_port(options=optparse.Values({'build_directory': '/my-build-directory/'}))
self.assertEqual(port._build_path(), '/my-build-directory/Release')
- def test_dont_require_http_server(self):
- port = self.make_port()
- self.assertEqual(port.requires_http_server(), False)
-
class NaturalCompareTest(unittest.TestCase):
def setUp(self):
@@ -497,7 +505,3 @@ class KeyCompareTest(unittest.TestCase):
self.assert_cmp('/ab', '/a/a/b', -1)
self.assert_cmp('/a/a/b', '/ab', 1)
self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/builders.py b/Tools/Scripts/webkitpy/port/builders.py
index 380b5ad31..cb0f3e532 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/builders.py
+++ b/Tools/Scripts/webkitpy/port/builders.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -34,7 +33,7 @@ from webkitpy.common.memoized import memoized
# In this dictionary, each item stores:
# * port_name -- a fully qualified port name
-# * specifiers -- a set of specifiers, representing configurations covered by this builder.
+# * is_debug -- whether we are using a debug build
# * move_overwritten_baselines_to -- (optional) list of platform directories that we will copy an existing
# baseline to before pulling down a new baseline during rebaselining. This is useful
# for bringing up a new port, for example when adding a Lion was the most recent Mac version and
@@ -46,45 +45,31 @@ from webkitpy.common.memoized import memoized
# results into platform/mac temporarily.
_exact_matches = {
- # These builders are on build.chromium.org.
- "WebKit XP": {"port_name": "chromium-win-xp", "specifiers": set(["xp", "release"])},
- "WebKit Win7": {"port_name": "chromium-win-win7", "specifiers": set(["win7", "release"])},
- "WebKit Win7 (dbg)(1)": {"port_name": "chromium-win-win7", "specifiers": set(["win7", "debug"])},
- "WebKit Win7 (dbg)(2)": {"port_name": "chromium-win-win7", "specifiers": set(["win7", "debug"])},
- "WebKit Linux": {"port_name": "chromium-linux-x86_64", "specifiers": set(["linux", "x86_64", "release"])},
- "WebKit Linux 32": {"port_name": "chromium-linux-x86", "specifiers": set(["linux", "x86"])},
- "WebKit Linux (dbg)": {"port_name": "chromium-linux-x86_64", "specifiers": set(["linux", "debug"])},
- "WebKit Mac10.6": {"port_name": "chromium-mac-snowleopard", "specifiers": set(["snowleopard"])},
- "WebKit Mac10.6 (dbg)": {"port_name": "chromium-mac-snowleopard", "specifiers": set(["snowleopard", "debug"])},
- "WebKit Mac10.7": {"port_name": "chromium-mac-lion", "specifiers": set(["lion", "release"])},
- "WebKit Mac10.7 (dbg)": {"port_name": "chromium-mac-lion", "specifiers": set(["lion", "debug"])},
- "WebKit Mac10.8": {"port_name": "chromium-mac-mountainlion", "specifiers": set(["mountainlion", "release"])},
-
# These builders are on build.webkit.org.
- "Apple MountainLion Release WK1 (Tests)": {"port_name": "mac-mountainlion", "specifiers": set(["mountainlion"]), "rebaseline_override_dir": "mac"},
- "Apple MountainLion Debug WK1 (Tests)": {"port_name": "mac-mountainlion", "specifiers": set(["mountainlion", "debug"]), "rebaseline_override_dir": "mac"},
- "Apple MountainLion Release WK2 (Tests)": {"port_name": "mac-mountainlion", "specifiers": set(["mountainlion", "wk2"]), "rebaseline_override_dir": "mac"},
- "Apple MountainLion Debug WK2 (Tests)": {"port_name": "mac-mountainlion", "specifiers": set(["mountainlion", "wk2", "debug"]), "rebaseline_override_dir": "mac"},
- "Apple Lion Release WK1 (Tests)": {"port_name": "mac-lion", "specifiers": set(["lion"])},
- "Apple Lion Debug WK1 (Tests)": {"port_name": "mac-lion", "specifiers": set(["lion", "debug"])},
- "Apple Lion Release WK2 (Tests)": {"port_name": "mac-lion", "specifiers": set(["lion", "wk2"])},
- "Apple Lion Debug WK2 (Tests)": {"port_name": "mac-lion", "specifiers": set(["lion", "wk2", "debug"])},
-
- "Apple Win XP Debug (Tests)": {"port_name": "win-xp", "specifiers": set(["win", "debug"])},
+ "Apple MountainLion Release WK1 (Tests)": {"port_name": "mac-mountainlion", "is_debug": False, "rebaseline_override_dir": "mac"},
+ "Apple MountainLion Debug WK1 (Tests)": {"port_name": "mac-mountainlion", "is_debug": True, "rebaseline_override_dir": "mac"},
+ "Apple MountainLion Release WK2 (Tests)": {"port_name": "mac-mountainlion-wk2", "is_debug": False, "rebaseline_override_dir": "mac"},
+ "Apple MountainLion Debug WK2 (Tests)": {"port_name": "mac-mountainlion-wk2", "is_debug": True, "rebaseline_override_dir": "mac"},
+ "Apple Lion Release WK1 (Tests)": {"port_name": "mac-lion", "is_debug": False},
+ "Apple Lion Debug WK1 (Tests)": {"port_name": "mac-lion", "is_debug": True},
+ "Apple Lion Release WK2 (Tests)": {"port_name": "mac-lion-wk2", "is_debug": False},
+ "Apple Lion Debug WK2 (Tests)": {"port_name": "mac-lion-wk2", "is_debug": True},
+
+ "Apple Win XP Debug (Tests)": {"port_name": "win-xp", "is_debug": True},
# FIXME: Remove rebaseline_override_dir once there is an Apple buildbot that corresponds to platform/win.
- "Apple Win 7 Release (Tests)": {"port_name": "win-7sp0", "specifiers": set(["win"]), "rebaseline_override_dir": "win"},
+ "Apple Win 7 Release (Tests)": {"port_name": "win-7sp0", "is_debug": False, "rebaseline_override_dir": "win"},
- "GTK Linux 32-bit Release": {"port_name": "gtk", "specifiers": set(["gtk", "x86", "release"])},
- "GTK Linux 64-bit Debug": {"port_name": "gtk", "specifiers": set(["gtk", "x86_64", "debug"])},
- "GTK Linux 64-bit Release": {"port_name": "gtk", "specifiers": set(["gtk", "x86_64", "release"])},
- "GTK Linux 64-bit Release WK2 (Tests)": {"port_name": "gtk", "specifiers": set(["gtk", "x86_64", "wk2", "release"])},
+ "GTK Linux 32-bit Release": {"port_name": "gtk", "is_debug": False},
+ "GTK Linux 64-bit Debug": {"port_name": "gtk", "is_debug": True},
+ "GTK Linux 64-bit Release": {"port_name": "gtk", "is_debug": False},
+ "GTK Linux 64-bit Release WK2 (Tests)": {"port_name": "gtk-wk2", "is_debug": False},
# FIXME: Remove rebaseline_override_dir once there are Qt bots for all the platform/qt-* directories.
- "Qt Linux Release": {"port_name": "qt-linux", "specifiers": set(["win", "linux", "mac"]), "rebaseline_override_dir": "qt"},
+ "Qt Linux Release": {"port_name": "qt-linux", "is_debug": False, "rebaseline_override_dir": "qt"},
- "EFL Linux 64-bit Release": {"port_name": "efl", "specifiers": set(["efl", "release"])},
- "EFL Linux 64-bit Release WK2": {"port_name": "efl", "specifiers": set(["efl", "wk2", "release"])},
- "EFL Linux 64-bit Debug WK2": {"port_name": "efl", "specifiers": set(["efl", "wk2", "debug"])},
+ "EFL Linux 64-bit Release": {"port_name": "efl", "is_debug": False},
+ "EFL Linux 64-bit Release WK2": {"port_name": "efl-wk2", "is_debug": False},
+ "EFL Linux 64-bit Debug WK2": {"port_name": "efl-wk2", "is_debug": True},
}
@@ -95,9 +80,6 @@ _fuzzy_matches = {
r"Windows": "win",
r"GTK": "gtk",
r"Qt": "qt",
- r"Chromium Mac": "chromium-mac",
- r"Chromium Linux": "chromium-linux",
- r"Chromium Win": "chromium-win",
}
@@ -105,8 +87,6 @@ _ports_without_builders = [
"qt-mac",
"qt-win",
"qt-wk2",
- # FIXME: Move to _extact_matches.
- "chromium-android",
]
@@ -122,10 +102,6 @@ def all_port_names():
return sorted(set(map(lambda x: x["port_name"], _exact_matches.values()) + _ports_without_builders))
-def coverage_specifiers_for_builder_name(builder_name):
- return _exact_matches[builder_name].get("specifiers", set())
-
-
def rebaseline_override_dir(builder_name):
return _exact_matches[builder_name].get("rebaseline_override_dir", None)
@@ -144,10 +120,14 @@ def port_name_for_builder_name(builder_name):
def builder_name_for_port_name(target_port_name):
+ debug_builder_name = None
for builder_name, builder_info in _exact_matches.items():
- if builder_info['port_name'] == target_port_name and 'debug' not in builder_info['specifiers']:
- return builder_name
- return None
+ if builder_info['port_name'] == target_port_name:
+ if builder_info['is_debug']:
+ debug_builder_name = builder_name
+ else:
+ return builder_name
+ return debug_builder_name
def builder_path_for_port_name(port_name):
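The rewritten builder_name_for_port_name above now falls back to a debug builder when a port has no release builder, instead of returning None. A small illustration with a made-up matches table (the builder names here are invented; the real table is _exact_matches above):

    def builder_name_for_port_name_example(matches, target_port_name):
        debug_builder_name = None
        for builder_name, builder_info in matches.items():
            if builder_info['port_name'] == target_port_name:
                if builder_info['is_debug']:
                    debug_builder_name = builder_name
                else:
                    return builder_name
        return debug_builder_name

    example_matches = {
        "Example Release (Tests)": {"port_name": "example", "is_debug": False},
        "Example Debug (Tests)": {"port_name": "example", "is_debug": True},
        "Other Debug (Tests)": {"port_name": "other", "is_debug": True},
    }
    assert builder_name_for_port_name_example(example_matches, 'example') == "Example Release (Tests)"
    assert builder_name_for_port_name_example(example_matches, 'other') == "Other Debug (Tests)"
    assert builder_name_for_port_name_example(example_matches, 'missing') is None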
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py b/Tools/Scripts/webkitpy/port/builders_unittest.py
index 74320f2ad..77551b983 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/builders_unittest.py
+++ b/Tools/Scripts/webkitpy/port/builders_unittest.py
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import builders
-import unittest
+import unittest2 as unittest
class BuildersTest(unittest.TestCase):
@@ -39,6 +39,3 @@ class BuildersTest(unittest.TestCase):
}
for name, expected in tests.items():
self.assertEqual(expected, builders.builder_path_from_name(name))
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config.py b/Tools/Scripts/webkitpy/port/config.py
index 828e2af37..8c893533b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/config.py
+++ b/Tools/Scripts/webkitpy/port/config.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config_standalone.py b/Tools/Scripts/webkitpy/port/config_standalone.py
index 5b0483145..274a07b33 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/config_standalone.py
+++ b/Tools/Scripts/webkitpy/port/config_standalone.py
@@ -33,7 +33,6 @@ https://bugs.webkit.org/show_bug?id=49360 for the motivation. We can remove
this test when we remove the global configuration cache in config.py."""
import os
-import unittest
import sys
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/config_unittest.py b/Tools/Scripts/webkitpy/port/config_unittest.py
index 4479204ad..4015d5b61 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/config_unittest.py
+++ b/Tools/Scripts/webkitpy/port/config_unittest.py
@@ -28,7 +28,7 @@
import os
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.executive import Executive, ScriptError
from webkitpy.common.system.executive_mock import MockExecutive2
@@ -132,7 +132,7 @@ class ConfigTest(unittest.TestCase):
e = Executive()
fs = FileSystem()
c = config.Config(e, fs)
- script = WebKitFinder(fs).path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'layout_tests', 'port', 'config_standalone.py')
+ script = WebKitFinder(fs).path_from_webkit_base('Tools', 'Scripts', 'webkitpy', 'port', 'config_standalone.py')
# Note: don't use 'Release' here, since that's the normal default.
expected = 'Debug'
@@ -156,7 +156,3 @@ class ConfigTest(unittest.TestCase):
c = self.make_config(exception=ScriptError())
actual = c.default_configuration()
self.assertEqual(actual, 'Release')
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver.py b/Tools/Scripts/webkitpy/port/driver.py
index e883590cf..5061bd6d1 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver.py
+++ b/Tools/Scripts/webkitpy/port/driver.py
@@ -78,7 +78,7 @@ class DriverOutput(object):
def __init__(self, text, image, image_hash, audio, crash=False,
test_time=0, measurements=None, timeout=False, error='', crashed_process_name='??',
- crashed_pid=None, crash_log=None):
+ crashed_pid=None, crash_log=None, pid=None):
# FIXME: Args could be renamed to better clarify what they do.
self.text = text
self.image = image # May be empty-string if the test crashes.
@@ -93,6 +93,7 @@ class DriverOutput(object):
self.measurements = measurements
self.timeout = timeout
self.error = error # stderr output
+ self.pid = pid
def has_stderr(self):
return bool(self.error)
@@ -142,7 +143,9 @@ class Driver(object):
self._measurements = {}
if self._port.get_option("profile"):
- self._profiler = ProfilerFactory.create_profiler(self._port.host, self._port._path_to_driver(), self._port.results_directory())
+ profiler_name = self._port.get_option("profiler")
+ self._profiler = ProfilerFactory.create_profiler(self._port.host,
+ self._port._path_to_driver(), self._port.results_directory(), profiler_name)
else:
self._profiler = None
@@ -156,7 +159,7 @@ class Driver(object):
the driver in an indeterminate state. The upper layers of the program
are responsible for cleaning up and ensuring things are okay.
- Returns a DriverOuput object.
+ Returns a DriverOutput object.
"""
start_time = time.time()
self.start(driver_input.should_run_pixel_test, driver_input.args)
@@ -173,6 +176,7 @@ class Driver(object):
crashed = self.has_crashed()
timed_out = self._server_process.timed_out
+ pid = self._server_process.pid()
if stop_when_done or crashed or timed_out:
# We call stop() even if we crashed or timed out in order to get any remaining stdout/stderr output.
@@ -204,7 +208,7 @@ class Driver(object):
crash=crashed, test_time=time.time() - test_begin_time, measurements=self._measurements,
timeout=timed_out, error=self.error_from_test,
crashed_process_name=self._crashed_process_name,
- crashed_pid=self._crashed_pid, crash_log=crash_log)
+ crashed_pid=self._crashed_pid, crash_log=crash_log, pid=pid)
def _get_crash_log(self, stdout, stderr, newer_than):
return self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, stdout, stderr, newer_than)
@@ -274,27 +278,42 @@ class Driver(object):
# into run_test() directly.
if not self._server_process:
self._start(pixel_tests, per_test_args)
+ self._run_post_start_tasks()
- def _start(self, pixel_tests, per_test_args):
- self.stop()
- self._driver_tempdir = self._port._filesystem.mkdtemp(prefix='%s-' % self._port.driver_name())
- server_name = self._port.driver_name()
- environment = self._port.setup_environ_for_server(server_name)
+ def _setup_environ_for_driver(self, environment):
environment['DYLD_LIBRARY_PATH'] = self._port._build_path()
environment['DYLD_FRAMEWORK_PATH'] = self._port._build_path()
# FIXME: We're assuming that WebKitTestRunner checks this DumpRenderTree-named environment variable.
+ # FIXME: Commented out for now to avoid tests breaking. Re-enable after
+ # we cut over to NRWT
+ #environment['DUMPRENDERTREE_TEMP'] = str(self._port._driver_tempdir_for_environment())
environment['DUMPRENDERTREE_TEMP'] = str(self._driver_tempdir)
environment['LOCAL_RESOURCE_ROOT'] = self._port.layout_tests_dir()
- if 'WEBKITOUTPUTDIR' in os.environ:
- environment['WEBKITOUTPUTDIR'] = os.environ['WEBKITOUTPUTDIR']
+ if 'WEBKIT_OUTPUTDIR' in os.environ:
+ environment['WEBKIT_OUTPUTDIR'] = os.environ['WEBKIT_OUTPUTDIR']
if self._profiler:
environment = self._profiler.adjusted_environment(environment)
+ return environment
+
+ def _start(self, pixel_tests, per_test_args):
+ self.stop()
+ self._driver_tempdir = self._port._driver_tempdir()
+ server_name = self._port.driver_name()
+ environment = self._port.setup_environ_for_server(server_name)
+ environment = self._setup_environ_for_driver(environment)
self._crashed_process_name = None
self._crashed_pid = None
self._server_process = self._port._server_process_constructor(self._port, server_name, self.cmd_line(pixel_tests, per_test_args), environment)
self._server_process.start()
+
+ def _run_post_start_tasks(self):
+ # Remote drivers may override this to delay post-start tasks until the server has ack'd.
if self._profiler:
- self._profiler.attach_to_pid(self._server_process.pid())
+ self._profiler.attach_to_pid(self._pid_on_target())
+
+ def _pid_on_target(self):
+ # Remote drivers will override this method to return the pid on the device.
+ return self._server_process.pid()
def stop(self):
if self._server_process:
@@ -346,6 +365,7 @@ class Driver(object):
_log.debug('%s crash, pid = %s, error_line = %s' % (self._crashed_process_name, str(pid), error_line))
if error_line.startswith("#PROCESS UNRESPONSIVE - "):
self._subprocess_was_unresponsive = True
+ self._port.sample_process(self._crashed_process_name, self._crashed_pid)
# We want to show this since it's not a regular crash and probably we don't have a crash log.
self.error_from_test += error_line
return True
@@ -365,6 +385,8 @@ class Driver(object):
assert not driver_input.image_hash or driver_input.should_run_pixel_test
# ' is the separator between arguments.
+ if self._port.supports_per_test_timeout():
+ command += "'--timeout'%s" % driver_input.timeout
if driver_input.should_run_pixel_test:
command += "'--pixel-test"
if driver_input.image_hash:
@@ -492,8 +514,7 @@ class DriverProxy(object):
# FIXME: We shouldn't need to create a driver until we actually run a test.
self._driver = self._make_driver(pixel_tests)
- self._running_drivers = {}
- self._running_drivers[self._cmd_line_as_key(pixel_tests, [])] = self._driver
+ self._driver_cmd_line = None
def _make_driver(self, pixel_tests):
return self._driver_instance_constructor(self._port, self._worker_number, pixel_tests, self._no_timeout)
@@ -520,27 +541,18 @@ class DriverProxy(object):
pixel_tests_needed = driver_input.should_run_pixel_test
cmd_line_key = self._cmd_line_as_key(pixel_tests_needed, driver_input.args)
- if not cmd_line_key in self._running_drivers:
- self._running_drivers[cmd_line_key] = self._make_driver(pixel_tests_needed)
-
- return self._running_drivers[cmd_line_key].run_test(driver_input, stop_when_done)
+ if cmd_line_key != self._driver_cmd_line:
+ self._driver.stop()
+ self._driver = self._make_driver(pixel_tests_needed)
+ self._driver_cmd_line = cmd_line_key
- def start(self):
- # FIXME: Callers shouldn't normally call this, since this routine
- # may not be specifying the correct combination of pixel test and
- # per_test args.
- #
- # The only reason we have this routine at all is so the perftestrunner
- # can pause before running a test; it might be better to push that
- # into run_test() directly.
- self._driver.start(self._port.get_option('pixel_tests'), [])
+ return self._driver.run_test(driver_input, stop_when_done)
def has_crashed(self):
- return any(driver.has_crashed() for driver in self._running_drivers.values())
+ return self._driver.has_crashed()
def stop(self):
- for driver in self._running_drivers.values():
- driver.stop()
+ self._driver.stop()
# FIXME: this should be a @classmethod (or implemented on Port instead).
def cmd_line(self, pixel_tests=None, per_test_args=None):
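The DriverProxy change above replaces the per-command-line pool of running drivers with a single driver that is stopped and recreated whenever the requested command line changes. A toy sketch of that lifecycle (the classes here are stand-ins, not webkitpy's Driver/DriverProxy):

    class ToyDriver(object):
        def __init__(self, cmd_line_key):
            self.cmd_line_key = cmd_line_key
            self.stopped = False

        def stop(self):
            self.stopped = True

    class ToyDriverProxy(object):
        def __init__(self):
            self._driver = ToyDriver(None)
            self._driver_cmd_line = None

        def run_test(self, cmd_line_key):
            # Keep exactly one driver alive; a different command line restarts it
            # rather than accumulating a long-lived driver per distinct command line.
            if cmd_line_key != self._driver_cmd_line:
                self._driver.stop()
                self._driver = ToyDriver(cmd_line_key)
                self._driver_cmd_line = cmd_line_key
            return self._driver

    proxy = ToyDriverProxy()
    first = proxy.run_test(('pixel', ()))
    second = proxy.run_test(('no-pixel', ()))
    assert first is not second and first.stopped
    assert proxy.run_test(('no-pixel', ())) is second  # same options reuse the running driver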
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py b/Tools/Scripts/webkitpy/port/driver_unittest.py
index 6a5481f6a..adfdc6344 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
+++ b/Tools/Scripts/webkitpy/port/driver_unittest.py
@@ -26,15 +26,15 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.port import Port, Driver, DriverOutput
-from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+from webkitpy.port import Port, Driver, DriverOutput
+from webkitpy.port.server_process_mock import MockServerProcess
# FIXME: remove the dependency on TestWebKitPort
-from webkitpy.layout_tests.port.port_testcase import TestWebKitPort
+from webkitpy.port.port_testcase import TestWebKitPort
from webkitpy.tool.mocktool import MockOptions
@@ -82,7 +82,7 @@ class DriverOutputTest(unittest.TestCase):
class DriverTest(unittest.TestCase):
def make_port(self):
- port = Port(MockSystemHost(), MockOptions(configuration='Release'))
+ port = Port(MockSystemHost(), 'test', MockOptions(configuration='Release'))
port._config.build_directory = lambda configuration: '/mock-build'
return port
@@ -267,7 +267,3 @@ class DriverTest(unittest.TestCase):
driver = Driver(port, 0, pixel_tests=True)
driver.start(True, [])
self.assertTrue(driver._server_process.started)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/efl.py b/Tools/Scripts/webkitpy/port/efl.py
index 0c9acd8d8..01c66eba8 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/efl.py
+++ b/Tools/Scripts/webkitpy/port/efl.py
@@ -30,35 +30,45 @@
import os
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port.base import Port
-from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
-from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
+from webkitpy.port.base import Port
+from webkitpy.port.pulseaudio_sanitizer import PulseAudioSanitizer
+from webkitpy.port.xvfbdriver import XvfbDriver
-class EflPort(Port, PulseAudioSanitizer):
+
+class EflPort(Port):
port_name = 'efl'
def __init__(self, *args, **kwargs):
super(EflPort, self).__init__(*args, **kwargs)
- self._jhbuild_wrapper_path = self.path_from_webkit_base('Tools', 'efl', 'run-with-jhbuild')
+ self._jhbuild_wrapper_path = [self.path_from_webkit_base('Tools', 'jhbuild', 'jhbuild-wrapper'), '--efl', 'run']
- self.set_option_default('wrapper', self._jhbuild_wrapper_path)
+ self.set_option_default('wrapper', ' '.join(self._jhbuild_wrapper_path))
self.webprocess_cmd_prefix = self.get_option('webprocess_cmd_prefix')
+ self._pulseaudio_sanitizer = PulseAudioSanitizer()
+
def _port_flag_for_scripts(self):
return "--efl"
def setup_test_run(self):
- self._unload_pulseaudio_module()
+ super(EflPort, self).setup_test_run()
+ self._pulseaudio_sanitizer.unload_pulseaudio_module()
def setup_environ_for_server(self, server_name=None):
env = super(EflPort, self).setup_environ_for_server(server_name)
+
# If DISPLAY environment variable is unset in the system
# e.g. on build bot, remove DISPLAY variable from the dictionary
if not 'DISPLAY' in os.environ:
del env['DISPLAY']
+
env['TEST_RUNNER_INJECTED_BUNDLE_FILENAME'] = self._build_path('lib', 'libTestRunnerInjectedBundle.so')
env['TEST_RUNNER_PLUGIN_PATH'] = self._build_path('lib')
+
+ # Silence GIO warnings about using the "memory" GSettings backend.
+ env['GSETTINGS_BACKEND'] = 'memory'
+
if self.webprocess_cmd_prefix:
env['WEB_PROCESS_CMD_PREFIX'] = self.webprocess_cmd_prefix
@@ -73,7 +83,7 @@ class EflPort(Port, PulseAudioSanitizer):
def clean_up_test_run(self):
super(EflPort, self).clean_up_test_run()
- self._restore_pulseaudio_module()
+ self._pulseaudio_sanitizer.restore_pulseaudio_module()
def _generate_all_test_configurations(self):
return [TestConfiguration(version=self._version, architecture='x86', build_type=build_type) for build_type in self.ALL_BUILD_TYPES]
@@ -88,7 +98,7 @@ class EflPort(Port, PulseAudioSanitizer):
return self._build_path('bin', 'ImageDiff')
def _image_diff_command(self, *args, **kwargs):
- return [self._jhbuild_wrapper_path] + super(EflPort, self)._image_diff_command(*args, **kwargs)
+ return self._jhbuild_wrapper_path + super(EflPort, self)._image_diff_command(*args, **kwargs)
def _path_to_webcore_library(self):
static_path = self._build_path('lib', 'libwebcore_efl.a')
@@ -108,7 +118,7 @@ class EflPort(Port, PulseAudioSanitizer):
def default_baseline_search_path(self):
return map(self._webkit_baseline_path, self._search_paths())
- def expectations_files(self):
+ def _port_specific_expectations_files(self):
# FIXME: We should be able to use the default algorithm here.
return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self._search_paths()]))
@@ -121,3 +131,6 @@ class EflPort(Port, PulseAudioSanitizer):
# FIXME: old-run-webkit-tests also added ["-graphicssystem", "raster", "-style", "windows"]
# FIXME: old-run-webkit-tests converted results_filename path for cygwin.
self._run_script("run-launcher", run_launcher_args)
+
+ def check_sys_deps(self, needs_http):
+ return super(EflPort, self).check_sys_deps(needs_http) and XvfbDriver.check_xvfb(self)
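
The EflPort hunks above stop inheriting PulseAudioSanitizer as a mixin and hold one as a member instead, turn the jhbuild wrapper path into an argument list, and add an Xvfb availability check to check_sys_deps(). A minimal sketch of the composition pattern; PortSketch, EflLikePort and PulseAudioSanitizerSketch are stand-in names, not the real webkitpy classes:

class PulseAudioSanitizerSketch(object):
    # Stand-in for webkitpy's PulseAudioSanitizer; the real one shells out to pactl.
    def unload_pulseaudio_module(self):
        print("unloading module-stream-restore")

    def restore_pulseaudio_module(self):
        print("restoring module-stream-restore")


class PortSketch(object):
    def setup_test_run(self):
        pass

    def clean_up_test_run(self):
        pass


class EflLikePort(PortSketch):
    """Holds the sanitizer as an attribute instead of inheriting it."""

    def __init__(self):
        self._pulseaudio_sanitizer = PulseAudioSanitizerSketch()

    def setup_test_run(self):
        super(EflLikePort, self).setup_test_run()
        self._pulseaudio_sanitizer.unload_pulseaudio_module()

    def clean_up_test_run(self):
        super(EflLikePort, self).clean_up_test_run()
        self._pulseaudio_sanitizer.restore_pulseaudio_module()


port = EflLikePort()
port.setup_test_run()
port.clean_up_test_run()

Holding the sanitizer as an attribute is what lets efl_unittest.py below substitute PulseAudioSanitizerMock without touching the class hierarchy.
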
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/efl_unittest.py b/Tools/Scripts/webkitpy/port/efl_unittest.py
index 1ac687b18..a16dbaf57 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/efl_unittest.py
+++ b/Tools/Scripts/webkitpy/port/efl_unittest.py
@@ -24,18 +24,25 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
-from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.layout_tests.port.efl import EflPort
-from webkitpy.layout_tests.port import port_testcase
from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.port.efl import EflPort
+from webkitpy.port.pulseaudio_sanitizer_mock import PulseAudioSanitizerMock
+from webkitpy.port import port_testcase
class EflPortTest(port_testcase.PortTestCase):
port_name = 'efl'
port_maker = EflPort
+ # Additionally mocks out the PulseAudioSanitizer methods.
+ def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
+ port = super(EflPortTest, self).make_port(host, port_name, options, os_name, os_version, **kwargs)
+ port._pulseaudio_sanitizer = PulseAudioSanitizerMock()
+ return port
+
def test_show_results_html_file(self):
port = self.make_port()
port._executive = MockExecutive(should_log=True)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/factory.py b/Tools/Scripts/webkitpy/port/factory.py
index ad7c64454..e9f146563 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/factory.py
+++ b/Tools/Scripts/webkitpy/port/factory.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -33,19 +32,13 @@ import fnmatch
import optparse
import re
-from webkitpy.layout_tests.port import builders
+from webkitpy.port import builders
def platform_options(use_globs=False):
return [
optparse.make_option('--platform', action='store',
help=('Glob-style list of platform/ports to use (e.g., "mac*")' if use_globs else 'Platform to use (e.g., "mac-lion")')),
- optparse.make_option('--chromium', action='store_const', dest='platform',
- const=('chromium*' if use_globs else 'chromium'),
- help=('Alias for --platform=chromium*' if use_globs else 'Alias for --platform=chromium')),
- optparse.make_option('--chromium-android', action='store_const', dest='platform',
- const=('chromium-android*' if use_globs else 'chromium-android'),
- help=('Alias for --platform=chromium-android*' if use_globs else 'Alias for --platform=chromium')),
optparse.make_option('--efl', action='store_const', dest='platform',
const=('efl*' if use_globs else 'efl'),
help=('Alias for --platform=efl*' if use_globs else 'Alias for --platform=efl')),
@@ -81,10 +74,6 @@ def _builder_options(builder_name):
class PortFactory(object):
PORT_CLASSES = (
- 'chromium_android.ChromiumAndroidPort',
- 'chromium_linux.ChromiumLinuxPort',
- 'chromium_mac.ChromiumMacPort',
- 'chromium_win.ChromiumWinPort',
'efl.EflPort',
'gtk.GtkPort',
'mac.MacPort',
@@ -100,7 +89,7 @@ class PortFactory(object):
def _default_port(self, options):
platform = self._host.platform
if platform.is_linux() or platform.is_freebsd():
- return 'chromium-linux'
+ return 'qt-linux'
elif platform.is_mac():
return 'mac'
elif platform.is_win():
@@ -113,12 +102,6 @@ class PortFactory(object):
appropriate port on this platform."""
port_name = port_name or self._default_port(options)
- # FIXME(dpranke): We special-case '--platform chromium' so that it can co-exist
- # with '--platform chromium-mac' and '--platform chromium-linux' properly (we
- # can't look at the port_name prefix in this case).
- if port_name == 'chromium':
- port_name = 'chromium-' + self._host.platform.os_name
-
for port_class in self.PORT_CLASSES:
module_name, class_name = port_class.rsplit('.', 1)
module = __import__(module_name, globals(), locals(), [], -1)
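
PortFactory keeps its port implementations as dotted 'module.ClassName' strings and resolves them at runtime; with the Chromium entries and the '--platform chromium' special case removed, only the non-Chromium ports remain and unknown Linux hosts now default to qt-linux. A small stand-alone sketch of the same string-to-class resolution, using importlib and a standard-library class as the example, since the real modules live under webkitpy.port and are not importable outside a WebKit checkout:

import importlib

def resolve_class(dotted_name):
    """Turn a 'module.ClassName' string into the class object it names."""
    module_name, class_name = dotted_name.rsplit('.', 1)
    module = importlib.import_module(module_name)
    return getattr(module, class_name)

# Stand-in for entries like 'efl.EflPort'; json.JSONDecoder is just an importable example.
print(resolve_class('json.JSONDecoder'))
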
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py b/Tools/Scripts/webkitpy/port/factory_unittest.py
index 915c17d89..6f09a2eb9 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/factory_unittest.py
+++ b/Tools/Scripts/webkitpy/port/factory_unittest.py
@@ -26,21 +26,17 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.port import chromium_android
-from webkitpy.layout_tests.port import chromium_linux
-from webkitpy.layout_tests.port import chromium_mac
-from webkitpy.layout_tests.port import chromium_win
-from webkitpy.layout_tests.port import factory
-from webkitpy.layout_tests.port import gtk
-from webkitpy.layout_tests.port import mac
-from webkitpy.layout_tests.port import qt
-from webkitpy.layout_tests.port import test
-from webkitpy.layout_tests.port import win
+from webkitpy.port import factory
+from webkitpy.port import gtk
+from webkitpy.port import mac
+from webkitpy.port import qt
+from webkitpy.port import test
+from webkitpy.port import win
class FactoryTest(unittest.TestCase):
@@ -54,7 +50,7 @@ class FactoryTest(unittest.TestCase):
def assert_port(self, port_name=None, os_name=None, os_version=None, options=None, cls=None):
host = MockSystemHost(os_name=os_name, os_version=os_version)
port = factory.PortFactory(host).get(port_name, options=options)
- self.assertTrue(isinstance(port, cls))
+ self.assertIsInstance(port, cls)
def test_mac(self):
self.assert_port(port_name='mac-lion', cls=mac.MacPort)
@@ -75,29 +71,6 @@ class FactoryTest(unittest.TestCase):
def test_qt(self):
self.assert_port(port_name='qt', cls=qt.QtPort)
- def test_chromium_mac(self):
- self.assert_port(port_name='chromium-mac', os_name='mac', os_version='snowleopard',
- cls=chromium_mac.ChromiumMacPort)
- self.assert_port(port_name='chromium', os_name='mac', os_version='lion',
- cls=chromium_mac.ChromiumMacPort)
-
- def test_chromium_linux(self):
- self.assert_port(port_name='chromium-linux', cls=chromium_linux.ChromiumLinuxPort)
- self.assert_port(port_name='chromium', os_name='linux', os_version='lucid',
- cls=chromium_linux.ChromiumLinuxPort)
-
- def test_chromium_android(self):
- self.assert_port(port_name='chromium-android', cls=chromium_android.ChromiumAndroidPort)
- # NOTE: We can't check for port_name=chromium here, as this will append the host's
- # operating system, whereas host!=target for Android.
-
- def test_chromium_win(self):
- self.assert_port(port_name='chromium-win-xp', cls=chromium_win.ChromiumWinPort)
- self.assert_port(port_name='chromium-win', os_name='win', os_version='xp',
- cls=chromium_win.ChromiumWinPort)
- self.assert_port(port_name='chromium', os_name='win', os_version='xp',
- cls=chromium_win.ChromiumWinPort)
-
def test_unknown_specified(self):
self.assertRaises(NotImplementedError, factory.PortFactory(MockSystemHost()).get, port_name='unknown')
@@ -105,9 +78,5 @@ class FactoryTest(unittest.TestCase):
self.assertRaises(NotImplementedError, factory.PortFactory(MockSystemHost(os_name='vms')).get)
def test_get_from_builder_name(self):
- self.assertEqual(factory.PortFactory(MockSystemHost()).get_from_builder_name('WebKit Mac10.7').name(),
- 'chromium-mac-lion')
-
-
-if __name__ == '__main__':
- unittest.main()
+ self.assertEqual(factory.PortFactory(MockSystemHost()).get_from_builder_name('Apple Lion Release WK1 (Tests)').name(),
+ 'mac-lion')
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py b/Tools/Scripts/webkitpy/port/gtk.py
index 2ec7d70c4..63b8eb89a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
+++ b/Tools/Scripts/webkitpy/port/gtk.py
@@ -30,16 +30,20 @@ import os
import subprocess
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port.base import Port
-from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
-from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
+from webkitpy.port.base import Port
+from webkitpy.port.pulseaudio_sanitizer import PulseAudioSanitizer
+from webkitpy.port.xvfbdriver import XvfbDriver
-class GtkPort(Port, PulseAudioSanitizer):
+class GtkPort(Port):
port_name = "gtk"
+ def __init__(self, *args, **kwargs):
+ super(GtkPort, self).__init__(*args, **kwargs)
+ self._pulseaudio_sanitizer = PulseAudioSanitizer()
+
def warn_if_bug_missing_in_test_expectations(self):
- return True
+ return not self.get_option('webkit_test_runner')
def _port_flag_for_scripts(self):
return "--gtk"
@@ -48,20 +52,17 @@ class GtkPort(Port, PulseAudioSanitizer):
return XvfbDriver
def default_timeout_ms(self):
- # For now, use the base Port's default timeout value in case of WebKitTestRunner.
- if self.get_option('webkit_test_runner'):
- return super(GtkPort, self).default_timeout_ms()
-
if self.get_option('configuration') == 'Debug':
return 12 * 1000
return 6 * 1000
def setup_test_run(self):
- self._unload_pulseaudio_module()
+ super(GtkPort, self).setup_test_run()
+ self._pulseaudio_sanitizer.unload_pulseaudio_module()
def clean_up_test_run(self):
super(GtkPort, self).clean_up_test_run()
- self._restore_pulseaudio_module()
+ self._pulseaudio_sanitizer.restore_pulseaudio_module()
def setup_environ_for_server(self, server_name=None):
environment = super(GtkPort, self).setup_environ_for_server(server_name)
@@ -72,7 +73,7 @@ class GtkPort(Port, PulseAudioSanitizer):
environment['TEST_RUNNER_TEST_PLUGIN_PATH'] = self._build_path('TestNetscapePlugin', '.libs')
environment['WEBKIT_INSPECTOR_PATH'] = self._build_path('Programs', 'resources', 'inspector')
environment['AUDIO_RESOURCES_PATH'] = self.path_from_webkit_base('Source', 'WebCore', 'platform', 'audio', 'resources')
- self._copy_value_from_environ_if_set(environment, 'WEBKITOUTPUTDIR')
+ self._copy_value_from_environ_if_set(environment, 'WEBKIT_OUTPUTDIR')
return environment
def _generate_all_test_configurations(self):
@@ -100,6 +101,22 @@ class GtkPort(Port, PulseAudioSanitizer):
return full_library
return None
+ def _search_paths(self):
+ search_paths = []
+ if self.get_option('webkit_test_runner'):
+ search_paths.extend([self.port_name + '-wk2', 'wk2'])
+ else:
+ search_paths.append(self.port_name + '-wk1')
+ search_paths.append(self.port_name)
+ search_paths.extend(self.get_option("additional_platform_directory", []))
+ return search_paths
+
+ def default_baseline_search_path(self):
+ return map(self._webkit_baseline_path, self._search_paths())
+
+ def _port_specific_expectations_files(self):
+ return [self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in reversed(self._search_paths())]
+
# FIXME: We should find a way to share this implementation with Gtk,
# or teach run-launcher how to call run-safari and move this down to Port.
def show_results_html_file(self, results_filename):
@@ -110,13 +127,15 @@ class GtkPort(Port, PulseAudioSanitizer):
# FIXME: old-run-webkit-tests converted results_filename path for cygwin.
self._run_script("run-launcher", run_launcher_args)
+ def check_sys_deps(self, needs_http):
+ return super(GtkPort, self).check_sys_deps(needs_http) and XvfbDriver.check_xvfb(self)
+
def _get_gdb_output(self, coredump_path):
- cmd = ['gdb', '-ex', 'thread apply all bt', '--batch', str(self._path_to_driver()), coredump_path]
+ cmd = ['gdb', '-ex', 'thread apply all bt 1024', '--batch', str(self._path_to_driver()), coredump_path]
proc = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- proc.wait()
- errors = [l.strip().decode('utf8', 'ignore') for l in proc.stderr.readlines()]
- trace = proc.stdout.read().decode('utf8', 'ignore')
- return (trace, errors)
+ stdout, stderr = proc.communicate()
+ errors = [l.strip().decode('utf8', 'ignore') for l in stderr.splitlines()]
+ return (stdout.decode('utf8', 'ignore'), errors)
def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
pid_representation = str(pid or '<unknown>')
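
The _get_gdb_output() change above is a classic subprocess fix: calling wait() and then reading the pipes can deadlock once gdb fills the stdout or stderr buffer, whereas communicate() drains both streams before reaping the process. A minimal sketch of the pattern, run against a harmless command (assumes a POSIX system with echo on PATH; the real code runs gdb against a core dump):

import subprocess

def run_and_capture(cmd):
    """Drain stdout and stderr with communicate() instead of wait() + read()."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    errors = [line.strip().decode('utf8', 'ignore') for line in stderr.splitlines()]
    return stdout.decode('utf8', 'ignore'), errors

trace, errors = run_and_capture(['echo', 'backtrace goes here'])
print(trace)
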
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py b/Tools/Scripts/webkitpy/port/gtk_unittest.py
index 7002495a4..e806091bb 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/gtk_unittest.py
+++ b/Tools/Scripts/webkitpy/port/gtk_unittest.py
@@ -26,16 +26,17 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
import sys
import os
-from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.layout_tests.port.gtk import GtkPort
-from webkitpy.layout_tests.port import port_testcase
from webkitpy.common.system.executive_mock import MockExecutive
-from webkitpy.thirdparty.mock import Mock
from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.port.gtk import GtkPort
+from webkitpy.port.pulseaudio_sanitizer_mock import PulseAudioSanitizerMock
+from webkitpy.port import port_testcase
+from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
@@ -43,6 +44,33 @@ class GtkPortTest(port_testcase.PortTestCase):
port_name = 'gtk'
port_maker = GtkPort
+ # Additionally mocks out the PulseAudioSanitizer methods.
+ def make_port(self, host=None, port_name=None, options=None, os_name=None, os_version=None, **kwargs):
+ port = super(GtkPortTest, self).make_port(host, port_name, options, os_name, os_version, **kwargs)
+ port._pulseaudio_sanitizer = PulseAudioSanitizerMock()
+ return port
+
+ def test_default_baseline_search_path(self):
+ port = self.make_port()
+ self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk1',
+ '/mock-checkout/LayoutTests/platform/gtk'])
+
+ port = self.make_port(options=MockOptions(webkit_test_runner=True))
+ self.assertEqual(port.default_baseline_search_path(), ['/mock-checkout/LayoutTests/platform/gtk-wk2',
+ '/mock-checkout/LayoutTests/platform/wk2', '/mock-checkout/LayoutTests/platform/gtk'])
+
+ def test_port_specific_expectations_files(self):
+ port = self.make_port()
+ self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
+ '/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
+ '/mock-checkout/LayoutTests/platform/gtk-wk1/TestExpectations'])
+
+ port = self.make_port(options=MockOptions(webkit_test_runner=True))
+ self.assertEqual(port.expectations_files(), ['/mock-checkout/LayoutTests/TestExpectations',
+ '/mock-checkout/LayoutTests/platform/gtk/TestExpectations',
+ '/mock-checkout/LayoutTests/platform/wk2/TestExpectations',
+ '/mock-checkout/LayoutTests/platform/gtk-wk2/TestExpectations'])
+
def test_show_results_html_file(self):
port = self.make_port()
port._executive = MockExecutive(should_log=True)
@@ -52,14 +80,6 @@ class GtkPortTest(port_testcase.PortTestCase):
def test_default_timeout_ms(self):
self.assertEqual(self.make_port(options=MockOptions(configuration='Release')).default_timeout_ms(), 6000)
self.assertEqual(self.make_port(options=MockOptions(configuration='Debug')).default_timeout_ms(), 12000)
- self.assertEqual(self.make_port(options=MockOptions(webkit_test_runner=True, configuration='Debug')).default_timeout_ms(), 80000)
- self.assertEqual(self.make_port(options=MockOptions(webkit_test_runner=True, configuration='Release')).default_timeout_ms(), 80000)
-
- def assertLinesEqual(self, a, b):
- if hasattr(self, 'assertMultiLineEqual'):
- self.assertMultiLineEqual(a, b)
- else:
- self.assertEqual(a.splitlines(), b.splitlines())
def test_get_crash_log(self):
core_directory = os.environ.get('WEBKIT_CORE_DUMPS_DIRECTORY', '/path/to/coredumps')
@@ -83,8 +103,8 @@ STDERR: <empty>""" % locals()
port._get_gdb_output = mock_empty_crash_log
stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=None)
self.assertEqual(stderr, "")
- self.assertLinesEqual(log, mock_empty_crash_log)
+ self.assertMultiLineEqual(log, mock_empty_crash_log)
stderr, log = port._get_crash_log("DumpRenderTree", 28529, "", "", newer_than=0.0)
self.assertEqual(stderr, "")
- self.assertLinesEqual(log, mock_empty_crash_log)
+ self.assertMultiLineEqual(log, mock_empty_crash_log)
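
test_default_baseline_search_path and test_port_specific_expectations_files above pin down the new GTK fallback order: gtk-wk1 before gtk for WebKit1, and gtk-wk2, wk2, gtk for WebKitTestRunner, with the expectations files read in the reverse order. A small sketch of that ordering, mirroring the _search_paths() hunk in gtk.py above but with a hypothetical helper name:

def gtk_search_paths_sketch(use_webkit2, additional_dirs=()):
    """Fallback order checked by test_default_baseline_search_path above."""
    paths = []
    if use_webkit2:
        paths.extend(['gtk-wk2', 'wk2'])
    else:
        paths.append('gtk-wk1')
    paths.append('gtk')
    paths.extend(additional_dirs)
    return paths

print(gtk_search_paths_sketch(False))  # ['gtk-wk1', 'gtk']
print(gtk_search_paths_sketch(True))   # ['gtk-wk2', 'wk2', 'gtk']
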
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py b/Tools/Scripts/webkitpy/port/http_lock.py
index c2eece3b0..bdde37e32 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/http_lock.py
+++ b/Tools/Scripts/webkitpy/port/http_lock.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2010 Andras Becsi (abecsi@inf.u-szeged.hu), University of Szeged
#
@@ -25,6 +24,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# FIXME: rename this file, and add more text about how this is
+# different from the base file_lock class.
+
"""This class helps to block NRWT threads when more NRWTs run
perf, http and websocket tests in a same time."""
@@ -43,7 +45,7 @@ _log = logging.getLogger(__name__)
class HttpLock(object):
- def __init__(self, lock_path, lock_file_prefix="WebKitHttpd.lock.", guard_lock="WebKit.lock", filesystem=None, executive=None):
+ def __init__(self, lock_path, lock_file_prefix="WebKitHttpd.lock.", guard_lock="WebKit.lock", filesystem=None, executive=None, name='HTTP'):
self._executive = executive or Executive()
self._filesystem = filesystem or FileSystem()
self._lock_path = lock_path
@@ -55,6 +57,7 @@ class HttpLock(object):
self._guard_lock_file = self._filesystem.join(self._lock_path, guard_lock)
self._guard_lock = FileLock(self._guard_lock_file)
self._process_lock_file_name = ""
+ self._name = name
def cleanup_http_lock(self):
"""Delete the lock file if exists."""
@@ -124,11 +127,11 @@ class HttpLock(object):
"""Create a lock file and wait until it's turn comes. If something goes wrong
it wont do any locking."""
if not self._create_lock_file():
- _log.debug("Warning, http locking failed!")
+ _log.debug("Warning, %s locking failed!" % self._name)
return
# FIXME: This can hang forever!
while self._current_lock_pid() != os.getpid():
time.sleep(1)
- _log.debug("HTTP lock acquired")
+ _log.debug("%s lock acquired" % self._name)
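
The HttpLock change above only parameterizes the lock's display name (defaulting to 'HTTP') so the same class can guard other shared resources, such as WebSocket servers. A toy version of the wait loop from the hunk, with a hypothetical NamedLockSketch class; the real class reads the owning pid out of the oldest lock file on disk, while this sketch returns its own pid so it terminates immediately:

import os
import time

class NamedLockSketch(object):
    """Toy version of the wait loop in the hunk above, parameterized by name."""

    def __init__(self, name='HTTP'):
        self._name = name

    def _current_lock_pid(self):
        # Stand-in for reading the pid from the lock file on disk.
        return os.getpid()

    def wait_for_lock(self):
        while self._current_lock_pid() != os.getpid():
            time.sleep(1)
        print("%s lock acquired" % self._name)

NamedLockSketch('WebSocket').wait_for_lock()
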
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py b/Tools/Scripts/webkitpy/port/http_lock_unittest.py
index 650a7670f..25af12fab 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/http_lock_unittest.py
+++ b/Tools/Scripts/webkitpy/port/http_lock_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# All rights reserved.
@@ -26,7 +25,7 @@
from http_lock import HttpLock
import os # Used for os.getpid()
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py b/Tools/Scripts/webkitpy/port/image_diff.py
index 72d061ffc..4f1b059b4 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py
+++ b/Tools/Scripts/webkitpy/port/image_diff.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged
# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
@@ -35,7 +34,7 @@ import logging
import re
import time
-from webkitpy.layout_tests.port import server_process
+from webkitpy.port import server_process
_log = logging.getLogger(__name__)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py b/Tools/Scripts/webkitpy/port/image_diff_unittest.py
index 0e5f0a0ff..c06192eb0 100755..100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py
+++ b/Tools/Scripts/webkitpy/port/image_diff_unittest.py
@@ -28,10 +28,10 @@
"""Unit testing base class for Port implementations."""
-import unittest
+import unittest2 as unittest
-from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
-from webkitpy.layout_tests.port.image_diff import ImageDiffer
+from webkitpy.port.server_process_mock import MockServerProcess
+from webkitpy.port.image_diff import ImageDiffer
class FakePort(object):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/leakdetector.py b/Tools/Scripts/webkitpy/port/leakdetector.py
index f46cd34e5..f46cd34e5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/leakdetector.py
+++ b/Tools/Scripts/webkitpy/port/leakdetector.py
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/leakdetector_unittest.py b/Tools/Scripts/webkitpy/port/leakdetector_unittest.py
index 09d37d711..d81f2b211 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/leakdetector_unittest.py
+++ b/Tools/Scripts/webkitpy/port/leakdetector_unittest.py
@@ -26,9 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
-from webkitpy.layout_tests.port.leakdetector import LeakDetector
+from webkitpy.port.leakdetector import LeakDetector
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.executive_mock import MockExecutive
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac.py b/Tools/Scripts/webkitpy/port/mac.py
index 4415b8a62..0b24ce70e 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mac.py
+++ b/Tools/Scripts/webkitpy/port/mac.py
@@ -1,5 +1,5 @@
# Copyright (C) 2011 Google Inc. All rights reserved.
-# Copyright (C) 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -29,15 +29,12 @@
import logging
import os
-import re
-import subprocess
-import sys
import time
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.executive import ScriptError
-from webkitpy.layout_tests.port.apple import ApplePort
-from webkitpy.layout_tests.port.leakdetector import LeakDetector
+from webkitpy.port.apple import ApplePort
+from webkitpy.port.leakdetector import LeakDetector
_log = logging.getLogger(__name__)
@@ -68,6 +65,9 @@ class MacPort(ApplePort):
return 350 * 1000
return super(MacPort, self).default_timeout_ms()
+ def supports_per_test_timeout(self):
+ return True
+
def _build_driver_flags(self):
return ['ARCHS=i386'] if self.architecture() == 'x86' else []
@@ -76,22 +76,27 @@ class MacPort(ApplePort):
return True
def default_baseline_search_path(self):
- if self._name.endswith(self.FUTURE_VERSION):
+ name = self._name.replace('-wk2', '')
+ if name.endswith(self.FUTURE_VERSION):
fallback_names = [self.port_name]
else:
- fallback_names = self.VERSION_FALLBACK_ORDER[self.VERSION_FALLBACK_ORDER.index(self._name):-1] + [self.port_name]
+ fallback_names = self.VERSION_FALLBACK_ORDER[self.VERSION_FALLBACK_ORDER.index(name):-1] + [self.port_name]
if self.get_option('webkit_test_runner'):
- fallback_names.insert(0, self._wk2_port_name())
- # Note we do not add 'wk2' here, even though it's included in _skipped_search_paths().
+ fallback_names = [self._wk2_port_name(), 'wk2'] + fallback_names
return map(self._webkit_baseline_path, fallback_names)
+ def _port_specific_expectations_files(self):
+ return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in self.baseline_search_path()]))
+
def setup_environ_for_server(self, server_name=None):
env = super(MacPort, self).setup_environ_for_server(server_name)
if server_name == self.driver_name():
if self.get_option('leaks'):
env['MallocStackLogging'] = '1'
if self.get_option('guard_malloc'):
- env['DYLD_INSERT_LIBRARIES'] = '/usr/lib/libgmalloc.dylib'
+ env['DYLD_INSERT_LIBRARIES'] = '/usr/lib/libgmalloc.dylib:' + self._build_path("libWebCoreTestShim.dylib")
+ else:
+ env['DYLD_INSERT_LIBRARIES'] = self._build_path("libWebCoreTestShim.dylib")
env['XML_CATALOG_FILES'] = '' # work around missing /etc/catalog <rdar://problem/4292995>
return env
@@ -107,10 +112,8 @@ class MacPort(ApplePort):
return self._version == "lion"
def default_child_processes(self):
- # FIXME: The Printer isn't initialized when this is called, so using _log would just show an unitialized logger error.
-
if self._version == "snowleopard":
- print >> sys.stderr, "Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525."
+ _log.warning("Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.")
return 1
default_count = super(MacPort, self).default_child_processes()
@@ -118,21 +121,27 @@ class MacPort(ApplePort):
# FIXME: https://bugs.webkit.org/show_bug.cgi?id=95906 With too many WebProcess WK2 tests get stuck in resource contention.
# To alleviate the issue reduce the number of running processes
# Anecdotal evidence suggests that a 4 core/8 core logical machine may run into this, but that a 2 core/4 core logical machine does not.
- if self.get_option('webkit_test_runner') and default_count > 4:
+ should_throttle_for_wk2 = self.get_option('webkit_test_runner') and default_count > 4
+ # We also want to throttle for leaks bots.
+ if should_throttle_for_wk2 or self.get_option('leaks'):
default_count = int(.75 * default_count)
# Make sure we have enough ram to support that many instances:
total_memory = self.host.platform.total_bytes_memory()
- bytes_per_drt = 256 * 1024 * 1024 # Assume each DRT needs 256MB to run.
- overhead = 2048 * 1024 * 1024 # Assume we need 2GB free for the O/S
- supportable_instances = max((total_memory - overhead) / bytes_per_drt, 1) # Always use one process, even if we don't have space for it.
- if supportable_instances < default_count:
- print >> sys.stderr, "This machine could support %s child processes, but only has enough memory for %s." % (default_count, supportable_instances)
+ if total_memory:
+ bytes_per_drt = 256 * 1024 * 1024 # Assume each DRT needs 256MB to run.
+ overhead = 2048 * 1024 * 1024 # Assume we need 2GB free for the O/S
+ supportable_instances = max((total_memory - overhead) / bytes_per_drt, 1) # Always use one process, even if we don't have space for it.
+ if supportable_instances < default_count:
+ _log.warning("This machine could support %s child processes, but only has enough memory for %s." % (default_count, supportable_instances))
+ else:
+ _log.warning("Cannot determine available memory for child processes, using default child process count of %s." % default_count)
+ supportable_instances = default_count
return min(supportable_instances, default_count)
def _build_java_test_support(self):
java_tests_path = self._filesystem.join(self.layout_tests_dir(), "java")
- build_java = ["/usr/bin/make", "-C", java_tests_path]
+ build_java = [self.make_command(), "-C", java_tests_path]
if self._executive.run_command(build_java, return_exit_code=True): # Paths are absolute, so we don't need to set a cwd.
_log.error("Failed to build Java support files: %s" % build_java)
return False
@@ -159,7 +168,7 @@ class MacPort(ApplePort):
_log.info("%s unique leaks found!" % unique_leaks)
def _check_port_build(self):
- return self._build_java_test_support()
+ return self.get_option('nojava') or self._build_java_test_support()
def _path_to_webcore_library(self):
return self._build_path('WebCore.framework/Versions/A/WebCore')
@@ -188,6 +197,9 @@ class MacPort(ApplePort):
def release_http_lock(self):
pass
+ def sample_file_path(self, name, pid):
+ return self._filesystem.join(self.results_directory(), "{0}-{1}-sample.txt".format(name, pid))
+
def _get_crash_log(self, name, pid, stdout, stderr, newer_than, time_fn=None, sleep_fn=None, wait_for_log=True):
# Note that we do slow-spin here and wait, since it appears the time
# ReportCrash takes to actually write and flush the file varies when there are
@@ -224,16 +236,25 @@ class MacPort(ApplePort):
crash_logs = {}
for (test_name, process_name, pid) in crashed_processes:
# Passing None for output. This is a second pass after the test finished so
- # if the output had any loggine we would have already collected it.
+ # if the output had any logging we would have already collected it.
crash_log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)[1]
if not crash_log:
continue
crash_logs[test_name] = crash_log
return crash_logs
+ def look_for_new_samples(self, unresponsive_processes, start_time):
+ sample_files = {}
+ for (test_name, process_name, pid) in unresponsive_processes:
+ sample_file = self.sample_file_path(process_name, pid)
+ if not self._filesystem.isfile(sample_file):
+ continue
+ sample_files[test_name] = sample_file
+ return sample_files
+
def sample_process(self, name, pid):
try:
- hang_report = self._filesystem.join(self.results_directory(), "%s-%s.sample.txt" % (name, pid))
+ hang_report = self.sample_file_path(name, pid)
self._executive.run_command([
"/usr/bin/sample",
pid,
@@ -242,8 +263,8 @@ class MacPort(ApplePort):
"-file",
hang_report,
])
- except ScriptError, e:
- _log.warning('Unable to sample process.')
+ except ScriptError as e:
+ _log.warning('Unable to sample process:' + str(e))
def _path_to_helper(self):
binary_name = 'LayoutTestHelper'
@@ -253,9 +274,8 @@ class MacPort(ApplePort):
helper_path = self._path_to_helper()
if helper_path:
_log.debug("Starting layout helper %s" % helper_path)
- # Note: Not thread safe: http://bugs.python.org/issue2320
self._helper = self._executive.popen([helper_path],
- stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
+ stdin=self._executive.PIPE, stdout=self._executive.PIPE, stderr=None)
is_ready = self._helper.stdout.readline()
if not is_ready.startswith('ready'):
_log.error("LayoutTestHelper failed to be ready")
@@ -269,12 +289,17 @@ class MacPort(ApplePort):
self._helper.wait()
except IOError, e:
_log.debug("IOError raised while stopping helper: %s" % str(e))
- pass
self._helper = None
+ def make_command(self):
+ return self.xcrun_find('make', '/usr/bin/make')
+
def nm_command(self):
+ return self.xcrun_find('nm', 'nm')
+
+ def xcrun_find(self, command, fallback):
try:
- return self._executive.run_command(['xcrun', '-find', 'nm']).rstrip()
- except ScriptError, e:
- _log.warn("xcrun failed; falling back to 'nm'.")
- return 'nm'
+ return self._executive.run_command(['xcrun', '-find', command]).rstrip()
+ except ScriptError:
+ _log.warn("xcrun failed; falling back to '%s'." % fallback)
+ return fallback
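
The MacPort hunks above route the old nm lookup through a generic xcrun_find(command, fallback) helper, add per-test sample files named <name>-<pid>-sample.txt, and make the child-process throttle tolerate a missing memory reading; the throttle itself assumes 256 MB per DumpRenderTree plus 2 GB of OS overhead, so an 8 GB machine supports at most (8192 - 2048) / 256 = 24 children. A rough sketch of the xcrun fallback, using subprocess directly rather than the port's Executive/ScriptError machinery:

import logging
import subprocess

logging.basicConfig()
_log = logging.getLogger(__name__)

def xcrun_find_sketch(command, fallback):
    """Ask xcrun for a tool path, falling back to a default when xcrun is unavailable."""
    try:
        return subprocess.check_output(['xcrun', '-find', command]).decode().rstrip()
    except (OSError, subprocess.CalledProcessError):
        _log.warning("xcrun failed; falling back to '%s'.", fallback)
        return fallback

print(xcrun_find_sketch('make', '/usr/bin/make'))
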
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py b/Tools/Scripts/webkitpy/port/mac_unittest.py
index 831fbf7c9..e58904a1a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mac_unittest.py
+++ b/Tools/Scripts/webkitpy/port/mac_unittest.py
@@ -26,8 +26,8 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.layout_tests.port.mac import MacPort
-from webkitpy.layout_tests.port import port_testcase
+from webkitpy.port.mac import MacPort
+from webkitpy.port import port_testcase
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions
@@ -87,9 +87,9 @@ java/
port._filesystem = MockFileSystem(dirs=platform_dir_paths)
dirs_to_skip = port._tests_for_other_platforms()
- self.assertTrue('platform/chromium-linux' in dirs_to_skip)
- self.assertFalse('platform/mac' in dirs_to_skip)
- self.assertFalse('platform/mac-snowleopard' in dirs_to_skip)
+ self.assertIn('platform/chromium-linux', dirs_to_skip)
+ self.assertNotIn('platform/mac', dirs_to_skip)
+ self.assertNotIn('platform/mac-snowleopard', dirs_to_skip)
def test_version(self):
port = self.make_port()
@@ -116,7 +116,7 @@ java/
port = self.make_port(options=MockOptions(leaks=True, guard_malloc=True))
env = port.setup_environ_for_server(port.driver_name())
self.assertEqual(env['MallocStackLogging'], '1')
- self.assertEqual(env['DYLD_INSERT_LIBRARIES'], '/usr/lib/libgmalloc.dylib')
+ self.assertEqual(env['DYLD_INSERT_LIBRARIES'], '/usr/lib/libgmalloc.dylib:/mock-build/libWebCoreTestShim.dylib')
def _assert_search_path(self, port_name, baseline_path, search_paths, use_webkit2=False):
port = self.make_port(port_name=port_name, options=MockOptions(webkit_test_runner=use_webkit2))
@@ -130,10 +130,10 @@ java/
self._assert_search_path('mac-lion', 'mac-lion', ['mac-lion', 'mac'])
self._assert_search_path('mac-mountainlion', 'mac', ['mac'])
self._assert_search_path('mac-future', 'mac', ['mac'])
- self._assert_search_path('mac-snowleopard', 'mac-wk2', ['mac-wk2', 'mac-snowleopard', 'mac-lion', 'mac'], use_webkit2=True)
- self._assert_search_path('mac-lion', 'mac-wk2', ['mac-wk2', 'mac-lion', 'mac'], use_webkit2=True)
- self._assert_search_path('mac-mountainlion', 'mac-wk2', ['mac-wk2', 'mac'], use_webkit2=True)
- self._assert_search_path('mac-future', 'mac-wk2', ['mac-wk2', 'mac'], use_webkit2=True)
+ self._assert_search_path('mac-snowleopard', 'mac-wk2', ['mac-wk2', 'wk2', 'mac-snowleopard', 'mac-lion', 'mac'], use_webkit2=True)
+ self._assert_search_path('mac-lion', 'mac-wk2', ['mac-wk2', 'wk2', 'mac-lion', 'mac'], use_webkit2=True)
+ self._assert_search_path('mac-mountainlion', 'mac-wk2', ['mac-wk2', 'wk2', 'mac'], use_webkit2=True)
+ self._assert_search_path('mac-future', 'mac-wk2', ['mac-wk2', 'wk2', 'mac'], use_webkit2=True)
def test_show_results_html_file(self):
port = self.make_port()
@@ -152,20 +152,20 @@ java/
bytes_for_drt = 200 * 1024 * 1024
port.host.platform.total_bytes_memory = lambda: bytes_for_drt
- expected_stderr = "This machine could support 2 child processes, but only has enough memory for 1.\n"
- child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_stderr=expected_stderr)
+ expected_logs = "This machine could support 2 child processes, but only has enough memory for 1.\n"
+ child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_logs=expected_logs)
self.assertEqual(child_processes, 1)
# Make sure that we always use one process, even if we don't have the memory for it.
port.host.platform.total_bytes_memory = lambda: bytes_for_drt - 1
- expected_stderr = "This machine could support 2 child processes, but only has enough memory for 1.\n"
- child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_stderr=expected_stderr)
+ expected_logs = "This machine could support 2 child processes, but only has enough memory for 1.\n"
+ child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_logs=expected_logs)
self.assertEqual(child_processes, 1)
# SnowLeopard has a CFNetwork bug which causes crashes if we execute more than one copy of DRT at once.
port = self.make_port(port_name='mac-snowleopard')
- expected_stderr = "Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.\n"
- child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_stderr=expected_stderr)
+ expected_logs = "Cannot run tests in parallel on Snow Leopard due to rdar://problem/10621525.\n"
+ child_processes = OutputCapture().assert_outputs(self, port.default_child_processes, (), expected_logs=expected_logs)
self.assertEqual(child_processes, 1)
def test_get_crash_log(self):
@@ -221,7 +221,7 @@ java/
port = self.make_port()
port._executive = MockExecutive2(run_command_fn=logging_run_command)
- expected_stdout = "['/usr/bin/sample', 42, 10, 10, '-file', '/mock-build/layout-test-results/test-42.sample.txt']\n"
+ expected_stdout = "['/usr/bin/sample', 42, 10, 10, '-file', '/mock-build/layout-test-results/test-42-sample.txt']\n"
OutputCapture().assert_outputs(self, port.sample_process, args=['test', 42], expected_stdout=expected_stdout)
def test_sample_process_throws_exception(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py b/Tools/Scripts/webkitpy/port/mock_drt.py
index a2106fdd9..00591f13a 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt.py
+++ b/Tools/Scripts/webkitpy/port/mock_drt.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -50,8 +49,8 @@ if script_dir not in sys.path:
sys.path.append(script_dir)
from webkitpy.common.system.systemhost import SystemHost
-from webkitpy.layout_tests.port.driver import DriverInput, DriverOutput, DriverProxy
-from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.port.driver import DriverInput, DriverOutput, DriverProxy
+from webkitpy.port.factory import PortFactory
_log = logging.getLogger(__name__)
@@ -125,6 +124,9 @@ class MockDRTPort(object):
def release_http_lock(self):
pass
+ def show_results_html_file(self, results_filename):
+ pass
+
def main(argv, host, stdin, stdout, stderr):
"""Run the tests."""
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py b/Tools/Scripts/webkitpy/port/mock_drt_unittest.py
index c4c43f5d8..46ee57bcc 100755..100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
+++ b/Tools/Scripts/webkitpy/port/mock_drt_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -30,14 +29,14 @@
"""Unit tests for MockDRT."""
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.common import newstringio
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.port import mock_drt
-from webkitpy.layout_tests.port import port_testcase
-from webkitpy.layout_tests.port import test
-from webkitpy.layout_tests.port.factory import PortFactory
+from webkitpy.port import mock_drt
+from webkitpy.port import port_testcase
+from webkitpy.port import test
+from webkitpy.port.factory import PortFactory
from webkitpy.tool import mocktool
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/Tools/Scripts/webkitpy/port/port_testcase.py
index 8ea108ba0..63cc1e990 100755..100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
+++ b/Tools/Scripts/webkitpy/port/port_testcase.py
@@ -34,14 +34,14 @@ import os
import socket
import sys
import time
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.port.base import Port
-from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
+from webkitpy.port.base import Port
+from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.layout_tests.servers import http_server_base
from webkitpy.tool.mocktool import MockOptions
@@ -50,12 +50,13 @@ from webkitpy.tool.mocktool import MockOptions
class TestWebKitPort(Port):
port_name = "testwebkitport"
- def __init__(self, symbols_string=None,
+ def __init__(self, port_name=None, symbols_string=None,
expectations_file=None, skips_file=None, host=None, config=None,
**kwargs):
+ port_name = port_name or TestWebKitPort.port_name
self.symbols_string = symbols_string # Passing "" disables all statically-detectable features.
host = host or MockSystemHost()
- super(TestWebKitPort, self).__init__(host=host, **kwargs)
+ super(TestWebKitPort, self).__init__(host, port_name=port_name, **kwargs)
def all_test_configurations(self):
return [self.test_configuration()]
@@ -114,9 +115,6 @@ class PortTestCase(unittest.TestCase):
self.assertTrue('--foo=bar' in cmd_line)
self.assertTrue('--foo=baz' in cmd_line)
- def test_uses_apache(self):
- self.assertTrue(self.make_port()._uses_apache())
-
def assert_servers_are_down(self, host, ports):
for port in ports:
try:
@@ -423,7 +421,8 @@ class PortTestCase(unittest.TestCase):
for path in port.expectations_files():
port._filesystem.write_text_file(path, '')
ordered_dict = port.expectations_dict()
- self.assertEqual(port.path_to_test_expectations_file(), ordered_dict.keys()[0])
+ self.assertEqual(port.path_to_generic_test_expectations_file(), ordered_dict.keys()[0])
+ self.assertEqual(port.path_to_test_expectations_file(), ordered_dict.keys()[1])
options = MockOptions(additional_expectations=['/tmp/foo', '/tmp/bar'])
port = self.make_port(options=options)
@@ -432,7 +431,7 @@ class PortTestCase(unittest.TestCase):
port._filesystem.write_text_file('/tmp/foo', 'foo')
port._filesystem.write_text_file('/tmp/bar', 'bar')
ordered_dict = port.expectations_dict()
- self.assertEqual(ordered_dict.keys()[-2:], options.additional_expectations) # pylint: disable-msg=E1101
+ self.assertEqual(ordered_dict.keys()[-2:], options.additional_expectations) # pylint: disable=E1101
self.assertEqual(ordered_dict.values()[-2:], ['foo', 'bar'])
def test_path_to_test_expectations_file(self):
@@ -449,37 +448,10 @@ class PortTestCase(unittest.TestCase):
port._options = MockOptions(webkit_test_runner=False)
self.assertEqual(port.path_to_test_expectations_file(), '/mock-checkout/LayoutTests/platform/testwebkitport/TestExpectations')
- def test_skipped_directories_for_symbols(self):
- # This first test confirms that the commonly found symbols result in the expected skipped directories.
- symbols_string = " ".join(["GraphicsLayer", "WebCoreHas3DRendering", "isXHTMLMPDocument", "fooSymbol"])
- expected_directories = set([
- "mathml", # Requires MathMLElement
- "fast/canvas/webgl", # Requires WebGLShader
- "compositing/webgl", # Requires WebGLShader
- "http/tests/canvas/webgl", # Requires WebGLShader
- "mhtml", # Requires MHTMLArchive
- "fast/css/variables", # Requires CSS Variables
- "inspector/styles/variables", # Requires CSS Variables
- ])
-
- result_directories = set(TestWebKitPort(symbols_string, None)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
- self.assertEqual(result_directories, expected_directories)
-
- # Test that the nm string parsing actually works:
- symbols_string = """
-000000000124f498 s __ZZN7WebCore13GraphicsLayer12replaceChildEPS0_S1_E19__PRETTY_FUNCTION__
-000000000124f500 s __ZZN7WebCore13GraphicsLayer13addChildAboveEPS0_S1_E19__PRETTY_FUNCTION__
-000000000124f670 s __ZZN7WebCore13GraphicsLayer13addChildBelowEPS0_S1_E19__PRETTY_FUNCTION__
-"""
- # Note 'compositing' is not in the list of skipped directories (hence the parsing of GraphicsLayer worked):
- expected_directories = set(['mathml', 'transforms/3d', 'compositing/webgl', 'fast/canvas/webgl', 'animations/3d', 'mhtml', 'http/tests/canvas/webgl', 'fast/css/variables', 'inspector/styles/variables'])
- result_directories = set(TestWebKitPort(symbols_string, None)._skipped_tests_for_unsupported_features(test_list=['mathml/foo.html']))
- self.assertEqual(result_directories, expected_directories)
-
def test_skipped_directories_for_features(self):
supported_features = ["Accelerated Compositing", "Foo Feature"]
expected_directories = set(["animations/3d", "transforms/3d"])
- port = TestWebKitPort(None, supported_features)
+ port = TestWebKitPort(supported_features=supported_features)
port._runtime_feature_list = lambda: supported_features
result_directories = set(port._skipped_tests_for_unsupported_features(test_list=["animations/3d/foo.html"]))
self.assertEqual(result_directories, expected_directories)
@@ -487,17 +459,17 @@ class PortTestCase(unittest.TestCase):
def test_skipped_directories_for_features_no_matching_tests_in_test_list(self):
supported_features = ["Accelerated Compositing", "Foo Feature"]
expected_directories = set([])
- result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=['foo.html']))
+ result_directories = set(TestWebKitPort(supported_features=supported_features)._skipped_tests_for_unsupported_features(test_list=['foo.html']))
self.assertEqual(result_directories, expected_directories)
def test_skipped_tests_for_unsupported_features_empty_test_list(self):
supported_features = ["Accelerated Compositing", "Foo Feature"]
expected_directories = set([])
- result_directories = set(TestWebKitPort(None, supported_features)._skipped_tests_for_unsupported_features(test_list=None))
+ result_directories = set(TestWebKitPort(supported_features=supported_features)._skipped_tests_for_unsupported_features(test_list=None))
self.assertEqual(result_directories, expected_directories)
def test_skipped_layout_tests(self):
- self.assertEqual(TestWebKitPort(None, None).skipped_layout_tests(test_list=[]), set(['media']))
+ self.assertEqual(TestWebKitPort().skipped_layout_tests(test_list=[]), set(['media']))
def test_expectations_files(self):
port = TestWebKitPort()
@@ -505,16 +477,17 @@ class PortTestCase(unittest.TestCase):
def platform_dirs(port):
return [port.host.filesystem.basename(port.host.filesystem.dirname(f)) for f in port.expectations_files()]
- self.assertEqual(platform_dirs(port), ['testwebkitport'])
+ self.assertEqual(platform_dirs(port), ['LayoutTests', 'testwebkitport'])
- port._name = "testwebkitport-version"
- self.assertEqual(platform_dirs(port), ['testwebkitport', 'testwebkitport-version'])
+ port = TestWebKitPort(port_name="testwebkitport-version")
+ self.assertEqual(platform_dirs(port), ['LayoutTests', 'testwebkitport', 'testwebkitport-version'])
- port._options = MockOptions(webkit_test_runner=True)
- self.assertEqual(platform_dirs(port), ['testwebkitport', 'testwebkitport-version', 'testwebkitport-wk2', 'wk2'])
+ port = TestWebKitPort(port_name="testwebkitport-version-wk2")
+ self.assertEqual(platform_dirs(port), ['LayoutTests', 'testwebkitport', 'testwebkitport-version', 'wk2', 'testwebkitport-wk2'])
- port._options = MockOptions(additional_platform_directory=["internal-testwebkitport"])
- self.assertEqual(platform_dirs(port), ['testwebkitport', 'testwebkitport-version', 'internal-testwebkitport'])
+ port = TestWebKitPort(port_name="testwebkitport-version",
+ options=MockOptions(additional_platform_directory=["internal-testwebkitport"]))
+ self.assertEqual(platform_dirs(port), ['LayoutTests', 'testwebkitport', 'testwebkitport-version', 'internal-testwebkitport'])
def test_root_option(self):
port = TestWebKitPort()
@@ -579,6 +552,10 @@ MOCK output of child process
self.assertFalse(port._is_redhat_based())
self.assertTrue(port._is_debian_based())
+ port._filesystem = MockFileSystem({'/etc/arch-release': ''})
+ self.assertFalse(port._is_redhat_based())
+ self.assertTrue(port._is_arch_based())
+
def test_apache_config_file_name_for_platform(self):
port = TestWebKitPort()
self._assert_config_file_for_platform(port, 'cygwin', 'cygwin-httpd.conf')
@@ -587,11 +564,13 @@ MOCK output of child process
self._assert_config_file_for_platform(port, 'linux3', 'apache2-httpd.conf')
port._is_redhat_based = lambda: True
- self._assert_config_file_for_platform(port, 'linux2', 'fedora-httpd.conf')
+ port._apache_version = lambda: '2.2'
+ self._assert_config_file_for_platform(port, 'linux2', 'fedora-httpd-2.2.conf')
port = TestWebKitPort()
port._is_debian_based = lambda: True
- self._assert_config_file_for_platform(port, 'linux2', 'apache2-debian-httpd.conf')
+ port._apache_version = lambda: '2.2'
+ self._assert_config_file_for_platform(port, 'linux2', 'debian-httpd-2.2.conf')
self._assert_config_file_for_platform(port, 'mac', 'apache2-httpd.conf')
self._assert_config_file_for_platform(port, 'win32', 'apache2-httpd.conf') # win32 isn't a supported sys.platform. AppleWin/WinCairo/WinCE ports all use cygwin.
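
The test changes above reflect two behavioural changes in the base Port: expectations_files() now starts with the generic LayoutTests/TestExpectations, and the Apache configuration file is chosen per distribution and Apache version instead of per distribution alone. A rough mapping implied by the assertions above, written as a hypothetical helper rather than Port's real code:

def apache_config_file_name_sketch(platform, is_debian=False, is_redhat=False, apache_version='2.2'):
    """Config file choice as exercised by test_apache_config_file_name_for_platform above."""
    if platform == 'cygwin':
        return 'cygwin-httpd.conf'
    if is_redhat:
        return 'fedora-httpd-%s.conf' % apache_version
    if is_debian:
        return 'debian-httpd-%s.conf' % apache_version
    return 'apache2-httpd.conf'

print(apache_config_file_name_sketch('linux2', is_redhat=True))   # fedora-httpd-2.2.conf
print(apache_config_file_name_sketch('mac'))                      # apache2-httpd.conf
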
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py b/Tools/Scripts/webkitpy/port/pulseaudio_sanitizer.py
index f4574a92f..465b92158 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/pulseaudio_sanitizer.py
+++ b/Tools/Scripts/webkitpy/port/pulseaudio_sanitizer.py
@@ -37,7 +37,7 @@ _log = logging.getLogger(__name__)
# Shared by GTK and EFL for pulseaudio sanitizing before running tests.
class PulseAudioSanitizer:
- def _unload_pulseaudio_module(self):
+ def unload_pulseaudio_module(self):
# Unload pulseaudio's module-stream-restore, since it remembers
# volume settings from different runs, and could affect
# multimedia tests results
@@ -71,7 +71,7 @@ class PulseAudioSanitizer:
_log.debug('Unable to parse module index. Please check if your pulseaudio-utils version is too old.')
return
- def _restore_pulseaudio_module(self):
+ def restore_pulseaudio_module(self):
# If pulseaudio's module-stream-restore was previously unloaded,
# restore it back. We shouldn't need extra checks here, since an
# index != -1 here means we successfully unloaded it previously.
diff --git a/Tools/Scripts/webkitpy/common/checkout/deps_mock.py b/Tools/Scripts/webkitpy/port/pulseaudio_sanitizer_mock.py
index 423debae0..88a962a2e 100644
--- a/Tools/Scripts/webkitpy/common/checkout/deps_mock.py
+++ b/Tools/Scripts/webkitpy/port/pulseaudio_sanitizer_mock.py
@@ -1,16 +1,16 @@
-# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2013 Zan Dobersek <zandobersek@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
-# * Redistributions of source code must retain the above copyright
+# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
+# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
-# * Neither the name of Google Inc. nor the names of its
+# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
@@ -26,14 +26,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import logging
-_log = logging.getLogger(__name__)
+class PulseAudioSanitizerMock:
+ def unload_pulseaudio_module(self):
+ pass
-
-class MockDEPS(object):
- def read_variable(self, name):
- return 6564
-
- def write_variable(self, name, value):
- _log.info("MOCK: MockDEPS.write_variable(%s, %s)" % (name, value))
+ def restore_pulseaudio_module(self):
+ pass
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/qt.py b/Tools/Scripts/webkitpy/port/qt.py
index 55f13ee8c..5db5d8eea 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/qt.py
+++ b/Tools/Scripts/webkitpy/port/qt.py
@@ -33,11 +33,12 @@ import logging
import re
import sys
import os
+import platform
from webkitpy.common.memoized import memoized
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
-from webkitpy.layout_tests.port.base import Port
-from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
+from webkitpy.port.base import Port
+from webkitpy.port.xvfbdriver import XvfbDriver
_log = logging.getLogger(__name__)
@@ -47,7 +48,7 @@ class QtPort(Port):
port_name = "qt"
def _wk2_port_name(self):
- return "qt-5.0-wk2"
+ return "qt-wk2"
def _port_flag_for_scripts(self):
return "--qt"
@@ -62,8 +63,6 @@ class QtPort(Port):
def __init__(self, host, port_name, **kwargs):
super(QtPort, self).__init__(host, port_name, **kwargs)
- # FIXME: This will allow Port.baseline_search_path
- # to do the right thing, but doesn't include support for qt-4.8 or qt-arm (seen in LayoutTests/platform) yet.
self._operating_system = port_name.replace('qt-', '')
# FIXME: Why is this being set at all?
@@ -80,6 +79,9 @@ class QtPort(Port):
# The Qt port builds DRT as part of the main build step
return True
+ def supports_per_test_timeout(self):
+ return True
+
def _path_to_driver(self):
return self._build_path('bin/%s' % self.driver_name())
@@ -90,7 +92,7 @@ class QtPort(Port):
if self.operating_system() == 'mac':
return self._build_path('lib/QtWebKitWidgets.framework/QtWebKitWidgets')
else:
- return self._build_path('lib/libQtWebKitWidgets.so')
+ return self._build_path('lib/libQt5WebKitWidgets.so')
def _modules_to_search_for_symbols(self):
# We search in every library to be reliable in the case of building with CONFIG+=force_static_libs_as_shared.
@@ -111,28 +113,29 @@ class QtPort(Port):
version = match.group('version')
break
except OSError:
- version = '4.8'
+ version = '5.0'
return version
def _search_paths(self):
- # qt-5.0-wk1 qt-5.0-wk2
- # \/
- # qt-5.0 qt-4.8
- # \/
+ # qt-mac-wk2
+ # /
+ # qt-wk1 qt-wk2
+ # \/
+ # qt-5.x
+ # \
# (qt-linux|qt-mac|qt-win)
# |
# qt
search_paths = []
- version = self.qt_version()
- if '5.0' in version:
- if self.get_option('webkit_test_runner'):
- search_paths.append('qt-5.0-wk2')
- else:
- search_paths.append('qt-5.0-wk1')
- if '4.8' in version:
- search_paths.append('qt-4.8')
- elif version:
- search_paths.append('qt-5.0')
+ if self.get_option('webkit_test_runner'):
+ if self.operating_system() == 'mac':
+ search_paths.append('qt-mac-wk2')
+ search_paths.append('qt-wk2')
+ else:
+ search_paths.append('qt-wk1')
+
+ search_paths.append('qt-' + self.qt_version())
+
search_paths.append(self.port_name + '-' + self.operating_system())
search_paths.append(self.port_name)
return search_paths
@@ -140,13 +143,13 @@ class QtPort(Port):
def default_baseline_search_path(self):
return map(self._webkit_baseline_path, self._search_paths())
- def expectations_files(self):
+ def _port_specific_expectations_files(self):
paths = self._search_paths()
if self.get_option('webkit_test_runner'):
paths.append('wk2')
# expectations_files() uses the directories listed in _search_paths reversed.
- # e.g. qt -> qt-linux -> qt-4.8
+ # e.g. qt -> qt-linux -> qt-5.x -> qt-wk1
return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in paths]))
def setup_environ_for_server(self, server_name=None):
@@ -182,3 +185,6 @@ class QtPort(Port):
return False
return result
+    # The Qt port is not ready for parallel testing; see https://bugs.webkit.org/show_bug.cgi?id=77730 for details.
+ def default_child_processes(self):
+ return 1
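A minimal sketch (not part of the patch) of the fallback order the reworked _search_paths() produces; the port name and Qt version below are assumptions chosen to match the unit-test cases that follow:

    # Hypothetical illustration: baseline fallback for a Linux WebKit2 run on Qt 5.0
    # under the new scheme (wk2 directory first, then the Qt version, then the platform).
    expected_wk2 = ['qt-wk2', 'qt-5.0', 'qt-linux', 'qt']
    # A WebKit1 run on the same machine would instead start at 'qt-wk1'.
    expected_wk1 = ['qt-wk1', 'qt-5.0', 'qt-linux', 'qt']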
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py b/Tools/Scripts/webkitpy/port/qt_unittest.py
index d6ef8d85e..2338f673b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/qt_unittest.py
+++ b/Tools/Scripts/webkitpy/port/qt_unittest.py
@@ -26,15 +26,15 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
import os
from copy import deepcopy
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.port import port_testcase
-from webkitpy.layout_tests.port.qt import QtPort
+from webkitpy.port import port_testcase
+from webkitpy.port.qt import QtPort
from webkitpy.tool.mocktool import MockOptions
@@ -42,24 +42,17 @@ class QtPortTest(port_testcase.PortTestCase):
port_name = 'qt-mac'
port_maker = QtPort
search_paths_cases = [
- {'search_paths':['qt-4.8', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':False, 'qt_version':'4.8'},
- {'search_paths':['qt-4.8', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':False, 'qt_version':'4.8'},
- {'search_paths':['qt-4.8', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':False, 'qt_version':'4.8'},
-
- {'search_paths':['qt-4.8', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':False},
- {'search_paths':['qt-4.8', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':False},
- {'search_paths':['qt-4.8', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':False},
-
- {'search_paths':['qt-5.0-wk2', 'qt-5.0', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':True, 'qt_version':'5.0'},
- {'search_paths':['qt-5.0-wk2', 'qt-5.0', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':True, 'qt_version':'5.0'},
- {'search_paths':['qt-5.0-wk2', 'qt-5.0', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':True, 'qt_version':'5.0'},
-
- {'search_paths':['qt-5.0-wk1', 'qt-5.0', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':False, 'qt_version':'5.0'},
- {'search_paths':['qt-5.0-wk1', 'qt-5.0', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':False, 'qt_version':'5.0'},
- {'search_paths':['qt-5.0-wk1', 'qt-5.0', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':False, 'qt_version':'5.0'},
+ {'search_paths':['qt-mac-wk2', 'qt-wk2', 'qt-5.0', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':True, 'qt_version':'5.0'},
+ {'search_paths':['qt-wk2', 'qt-5.0', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':True, 'qt_version':'5.0'},
+ {'search_paths':['qt-wk2', 'qt-5.0', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':True, 'qt_version':'5.0'},
+
+ {'search_paths':['qt-wk1', 'qt-5.0', 'qt-mac', 'qt'], 'os_name':'mac', 'use_webkit2':False, 'qt_version':'5.0'},
+ {'search_paths':['qt-wk1', 'qt-5.0', 'qt-win', 'qt'], 'os_name':'win', 'use_webkit2':False, 'qt_version':'5.0'},
+ {'search_paths':['qt-wk1', 'qt-5.0', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':False, 'qt_version':'5.0'},
+ {'search_paths':['qt-wk1', 'qt-5.1', 'qt-linux', 'qt'], 'os_name':'linux', 'use_webkit2':False, 'qt_version':'5.1'},
]
- def _assert_search_path(self, search_paths, os_name, use_webkit2=False, qt_version='4.8'):
+ def _assert_search_path(self, search_paths, os_name, use_webkit2=False, qt_version='5.0'):
# FIXME: Port constructors should not "parse" the port name, but
# rather be passed components (directly or via setters). Once
# we fix that, this method will need a re-write.
@@ -71,7 +64,7 @@ class QtPortTest(port_testcase.PortTestCase):
absolute_search_paths = map(port._webkit_baseline_path, search_paths)
self.assertEqual(port.baseline_search_path(), absolute_search_paths)
- def _assert_expectations_files(self, search_paths, os_name, use_webkit2=False, qt_version='4.8'):
+ def _assert_expectations_files(self, search_paths, os_name, use_webkit2=False, qt_version='5.0'):
# FIXME: Port constructors should not "parse" the port name, but
# rather be passed components (directly or via setters). Once
# we fix that, this method will need a re-write.
@@ -83,10 +76,10 @@ class QtPortTest(port_testcase.PortTestCase):
self.assertEqual(port.expectations_files(), search_paths)
def _qt_version(self, qt_version):
- if qt_version in '4.8':
- return 'QMake version 2.01a\nUsing Qt version 4.8.0 in /usr/local/Trolltech/Qt-4.8.2/lib'
if qt_version in '5.0':
return 'QMake version 2.01a\nUsing Qt version 5.0.0 in /usr/local/Trolltech/Qt-5.0.0/lib'
+ if qt_version in '5.1':
+ return 'QMake version 3.0\nUsing Qt version 5.1.1 in /usr/local/Qt-5.1/lib'
def test_baseline_search_path(self):
for case in self.search_paths_cases:
@@ -97,8 +90,9 @@ class QtPortTest(port_testcase.PortTestCase):
expectations_case = deepcopy(case)
if expectations_case['use_webkit2']:
expectations_case['search_paths'].append("wk2")
+ expectations_case['search_paths'].append('')
expectations_case['search_paths'].reverse()
- expectations_case['search_paths'] = map(lambda path: '/mock-checkout/LayoutTests/platform/%s/TestExpectations' % (path), expectations_case['search_paths'])
+ expectations_case['search_paths'] = map(lambda path: '/mock-checkout/LayoutTests/TestExpectations' if not path else '/mock-checkout/LayoutTests/platform/%s/TestExpectations' % (path), expectations_case['search_paths'])
self._assert_expectations_files(**expectations_case)
def test_show_results_html_file(self):
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py b/Tools/Scripts/webkitpy/port/server_process.py
index 8f0cda9ba..8184d2587 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process.py
+++ b/Tools/Scripts/webkitpy/port/server_process.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -117,6 +116,7 @@ class ServerProcess(object):
env=self._env,
universal_newlines=self._universal_newlines)
self._pid = self._proc.pid
+ self._port.find_system_pid(self.name(), self._pid)
fd = self._proc.stdout.fileno()
if not self._use_win32_apis:
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py b/Tools/Scripts/webkitpy/port/server_process_mock.py
index d234ebdc3..d234ebdc3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
+++ b/Tools/Scripts/webkitpy/port/server_process_mock.py
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py b/Tools/Scripts/webkitpy/port/server_process_unittest.py
index cd1db1f96..5ad7d0ea7 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process_unittest.py
+++ b/Tools/Scripts/webkitpy/port/server_process_unittest.py
@@ -28,10 +28,10 @@
import sys
import time
-import unittest
+import unittest2 as unittest
-from webkitpy.layout_tests.port.factory import PortFactory
-from webkitpy.layout_tests.port import server_process
+from webkitpy.port.factory import PortFactory
+from webkitpy.port import server_process
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.common.system.outputcapture import OutputCapture
@@ -140,13 +140,13 @@ class TestServerProcess(unittest.TestCase):
server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
server_process.write("should break")
self.assertTrue(server_process.has_crashed())
- self.assertNotEquals(server_process.pid(), None)
- self.assertEqual(server_process._proc, None)
+ self.assertIsNotNone(server_process.pid())
+ self.assertIsNone(server_process._proc)
self.assertEqual(server_process.broken_pipes, [server_process.stdin])
port_obj.host.platform.os_name = 'mac'
server_process = FakeServerProcess(port_obj=port_obj, name="test", cmd=["test"])
server_process.write("should break")
self.assertTrue(server_process.has_crashed())
- self.assertEqual(server_process._proc, None)
+ self.assertIsNone(server_process._proc)
self.assertEqual(server_process.broken_pipes, [server_process.stdin])
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/port/test.py
index f7dd2919e..76fea3ba1 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/port/test.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -31,8 +30,8 @@ import base64
import sys
import time
-from webkitpy.layout_tests.port import Port, Driver, DriverOutput
-from webkitpy.layout_tests.port.base import VirtualTestSuite
+from webkitpy.port import Port, Driver, DriverOutput
+from webkitpy.port.base import VirtualTestSuite
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.crashlogs import CrashLogs
@@ -98,6 +97,15 @@ class TestList(object):
def __getitem__(self, item):
return self.tests[item]
+#
+# These numbers may need to be updated whenever we add or delete tests.
+#
+TOTAL_TESTS = 106
+TOTAL_SKIPS = 28
+TOTAL_RETRIES = 14
+
+UNEXPECTED_PASSES = 6
+UNEXPECTED_FAILURES = 17
def unit_test_list():
tests = TestList()
@@ -339,6 +347,7 @@ Bug(test) passes/skipped/skip.html [ Skip ]
class TestPort(Port):
port_name = 'test'
+ default_port_name = 'test-mac-leopard'
"""Test implementation of the Port interface."""
ALL_BASELINE_VARIANTS = (
@@ -350,23 +359,20 @@ class TestPort(Port):
@classmethod
def determine_full_port_name(cls, host, options, port_name):
if port_name == 'test':
- return 'test-mac-leopard'
+ return TestPort.default_port_name
return port_name
def __init__(self, host, port_name=None, **kwargs):
- # FIXME: Consider updating all of the callers to pass in a port_name so it can be a
- # required parameter like all of the other Port objects.
- port_name = port_name or 'test-mac-leopard'
- Port.__init__(self, host, port_name, **kwargs)
+ Port.__init__(self, host, port_name or TestPort.default_port_name, **kwargs)
self._tests = unit_test_list()
self._flakes = set()
self._expectations_path = LAYOUT_TEST_DIR + '/platform/test/TestExpectations'
self._results_directory = None
self._operating_system = 'mac'
- if port_name.startswith('test-win'):
+ if self._name.startswith('test-win'):
self._operating_system = 'win'
- elif port_name.startswith('test-linux'):
+ elif self._name.startswith('test-linux'):
self._operating_system = 'linux'
version_map = {
@@ -377,7 +383,7 @@ class TestPort(Port):
'test-mac-snowleopard': 'snowleopard',
'test-linux-x86_64': 'lucid',
}
- self._version = version_map[port_name]
+ self._version = version_map[self._name]
def default_pixel_tests(self):
return True
@@ -434,9 +440,7 @@ class TestPort(Port):
def webkit_base(self):
return '/test.checkout'
- def skipped_layout_tests(self, test_list):
- # This allows us to test the handling Skipped files, both with a test
- # that actually passes, and a test that does fail.
+ def _skipped_tests_for_unsupported_features(self, test_list):
return set(['failures/expected/skip_text.html',
'failures/unexpected/skip_pass.html',
'virtual/skipped'])
@@ -536,12 +540,23 @@ class TestPort(Port):
class TestDriver(Driver):
"""Test/Dummy implementation of the DumpRenderTree interface."""
+ next_pid = 1
+
+ def __init__(self, *args, **kwargs):
+ super(TestDriver, self).__init__(*args, **kwargs)
+ self.started = False
+ self.pid = 0
def cmd_line(self, pixel_tests, per_test_args):
pixel_tests_flag = '-p' if pixel_tests else ''
return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args
def run_test(self, test_input, stop_when_done):
+ if not self.started:
+ self.started = True
+ self.pid = TestDriver.next_pid
+ TestDriver.next_pid += 1
+
start_time = time.time()
test_name = test_input.test_name
test_args = test_input.args or []
@@ -589,10 +604,7 @@ class TestDriver(Driver):
return DriverOutput(actual_text, image, test.actual_checksum, audio,
crash=test.crash or test.web_process_crash, crashed_process_name=crashed_process_name,
crashed_pid=crashed_pid, crash_log=crash_log,
- test_time=time.time() - start_time, timeout=test.timeout, error=test.error)
-
- def start(self, pixel_tests, per_test_args):
- pass
+ test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid)
def stop(self):
- pass
+ self.started = False
diff --git a/Tools/Scripts/webkitpy/port/win.py b/Tools/Scripts/webkitpy/port/win.py
new file mode 100644
index 000000000..18175d60b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/port/win.py
@@ -0,0 +1,284 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+# Copyright (C) 2013 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the Google name nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import atexit
+import os
+import logging
+import re
+import sys
+import time
+
+from webkitpy.common.system.crashlogs import CrashLogs
+from webkitpy.common.system.systemhost import SystemHost
+from webkitpy.common.system.executive import ScriptError, Executive
+from webkitpy.common.system.path import abspath_to_uri, cygpath
+from webkitpy.port.apple import ApplePort
+
+
+_log = logging.getLogger(__name__)
+
+
+class WinPort(ApplePort):
+ port_name = "win"
+
+ VERSION_FALLBACK_ORDER = ["win-xp", "win-vista", "win-7sp0", "win"]
+
+ ARCHITECTURES = ['x86']
+
+ CRASH_LOG_PREFIX = "CrashLog"
+
+ POST_MORTEM_DEBUGGER_KEY = "/HKLM/SOFTWARE/Microsoft/Windows NT/CurrentVersion/AeDebug/%s"
+
+ previous_debugger_values = {}
+
+ def do_text_results_differ(self, expected_text, actual_text):
+ # Sanity was restored in WK2, so we don't need this hack there.
+ if self.get_option('webkit_test_runner'):
+ return ApplePort.do_text_results_differ(self, expected_text, actual_text)
+
+ # This is a hack (which dates back to ORWT).
+ # Windows does not have an EDITING DELEGATE, so we strip any EDITING DELEGATE
+ # messages to make more of the tests pass.
+ # It's possible more of the ports might want this and this could move down into WebKitPort.
+ delegate_regexp = re.compile("^EDITING DELEGATE: .*?\n", re.MULTILINE)
+ expected_text = delegate_regexp.sub("", expected_text)
+ actual_text = delegate_regexp.sub("", actual_text)
+ return expected_text != actual_text
+
+ def default_baseline_search_path(self):
+ name = self._name.replace('-wk2', '')
+ if name.endswith(self.FUTURE_VERSION):
+ fallback_names = [self.port_name]
+ else:
+ fallback_names = self.VERSION_FALLBACK_ORDER[self.VERSION_FALLBACK_ORDER.index(name):-1] + [self.port_name]
+ # FIXME: The AppleWin port falls back to AppleMac for some results. Eventually we'll have a shared 'apple' port.
+ if self.get_option('webkit_test_runner'):
+ fallback_names.insert(0, 'win-wk2')
+ fallback_names.append('mac-wk2')
+ # Note we do not add 'wk2' here, even though it's included in _skipped_search_paths().
+ # FIXME: Perhaps we should get this list from MacPort?
+ fallback_names.extend(['mac-lion', 'mac'])
+ return map(self._webkit_baseline_path, fallback_names)
+
+ def operating_system(self):
+ return 'win'
+
+ def show_results_html_file(self, results_filename):
+ self._run_script('run-safari', [abspath_to_uri(SystemHost().platform, results_filename)])
+
+    # FIXME: webkitperl/httpd.pm installs /usr/lib/apache/libphp4.dll on Cygwin automatically
+ # as part of running old-run-webkit-tests. That's bad design, but we may need some similar hack.
+ # We might use setup_environ_for_server for such a hack (or modify apache_http_server.py).
+
+ def _runtime_feature_list(self):
+ supported_features_command = [self._path_to_driver(), '--print-supported-features']
+ try:
+ output = self._executive.run_command(supported_features_command, error_handler=Executive.ignore_error)
+ except OSError, e:
+ _log.warn("Exception running driver: %s, %s. Driver must be built before calling WebKitPort.test_expectations()." % (supported_features_command, e))
+ return None
+
+ # Note: win/DumpRenderTree.cpp does not print a leading space before the features_string.
+ match_object = re.match("SupportedFeatures:\s*(?P<features_string>.*)\s*", output)
+ if not match_object:
+ return None
+ return match_object.group('features_string').split(' ')
+
+ # Note: These are based on the stock Cygwin locations for these files.
+ def _uses_apache(self):
+ return False
+
+ def _path_to_lighttpd(self):
+ return "/usr/sbin/lighttpd"
+
+ def _path_to_lighttpd_modules(self):
+ return "/usr/lib/lighttpd"
+
+ def _path_to_lighttpd_php(self):
+ return "/usr/bin/php-cgi"
+
+ def _driver_tempdir_for_environment(self):
+ return cygpath(self._driver_tempdir())
+
+ def test_search_path(self):
+ test_fallback_names = [path for path in self.baseline_search_path() if not path.startswith(self._webkit_baseline_path('mac'))]
+ return map(self._webkit_baseline_path, test_fallback_names)
+
+ def _ntsd_location(self):
+ possible_paths = [self._filesystem.join(os.environ['PROGRAMFILES'], "Windows Kits", "8.0", "Debuggers", "x86", "ntsd.exe"),
+ self._filesystem.join(os.environ['PROGRAMFILES'], "Windows Kits", "8.0", "Debuggers", "x64", "ntsd.exe"),
+ self._filesystem.join(os.environ['PROGRAMFILES'], "Debugging Tools for Windows (x86)", "ntsd.exe"),
+ self._filesystem.join(os.environ['ProgramW6432'], "Debugging Tools for Windows (x64)", "ntsd.exe"),
+ self._filesystem.join(os.environ['SYSTEMROOT'], "system32", "ntsd.exe")]
+ for path in possible_paths:
+ expanded_path = self._filesystem.expanduser(path)
+ if self._filesystem.exists(expanded_path):
+ _log.debug("Using ntsd located in '%s'" % path)
+ return expanded_path
+ return None
+
+ def create_debugger_command_file(self):
+ debugger_temp_directory = str(self._filesystem.mkdtemp())
+ command_file = self._filesystem.join(debugger_temp_directory, "debugger-commands.txt")
+ commands = ''.join(['.logopen /t "%s\\%s.txt"\n' % (cygpath(self.results_directory()), self.CRASH_LOG_PREFIX),
+ '.srcpath "%s"\n' % cygpath(self._webkit_finder.webkit_base()),
+ '!analyze -vv\n',
+ '~*kpn\n',
+ 'q\n'])
+ self._filesystem.write_text_file(command_file, commands)
+ return command_file
+
+ def read_registry_string(self, key):
+ registry_key = self.POST_MORTEM_DEBUGGER_KEY % key
+ read_registry_command = ["regtool", "--wow32", "get", registry_key]
+ value = self._executive.run_command(read_registry_command, error_handler=Executive.ignore_error)
+ return value.rstrip()
+
+ def write_registry_string(self, key, value):
+ registry_key = self.POST_MORTEM_DEBUGGER_KEY % key
+ set_reg_value_command = ["regtool", "--wow32", "set", "-s", str(registry_key), str(value)]
+ rc = self._executive.run_command(set_reg_value_command, return_exit_code=True)
+ if rc == 2:
+ add_reg_value_command = ["regtool", "--wow32", "add", "-s", str(registry_key)]
+ rc = self._executive.run_command(add_reg_value_command, return_exit_code=True)
+ if rc == 0:
+ rc = self._executive.run_command(set_reg_value_command, return_exit_code=True)
+ if rc:
+ _log.warn("Error setting key: %s to value %s. Error=%ld." % (key, value, rc))
+ return False
+
+ # On Windows Vista/7 with UAC enabled, regtool will fail to modify the registry, but will still
+ # return a successful exit code. So we double-check here that the value we tried to write to the
+ # registry was really written.
+ if self.read_registry_string(key) != value:
+ _log.warn("Regtool reported success, but value of key %s did not change." % key)
+ return False
+
+ return True
+
+ def setup_crash_log_saving(self):
+ if '_NT_SYMBOL_PATH' not in os.environ:
+ _log.warning("The _NT_SYMBOL_PATH environment variable is not set. Crash logs will not be saved.")
+ return None
+ ntsd_path = self._ntsd_location()
+ if not ntsd_path:
+ _log.warning("Can't find ntsd.exe. Crash logs will not be saved.")
+ return None
+ # If we used -c (instead of -cf) we could pass the commands directly on the command line. But
+ # when the commands include multiple quoted paths (e.g., for .logopen and .srcpath), Windows
+ # fails to invoke the post-mortem debugger at all (perhaps due to a bug in Windows's command
+ # line parsing). So we save the commands to a file instead and tell the debugger to execute them
+ # using -cf.
+ command_file = self.create_debugger_command_file()
+ if not command_file:
+ return None
+ debugger_options = '"{0}" -p %ld -e %ld -g -noio -lines -cf "{1}"'.format(cygpath(ntsd_path), cygpath(command_file))
+ registry_settings = {'Debugger': debugger_options, 'Auto': "1"}
+ for key in registry_settings:
+ self.previous_debugger_values[key] = self.read_registry_string(key)
+ self.write_registry_string(key, registry_settings[key])
+
+ def restore_crash_log_saving(self):
+ for key in self.previous_debugger_values:
+ self.write_registry_string(key, self.previous_debugger_values[key])
+
+ def setup_test_run(self):
+ atexit.register(self.restore_crash_log_saving)
+ self.setup_crash_log_saving()
+ super(WinPort, self).setup_test_run()
+
+ def clean_up_test_run(self):
+ self.restore_crash_log_saving()
+ super(WinPort, self).clean_up_test_run()
+
+ def _get_crash_log(self, name, pid, stdout, stderr, newer_than, time_fn=None, sleep_fn=None, wait_for_log=True):
+ # Note that we do slow-spin here and wait, since it appears the time
+ # ReportCrash takes to actually write and flush the file varies when there are
+ # lots of simultaneous crashes going on.
+ # FIXME: Should most of this be moved into CrashLogs()?
+ time_fn = time_fn or time.time
+ sleep_fn = sleep_fn or time.sleep
+ crash_log = ''
+ crash_logs = CrashLogs(self.host, self.results_directory())
+ now = time_fn()
+ # FIXME: delete this after we're sure this code is working ...
+ _log.debug('looking for crash log for %s:%s' % (name, str(pid)))
+ deadline = now + 5 * int(self.get_option('child_processes', 1))
+ while not crash_log and now <= deadline:
+ # If the system_pid hasn't been determined yet, just try with the passed in pid. We'll be checking again later
+ system_pid = self._executive.pid_to_system_pid.get(pid)
+ if system_pid == None:
+ break # We haven't mapped cygwin pid->win pid yet
+ crash_log = crash_logs.find_newest_log(name, system_pid, include_errors=True, newer_than=newer_than)
+ if not wait_for_log:
+ break
+ if not crash_log or not [line for line in crash_log.splitlines() if line.startswith('quit:')]:
+ sleep_fn(0.1)
+ now = time_fn()
+
+ if not crash_log:
+ return (stderr, None)
+ return (stderr, crash_log)
+
+ def look_for_new_crash_logs(self, crashed_processes, start_time):
+ """Since crash logs can take a long time to be written out if the system is
+           under stress, do a second pass at the end of the test run.
+
+           crashed_processes: (test_name, process_name, pid) tuples for the processes that crashed
+ start_time: time the tests started at. We're looking for crash
+ logs after that time.
+ """
+ crash_logs = {}
+ for (test_name, process_name, pid) in crashed_processes:
+ # Passing None for output. This is a second pass after the test finished so
+ # if the output had any logging we would have already collected it.
+ crash_log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)[1]
+ if crash_log:
+ crash_logs[test_name] = crash_log
+ return crash_logs
+
+ def find_system_pid(self, name, pid):
+ system_pid = int(pid)
+ # Windows and Cygwin PIDs are not the same. We need to find the Windows
+ # PID for our Cygwin process so we can match it later to any crash
+ # files we end up creating (which will be tagged with the Windows PID)
+ ps_process = self._executive.run_command(['ps', '-e'], error_handler=Executive.ignore_error)
+ for line in ps_process.splitlines():
+ tokens = line.strip().split()
+ try:
+ cpid, ppid, pgid, winpid, tty, uid, stime, process_name = tokens
+ if process_name.endswith(name):
+ self._executive.pid_to_system_pid[int(cpid)] = int(winpid)
+ if int(pid) == int(cpid):
+ system_pid = int(winpid)
+ break
+ except ValueError, e:
+ pass
+
+ return system_pid
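For reference, a minimal sketch (not from the patch) of the Cygwin 'ps -e' line format that find_system_pid() above relies on; the sample values are invented:

    # Hypothetical 'ps -e' output line: PID, PPID, PGID, WINPID, TTY, UID, STIME, COMMAND.
    sample = "     1234    5678    1234       9012  pty0     1000 10:00:00 /usr/bin/DumpRenderTree"
    cpid, ppid, pgid, winpid, tty, uid, stime, process_name = sample.strip().split()
    assert process_name.endswith('DumpRenderTree') and int(winpid) == 9012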
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py b/Tools/Scripts/webkitpy/port/win_unittest.py
index 9def7246a..97da3a1b2 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/win_unittest.py
+++ b/Tools/Scripts/webkitpy/port/win_unittest.py
@@ -27,15 +27,15 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import StringIO
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.executive_mock import MockExecutive, MockExecutive2
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.port import port_testcase
-from webkitpy.layout_tests.port.win import WinPort
+from webkitpy.port import port_testcase
+from webkitpy.port.win import WinPort
from webkitpy.tool.mocktool import MockOptions
@@ -105,5 +105,14 @@ class WinPortTest(port_testcase.PortTestCase):
self.assertEqual(port._runtime_feature_list(), ['foo', 'bar'])
def test_expectations_files(self):
- self.assertEqual(len(self.make_port().expectations_files()), 2)
- self.assertEqual(len(self.make_port(options=MockOptions(webkit_test_runner=True, configuration='Release')).expectations_files()), 4)
+ self.assertEqual(len(self.make_port().expectations_files()), 3)
+ self.assertEqual(len(self.make_port(options=MockOptions(webkit_test_runner=True, configuration='Release')).expectations_files()), 5)
+
+ def test_get_crash_log(self):
+ # Win crash logs are tested elsewhere, so here we just make sure we don't crash.
+ def fake_time_cb():
+ times = [0, 20, 40]
+ return lambda: times.pop(0)
+ port = self.make_port(port_name='win')
+ port._get_crash_log('DumpRenderTree', 1234, '', '', 0,
+ time_fn=fake_time_cb(), sleep_fn=lambda delay: None)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py b/Tools/Scripts/webkitpy/port/xvfbdriver.py
index b98c0392e..fc6675029 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver.py
+++ b/Tools/Scripts/webkitpy/port/xvfbdriver.py
@@ -31,14 +31,21 @@ import os
import re
import time
-from webkitpy.layout_tests.port.server_process import ServerProcess
-from webkitpy.layout_tests.port.driver import Driver
+from webkitpy.port.server_process import ServerProcess
+from webkitpy.port.driver import Driver
from webkitpy.common.system.file_lock import FileLock
_log = logging.getLogger(__name__)
class XvfbDriver(Driver):
+ @staticmethod
+ def check_xvfb(port):
+ xvfb_found = port.host.executive.run_command(['which', 'Xvfb'], return_exit_code=True) is 0
+ if not xvfb_found:
+ _log.error("No Xvfb found. Cannot run layout tests.")
+ return xvfb_found
+
def __init__(self, *args, **kwargs):
Driver.__init__(self, *args, **kwargs)
self._guard_lock = None
@@ -54,11 +61,13 @@ class XvfbDriver(Driver):
for i in range(99):
if i not in reserved_screens:
_guard_lock_file = self._port.host.filesystem.join('/tmp', 'WebKitXvfb.lock.%i' % i)
- self._guard_lock = FileLock(_guard_lock_file)
+ self._guard_lock = self._port.host.make_file_lock(_guard_lock_file)
if self._guard_lock.acquire_lock():
return i
def _start(self, pixel_tests, per_test_args):
+ self.stop()
+
# Use even displays for pixel tests and odd ones otherwise. When pixel tests are disabled,
# DriverProxy creates two drivers, one for normal and the other for ref tests. Both have
# the same worker number, so this prevents them from using the same Xvfb instance.
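A minimal sketch (not from the patch) of the even/odd display convention described in the comment above; the helper name and formula are assumptions for illustration only:

    # Pixel-test drivers take even display numbers, non-pixel drivers odd ones, so the two
    # drivers that DriverProxy creates for the same worker never share an Xvfb display.
    def _hypothetical_display_id(worker_number, pixel_tests):
        return worker_number * 2 + (0 if pixel_tests else 1)

    assert _hypothetical_display_id(0, True) == 0
    assert _hypothetical_display_id(0, False) == 1
    assert _hypothetical_display_id(3, True) == 6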
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver_unittest.py b/Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py
index 241b37c1f..ec9f14c45 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/xvfbdriver_unittest.py
+++ b/Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py
@@ -27,15 +27,15 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
-from webkitpy.layout_tests.port import Port
-from webkitpy.layout_tests.port.server_process_mock import MockServerProcess
-from webkitpy.layout_tests.port.xvfbdriver import XvfbDriver
+from webkitpy.port import Port
+from webkitpy.port.server_process_mock import MockServerProcess
+from webkitpy.port.xvfbdriver import XvfbDriver
from webkitpy.tool.mocktool import MockOptions
_log = logging.getLogger(__name__)
@@ -43,7 +43,7 @@ _log = logging.getLogger(__name__)
class XvfbDriverTest(unittest.TestCase):
def make_driver(self, worker_number=0, xorg_running=False, executive=None):
- port = Port(host=MockSystemHost(log_executive=True, executive=executive), options=MockOptions(configuration='Release'))
+ port = Port(MockSystemHost(log_executive=True, executive=executive), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._config.build_directory = lambda configuration: "/mock-build"
port._server_process_constructor = MockServerProcess
if xorg_running:
@@ -82,7 +82,7 @@ class XvfbDriverTest(unittest.TestCase):
self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
self.cleanup_driver(driver)
- def disabled_test_next_free_display(self):
+ def test_next_free_display(self):
output = "Xorg /usr/bin/X :0 -auth /var/run/lightdm/root/:0 -nolisten tcp vt7 -novtswitch -background none\nXvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp"
executive = MockExecutive2(output)
driver = self.make_driver(executive=executive)
@@ -118,7 +118,7 @@ class XvfbDriverTest(unittest.TestCase):
def test_stop(self):
filesystem = MockFileSystem(files={'/tmp/.X42-lock': '1234\n'})
- port = Port(host=MockSystemHost(log_executive=True, filesystem=filesystem), options=MockOptions(configuration='Release'))
+ port = Port(MockSystemHost(log_executive=True, filesystem=filesystem), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
port._executive.kill_process = lambda x: _log.info("MOCK kill_process pid: " + str(x))
driver = XvfbDriver(port, worker_number=0, pixel_tests=True)
@@ -131,5 +131,5 @@ class XvfbDriverTest(unittest.TestCase):
expected_logs = "MOCK kill_process pid: 1234\n"
OutputCapture().assert_outputs(self, driver.stop, [], expected_logs=expected_logs)
- self.assertEqual(driver._xvfb_process, None)
+ self.assertIsNone(driver._xvfb_process)
self.assertFalse(port._filesystem.exists(driver._lock_file))
diff --git a/Tools/Scripts/webkitpy/style/checker.py b/Tools/Scripts/webkitpy/style/checker.py
index 7a1518b5b..f9e4a4695 100644
--- a/Tools/Scripts/webkitpy/style/checker.py
+++ b/Tools/Scripts/webkitpy/style/checker.py
@@ -39,6 +39,7 @@ from checkers.common import categories as CommonCategories
from checkers.common import CarriageReturnChecker
from checkers.changelog import ChangeLogChecker
from checkers.cpp import CppChecker
+from checkers.cmake import CMakeChecker
from checkers.jsonchecker import JSONChecker
from checkers.png import PNGChecker
from checkers.python import PythonChecker
@@ -164,6 +165,11 @@ _PATH_RULES_SPECIFIER = [
"Source/WebKit2/UIProcess/API/qt"],
["-readability/parameter_name"]),
+ ([# The GTK+ port uses the autotoolsconfig.h header in some C sources
+      # to serve the same purpose as config.h.
+ "Tools/GtkLauncher/main.c"],
+ ["-build/include_order"]),
+
([# The GTK+ APIs use GTK+ naming style, which includes
# lower-cased, underscore-separated values, whitespace before
# parens for function calls, and always having variable names.
@@ -174,7 +180,15 @@ _PATH_RULES_SPECIFIER = [
["-readability/naming",
"-readability/parameter_name",
"-readability/null",
+ "-readability/enum_casing",
"-whitespace/parens"]),
+
+    ([# The GTK+ APIs use upper-case, underscore-separated words in
+ # certain types of enums (e.g. signals, properties).
+ "Source/WebKit2/UIProcess/API/gtk",
+ "Source/WebKit2/WebProcess/InjectedBundle/API/gtk"],
+ ["-readability/enum_casing"]),
+
([# Header files in ForwardingHeaders have no header guards or
# exceptional header guards (e.g., WebCore_FWD_Debugger_h).
"/ForwardingHeaders/"],
@@ -213,14 +227,19 @@ _PATH_RULES_SPECIFIER = [
([# The WebKit2 C API has names with underscores and whitespace-aligned
# struct members. Also, we allow unnecessary parameter names in
# WebKit2 APIs because we're matching CF's header style.
+      # Additionally, we use words which start with the lowercase letter 'k'
+      # for enum values.
"Source/WebKit2/UIProcess/API/C/",
"Source/WebKit2/Shared/API/c/",
"Source/WebKit2/WebProcess/InjectedBundle/API/c/"],
- ["-readability/naming",
+ ["-readability/enum_casing",
+ "-readability/naming",
"-readability/parameter_name",
"-whitespace/declaration"]),
([# These files define GObjects, which implies some definitions of
# variables and functions containing underscores.
+ "Source/WebCore/platform/graphics/clutter/GraphicsLayerActor.cpp",
+ "Source/WebCore/platform/graphics/clutter/GraphicsLayerActor.h",
"Source/WebCore/platform/graphics/gstreamer/VideoSinkGStreamer1.cpp",
"Source/WebCore/platform/graphics/gstreamer/VideoSinkGStreamer.cpp",
"Source/WebCore/platform/graphics/gstreamer/WebKitWebSourceGStreamer.cpp",
@@ -306,6 +325,8 @@ _XML_FILE_EXTENSIONS = [
_PNG_FILE_EXTENSION = 'png'
+_CMAKE_FILE_EXTENSION = 'cmake'
+
# Files to skip that are less obvious.
#
# Some files should be skipped when checking style. For example,
@@ -317,7 +338,9 @@ _SKIPPED_FILES_WITH_WARNING = [
# except those ending in ...Private.h are GTK+ API headers,
# which differ greatly from WebKit coding style.
re.compile(r'Source/WebKit2/UIProcess/API/gtk/WebKit(?!.*Private\.h).*\.h$'),
- 'Source/WebKit2/UIProcess/API/gtk/webkit2.h']
+ re.compile(r'Source/WebKit2/WebProcess/InjectedBundle/API/gtk/WebKit(?!.*Private\.h).*\.h$'),
+ 'Source/WebKit2/UIProcess/API/gtk/webkit2.h',
+ 'Source/WebKit2/WebProcess/InjectedBundle/API/gtk/webkit-web-extension.h']
# Files to skip that are more common or obvious.
#
@@ -325,6 +348,9 @@ _SKIPPED_FILES_WITH_WARNING = [
# with FileType.NONE are automatically skipped without warning.
_SKIPPED_FILES_WITHOUT_WARNING = [
"LayoutTests" + os.path.sep,
+ "Source/ThirdParty/leveldb" + os.path.sep,
+    # Prevents this from being recognized as a text file.
+ "Source/WebCore/GNUmakefile.features.am.in",
]
# Extensions of files which are allowed to contain carriage returns.
@@ -496,6 +522,7 @@ class FileType:
WATCHLIST = 7
XML = 8
XCODEPROJ = 9
+ CMAKE = 10
class CheckerDispatcher(object):
@@ -574,6 +601,8 @@ class CheckerDispatcher(object):
return FileType.XCODEPROJ
elif file_extension == _PNG_FILE_EXTENSION:
return FileType.PNG
+ elif ((file_extension == _CMAKE_FILE_EXTENSION) or os.path.basename(file_path) == 'CMakeLists.txt'):
+ return FileType.CMAKE
elif ((not file_extension and os.path.join("Tools", "Scripts") in file_path) or
file_extension in _TEXT_FILE_EXTENSIONS or os.path.basename(file_path) == 'TestExpectations'):
return FileType.TEXT
@@ -604,6 +633,8 @@ class CheckerDispatcher(object):
checker = XcodeProjectFileChecker(file_path, handle_style_error)
elif file_type == FileType.PNG:
checker = PNGChecker(file_path, handle_style_error)
+ elif file_type == FileType.CMAKE:
+ checker = CMakeChecker(file_path, handle_style_error)
elif file_type == FileType.TEXT:
basename = os.path.basename(file_path)
if basename == 'TestExpectations':
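A minimal sketch (not part of the patch) of the file-classification rule added above; the sample paths are assumptions:

    import os

    _CMAKE_FILE_EXTENSION = 'cmake'

    def _looks_like_cmake_file(file_path):
        # Mirrors the new dispatch rule: *.cmake files and any CMakeLists.txt map to FileType.CMAKE.
        file_extension = os.path.splitext(file_path)[1].lstrip('.')
        return file_extension == _CMAKE_FILE_EXTENSION or os.path.basename(file_path) == 'CMakeLists.txt'

    assert _looks_like_cmake_file('Source/cmake/OptionsCommon.cmake')
    assert _looks_like_cmake_file('Source/WebCore/CMakeLists.txt')
    assert not _looks_like_cmake_file('Tools/Scripts/webkitpy/style/checker.py')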
diff --git a/Tools/Scripts/webkitpy/style/checker_unittest.py b/Tools/Scripts/webkitpy/style/checker_unittest.py
index 3b7d94c02..9a139c7c8 100755..100644
--- a/Tools/Scripts/webkitpy/style/checker_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checker_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# Copyright (C) 2009 Google Inc. All rights reserved.
@@ -36,7 +35,7 @@
import logging
import os
-import unittest
+import unittest2 as unittest
import checker as style
from webkitpy.common.system.logtesting import LogTesting, TestLogStream
@@ -174,7 +173,7 @@ class GlobalVariablesTest(unittest.TestCase):
# begin with -.
self.assertTrue(rule.startswith('-'))
# Check no rule occurs twice.
- self.assertFalse(rule in already_seen)
+ self.assertNotIn(rule, already_seen)
already_seen.append(rule)
def test_defaults(self):
@@ -275,8 +274,8 @@ class GlobalVariablesTest(unittest.TestCase):
"""Check that _MAX_REPORTS_PER_CATEGORY is valid."""
all_categories = self._all_categories()
for category in _MAX_REPORTS_PER_CATEGORY.iterkeys():
- self.assertTrue(category in all_categories,
- 'Key "%s" is not a category' % category)
+ self.assertIn(category, all_categories,
+ 'Key "%s" is not a category' % category)
class CheckWebKitStyleFunctionTest(unittest.TestCase):
@@ -397,7 +396,7 @@ class CheckerDispatcherDispatchTest(unittest.TestCase):
def assert_checker_none(self, file_path):
"""Assert that the dispatched checker is None."""
checker = self.dispatch(file_path)
- self.assertTrue(checker is None, 'Checking: "%s"' % file_path)
+ self.assertIsNone(checker, 'Checking: "%s"' % file_path)
def assert_checker(self, file_path, expected_class):
"""Assert the type of the dispatched checker."""
diff --git a/Tools/Scripts/webkitpy/style/checkers/changelog.py b/Tools/Scripts/webkitpy/style/checkers/changelog.py
index a096d3f46..699bc3dea 100644
--- a/Tools/Scripts/webkitpy/style/checkers/changelog.py
+++ b/Tools/Scripts/webkitpy/style/checkers/changelog.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright (C) 2011 Patrick Gansterer <paroga@paroga.com>
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py b/Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py
index 315cd91b2..0ec0ec2da 100644
--- a/Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/changelog_unittest.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright (C) 2010 Apple Inc. All rights reserved.
# Copyright (C) 2011 Patrick Gansterer <paroga@paroga.com>
#
@@ -26,7 +24,7 @@
"""Unit test for changelog.py."""
import changelog
-import unittest
+import unittest2 as unittest
class ChangeLogCheckerTest(unittest.TestCase):
@@ -182,6 +180,3 @@ class ChangeLogCheckerTest(unittest.TestCase):
' * Source/WebKit/foo.cpp: \n'
' * Source/WebKit/bar.cpp:\n'
' * Source/WebKit/foobar.cpp: Description\n')
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/cmake.py b/Tools/Scripts/webkitpy/style/checkers/cmake.py
new file mode 100644
index 000000000..06b8929fa
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/cmake.py
@@ -0,0 +1,150 @@
+# Copyright (C) 2012 Intel Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""Supports checking WebKit style in CMake files (.cmake, CMakeLists.txt)."""
+
+import re
+
+from common import TabChecker
+
+
+class CMakeChecker(object):
+
+ """Processes CMake lines for checking style."""
+
+    # The NO_SPACE_CMDS list is based on the commands section of the CMake
+    # documentation, currently generated from
+    # http://www.cmake.org/cmake/help/v2.8.10/cmake.html#section_Commands.
+    # Some commands come from default CMake modules such as pkg_check_modules.
+    # Please keep the list in alphabetical order.
+ #
+    # For commands in this list, no space should be added between the command
+    # and its parentheses. For example, message("testing"), not message ("testing").
+ #
+ # The conditional commands like if, else, endif, foreach, endforeach,
+ # while, endwhile and break are listed in ONE_SPACE_CMDS
+ NO_SPACE_CMDS = [
+ 'add_custom_command', 'add_custom_target', 'add_definitions',
+ 'add_dependencies', 'add_executable', 'add_library',
+ 'add_subdirectory', 'add_test', 'aux_source_directory',
+ 'build_command',
+ 'cmake_minimum_required', 'cmake_policy', 'configure_file',
+ 'create_test_sourcelist',
+ 'define_property',
+ 'enable_language', 'enable_testing', 'endfunction', 'endmacro',
+ 'execute_process', 'export',
+ 'file', 'find_file', 'find_library', 'find_package', 'find_path',
+ 'find_program', 'fltk_wrap_ui', 'function',
+ 'get_cmake_property', 'get_directory_property',
+ 'get_filename_component', 'get_property', 'get_source_file_property',
+ 'get_target_property', 'get_test_property',
+ 'include', 'include_directories', 'include_external_msproject',
+ 'include_regular_expression', 'install',
+ 'link_directories', 'list', 'load_cache', 'load_command',
+ 'macro', 'mark_as_advanced', 'math', 'message',
+ 'option',
+        # From FindPkgConfig.cmake
+ 'pkg_check_modules',
+ 'project',
+ 'qt_wrap_cpp', 'qt_wrap_ui',
+ 'remove_definitions', 'return',
+ 'separate_arguments', 'set', 'set_directory_properties', 'set_property',
+ 'set_source_files_properties', 'set_target_properties',
+ 'set_tests_properties', 'site_name', 'source_group', 'string',
+ 'target_link_libraries', 'try_compile', 'try_run',
+ 'unset',
+ 'variable_watch',
+ ]
+
+    # CMake conditional commands require one space between the command and
+ # its parentheses, such as "if (", "foreach (", etc.
+ ONE_SPACE_CMDS = [
+ 'if', 'else', 'elseif', 'endif',
+ 'foreach', 'endforeach',
+ 'while', 'endwhile',
+ 'break',
+ ]
+
+ def __init__(self, file_path, handle_style_error):
+ self._handle_style_error = handle_style_error
+ self._tab_checker = TabChecker(file_path, handle_style_error)
+
+ def check(self, lines):
+ self._tab_checker.check(lines)
+ self._num_lines = len(lines)
+ for l in xrange(self._num_lines):
+ self._process_line(l + 1, lines[l])
+
+ def _process_line(self, line_number, line_content):
+ if re.match('(^|\ +)#', line_content):
+ # ignore comment line
+ return
+ l = line_content.expandtabs(4)
+ # check command like message( "testing")
+ if re.search('\(\ +', l):
+ self._handle_style_error(line_number, 'whitespace/parentheses', 5,
+ 'No space after "("')
+ # check command like message("testing" )
+ if re.search('\ +\)', l) and not re.search('^\ +\)$', l):
+ self._handle_style_error(line_number, 'whitespace/parentheses', 5,
+ 'No space before ")"')
+ self._check_trailing_whitespace(line_number, l)
+ self._check_no_space_cmds(line_number, l)
+ self._check_one_space_cmds(line_number, l)
+ self._check_indent(line_number, line_content)
+
+ def _check_trailing_whitespace(self, line_number, line_content):
+ line_content = line_content.rstrip('\n') # chr(10), newline
+ line_content = line_content.rstrip('\r') # chr(13), carriage return
+ line_content = line_content.rstrip('\x0c') # chr(12), form feed, ^L
+ stripped = line_content.rstrip()
+ if line_content != stripped:
+ self._handle_style_error(line_number, 'whitespace/trailing', 5,
+ 'No trailing spaces')
+
+ def _check_no_space_cmds(self, line_number, line_content):
+ # check command like "SET (" or "Set("
+ for t in self.NO_SPACE_CMDS:
+ self._check_non_lowercase_cmd(line_number, line_content, t)
+ if re.search('(^|\ +)' + t.lower() + '\ +\(', line_content):
+ msg = 'No space between command "' + t.lower() + '" and its parentheses, should be "' + t + '("'
+ self._handle_style_error(line_number, 'whitespace/parentheses', 5, msg)
+
+ def _check_one_space_cmds(self, line_number, line_content):
+ # check command like "IF (" or "if(" or "if (" or "If ()"
+ for t in self.ONE_SPACE_CMDS:
+ self._check_non_lowercase_cmd(line_number, line_content, t)
+ if re.search('(^|\ +)' + t.lower() + '(\(|\ \ +\()', line_content):
+ msg = 'One space between command "' + t.lower() + '" and its parentheses, should be "' + t + ' ("'
+ self._handle_style_error(line_number, 'whitespace/parentheses', 5, msg)
+
+ def _check_non_lowercase_cmd(self, line_number, line_content, cmd):
+ if re.search('(^|\ +)' + cmd + '\ *\(', line_content, flags=re.IGNORECASE) and \
+ (not re.search('(^|\ +)' + cmd.lower() + '\ *\(', line_content)):
+ msg = 'Use lowercase command "' + cmd.lower() + '"'
+ self._handle_style_error(line_number, 'command/lowercase', 5, msg)
+
+ def _check_indent(self, line_number, line_content):
+ #TODO (halton): add indent checking
+ pass
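A minimal usage sketch (not from the patch, and assuming the style checkers directory is importable) showing how the checker reports violations through its error handler; the full coverage lives in the unit test that follows:

    from cmake import CMakeChecker

    errors = []

    def handle_style_error(line_number, category, confidence, message):
        errors.append((line_number, category, message))

    checker = CMakeChecker('CMakeLists.txt', handle_style_error)
    checker.check(['if()\n', 'set (FOO 1)\n'])
    # Expected: one whitespace/parentheses error for 'if()' (conditional commands take one
    # space before '(') and one for 'set (' (non-conditional commands take none).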
diff --git a/Tools/Scripts/webkitpy/style/checkers/cmake_unittest.py b/Tools/Scripts/webkitpy/style/checkers/cmake_unittest.py
new file mode 100644
index 000000000..7b2cdaf0e
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/cmake_unittest.py
@@ -0,0 +1,90 @@
+# Copyright (C) 2012 Intel Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Unit test for cmake.py."""
+
+import unittest2 as unittest
+
+from cmake import CMakeChecker
+
+
+class CMakeCheckerTest(unittest.TestCase):
+
+ """Tests CMakeChecker class."""
+
+ def test_init(self):
+ """Test __init__() method."""
+ def _mock_handle_style_error(self):
+ pass
+
+ checker = CMakeChecker("foo.cmake", _mock_handle_style_error)
+ self.assertEqual(checker._handle_style_error, _mock_handle_style_error)
+
+ def test_check(self):
+ """Test check() method."""
+ errors = []
+
+ def _mock_handle_style_error(line_number, category, confidence,
+ message):
+ error = (line_number, category, confidence, message)
+ errors.append(error)
+
+ checker = CMakeChecker("foo.cmake", _mock_handle_style_error)
+
+ lines = [
+            '# This file is sample input for cmake_unittest.py and includes the problems below:\n',
+ 'IF ()',
+ '\tmessage("Error line with Tab")\n',
+ ' message("Error line with endding spaces") \n',
+ ' message( "Error line with space after (")\n',
+ ' message("Error line with space before (" )\n',
+            '    MESSAGE("Error line with upper case non-conditional command")\n',
+            '    MESSage("Error line with upper case non-conditional command")\n',
+ ' message("correct message line")\n',
+ 'ENDif ()\n',
+ '\n',
+ 'if()\n',
+ 'endif ()\n',
+ '\n',
+ 'macro ()\n',
+ 'ENDMacro()\n',
+ '\n',
+ 'function ()\n',
+ 'endfunction()\n',
+ ]
+ checker.check(lines)
+
+ self.maxDiff = None
+ self.assertEqual(errors, [
+ (3, 'whitespace/tab', 5, 'Line contains tab character.'),
+ (2, 'command/lowercase', 5, 'Use lowercase command "if"'),
+ (4, 'whitespace/trailing', 5, 'No trailing spaces'),
+ (5, 'whitespace/parentheses', 5, 'No space after "("'),
+ (6, 'whitespace/parentheses', 5, 'No space before ")"'),
+ (7, 'command/lowercase', 5, 'Use lowercase command "message"'),
+ (8, 'command/lowercase', 5, 'Use lowercase command "message"'),
+ (10, 'command/lowercase', 5, 'Use lowercase command "endif"'),
+ (12, 'whitespace/parentheses', 5, 'One space between command "if" and its parentheses, should be "if ("'),
+ (15, 'whitespace/parentheses', 5, 'No space between command "macro" and its parentheses, should be "macro("'),
+ (16, 'command/lowercase', 5, 'Use lowercase command "endmacro"'),
+ (18, 'whitespace/parentheses', 5, 'No space between command "function" and its parentheses, should be "function("'),
+ ])
diff --git a/Tools/Scripts/webkitpy/style/checkers/common_unittest.py b/Tools/Scripts/webkitpy/style/checkers/common_unittest.py
index df4707730..8449b989b 100644
--- a/Tools/Scripts/webkitpy/style/checkers/common_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/common_unittest.py
@@ -22,7 +22,7 @@
"""Unit tests for common.py."""
-import unittest
+import unittest2 as unittest
from common import CarriageReturnChecker
from common import TabChecker
@@ -119,6 +119,3 @@ class TabCheckerTest(unittest.TestCase):
def test_tab(self):
self.assert_tab(['\tfoo'], [1])
self.assert_tab(['line1', '\tline2', 'line3\t'], [2, 3])
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp.py b/Tools/Scripts/webkitpy/style/checkers/cpp.py
index c99cbea3d..d67e69ad8 100644
--- a/Tools/Scripts/webkitpy/style/checkers/cpp.py
+++ b/Tools/Scripts/webkitpy/style/checkers/cpp.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009, 2010, 2012 Google Inc. All rights reserved.
@@ -649,13 +648,16 @@ class FileInfo:
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
- # Not SVN? Try to find a git top level directory by
+
+        # Not SVN <= 1.6? Try to find a git or svn top-level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir)
- and not os.path.exists(os.path.join(root_dir, ".git"))):
+ and not os.path.exists(os.path.join(root_dir, ".git"))
+ and not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
- if os.path.exists(os.path.join(root_dir, ".git")):
+ if (os.path.exists(os.path.join(root_dir, ".git")) or
+ os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
@@ -1217,6 +1219,7 @@ class _EnumState(object):
def __init__(self):
self.in_enum_decl = False
+ self.is_webidl_enum = False
def process_clean_line(self, line):
# FIXME: The regular expressions for expr_all_uppercase and expr_enum_end only accept integers
@@ -1229,21 +1232,27 @@ class _EnumState(object):
if self.in_enum_decl:
if match(r'\s*' + expr_enum_end + r'$', line):
self.in_enum_decl = False
+ self.is_webidl_enum = False
elif match(expr_all_uppercase, line):
- return False
+ return self.is_webidl_enum
elif match(expr_starts_lowercase, line):
return False
else:
- if match(expr_enum_start + r'$', line):
+ matched = match(expr_enum_start + r'$', line)
+ if matched:
self.in_enum_decl = True
else:
matched = match(expr_enum_start + r'(?P<members>.*)' + expr_enum_end + r'$', line)
if matched:
members = matched.group('members').split(',')
+ found_invalid_member = False
for member in members:
if match(expr_all_uppercase, member):
- return False
+ found_invalid_member = not self.is_webidl_enum
if match(expr_starts_lowercase, member):
+ found_invalid_member = True
+ if found_invalid_member:
+ self.is_webidl_enum = False
return False
return True
return True
@@ -1656,20 +1665,6 @@ def check_function_definition(filename, file_extension, clean_lines, line_number
return
modifiers_and_return_type = function_state.modifiers_and_return_type()
- if filename.find('/chromium/') != -1 and search(r'\bWEBKIT_EXPORT\b', modifiers_and_return_type):
- if filename.find('/chromium/public/') == -1 and filename.find('/chromium/tests/') == -1 and filename.find('chromium/platform') == -1:
- error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
- 'WEBKIT_EXPORT should only appear in the chromium public (or tests) directory.')
- elif not file_extension == "h":
- error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
- 'WEBKIT_EXPORT should only be used in header files.')
- elif not function_state.is_declaration or search(r'\binline\b', modifiers_and_return_type):
- error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
- 'WEBKIT_EXPORT should not be used on a function with a body.')
- elif function_state.is_pure:
- error(function_state.function_name_start_position.row, 'readability/webkit_export', 5,
- 'WEBKIT_EXPORT should not be used with a pure virtual function.')
-
check_function_definition_and_pass_ptr(modifiers_and_return_type, function_state.function_name_start_position.row, 'return', error)
parameter_list = function_state.parameter_list()
@@ -2088,6 +2083,8 @@ def check_enum_casing(clean_lines, line_number, enum_state, error):
error: The function to call with any errors found.
"""
+ enum_state.is_webidl_enum |= bool(match(r'\s*// Web(?:Kit)?IDL enum\s*$', clean_lines.raw_lines[line_number]))
+
line = clean_lines.elided[line_number] # Get rid of comments and strings.
if not enum_state.process_clean_line(line):
error(line_number, 'readability/enum_casing', 4,
@@ -2164,6 +2161,30 @@ def check_using_std(clean_lines, line_number, file_state, error):
"Use 'using namespace std;' instead of 'using std::%s;'." % method_name)
+def check_using_namespace(clean_lines, line_number, file_extension, error):
+ """Looks for 'using namespace foo;' which should be removed.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ line_number: The number of the line to check.
+ file_extension: The extension (dot not included) of the file.
+ error: The function to call with any errors found.
+ """
+
+ # This check applies only to headers.
+ if file_extension != 'h':
+ return
+
+ line = clean_lines.elided[line_number] # Get rid of comments and strings.
+
+ using_namespace_match = match(r'\s*using\s+namespace\s+(?P<method_name>\S+)\s*;\s*$', line)
+ if not using_namespace_match:
+ return
+
+ method_name = using_namespace_match.group('method_name')
+ error(line_number, 'build/using_namespace', 4,
+ "Do not use 'using namespace %s;'." % method_name)
+
def check_max_min_macros(clean_lines, line_number, file_state, error):
"""Looks use of MAX() and MIN() macros that should be replaced with std::max() and std::min().
@@ -2548,8 +2569,8 @@ def check_for_null(clean_lines, line_number, file_state, error):
if search(r'\bgdk_pixbuf_save_to\w+\b', line):
return
- # Don't warn about NULL usage in gtk_widget_style_get() or gtk_style_context_get_style. See Bug 51758
- if search(r'\bgtk_widget_style_get\(\w+\b', line) or search(r'\bgtk_style_context_get_style\(\w+\b', line):
+ # Don't warn about NULL usage in gtk_widget_style_get(), gtk_style_context_get_style(), or gtk_style_context_get(). See Bug 51758
+ if search(r'\bgtk_widget_style_get\(\w+\b', line) or search(r'\bgtk_style_context_get_style\(\w+\b', line) or search(r'\bgtk_style_context_get\(\w+\b', line):
return
# Don't warn about NULL usage in soup_server_new(). See Bug 77890.
@@ -2648,6 +2669,7 @@ def check_style(clean_lines, line_number, file_extension, class_state, file_stat
check_namespace_indentation(clean_lines, line_number, file_extension, file_state, error)
check_directive_indentation(clean_lines, line_number, file_state, error)
check_using_std(clean_lines, line_number, file_state, error)
+ check_using_namespace(clean_lines, line_number, file_extension, error)
check_max_min_macros(clean_lines, line_number, file_state, error)
check_ctype_functions(clean_lines, line_number, file_state, error)
check_switch_indentation(clean_lines, line_number, error)
@@ -2818,10 +2840,6 @@ def check_include_line(filename, file_extension, clean_lines, line_number, inclu
error(line_number, 'build/include', 4,
'wtf includes should be <wtf/file.h> instead of "wtf/file.h".')
- if filename.find('/chromium/') != -1 and include.startswith('cc/CC'):
- error(line_number, 'build/include', 4,
- 'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".')
-
duplicate_header = include in include_state
if duplicate_header:
error(line_number, 'build/include', 4,
@@ -3637,6 +3655,7 @@ class CppChecker(object):
'build/printf_format',
'build/storage_class',
'build/using_std',
+ 'build/using_namespace',
'legal/copyright',
'readability/braces',
'readability/casting',
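The two cpp.py additions above both come down to a small regular expression: all-caps enum members are tolerated only when the declaration is preceded by a "// WebIDL enum" (or "// WebKitIDL enum") marker comment, and any "using namespace foo;" found in a header is reported under the new build/using_namespace category. A minimal standalone sketch of those two matches, using plain re instead of cpp.py's match() helper and a made-up sample line:

    import re

    # Marker comment that flips _EnumState.is_webidl_enum (same pattern as above).
    webidl_marker = re.compile(r'\s*// Web(?:Kit)?IDL enum\s*$')
    assert webidl_marker.match('    // WebIDL enum')
    assert webidl_marker.match('// WebKitIDL enum')
    assert not webidl_marker.match('// IDL enum')

    # Pattern used by check_using_namespace() on header files.
    using_namespace = re.compile(r'\s*using\s+namespace\s+(?P<method_name>\S+)\s*;\s*$')
    hit = using_namespace.match('using namespace WebCore;')
    assert hit and hit.group('method_name') == 'WebCore'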
diff --git a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
index 822ed77c9..6ef857c41 100644
--- a/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/cpp_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# -*- coding: utf-8; -*-
#
# Copyright (C) 2011 Google Inc. All rights reserved.
@@ -40,7 +39,7 @@ import codecs
import os
import random
import re
-import unittest
+import unittest2 as unittest
import cpp as cpp_style
from cpp import CppChecker
from ..filter import FilterConfiguration
@@ -2761,15 +2760,6 @@ class OrderOfIncludesTest(CppStyleTestBase):
'wtf includes should be <wtf/file.h> instead of "wtf/file.h".'
' [build/include] [4]')
- def test_check_cc_includes(self):
- self.assert_language_rules_check('bar/chromium/foo.cpp',
- '#include "config.h"\n'
- '#include "foo.h"\n'
- '\n'
- '#include "cc/CCProxy.h"\n',
- 'cc includes should be "CCFoo.h" instead of "cc/CCFoo.h".'
- ' [build/include] [4]')
-
def test_classify_include(self):
classify_include = cpp_style._classify_include
include_state = cpp_style._IncludeState()
@@ -3295,6 +3285,21 @@ class NoNonVirtualDestructorsTest(CppStyleTestBase):
};''',
'')
+ self.assert_multi_line_lint(
+ '''\
+ // WebIDL enum
+ enum Foo {
+ FOO_ONE = 1,
+ FOO_TWO = 2,
+ };''',
+ '')
+
+ self.assert_multi_line_lint(
+ '''\
+ // WebKitIDL enum
+ enum Foo { FOO_ONE, FOO_TWO };''',
+ '')
+
def test_destructor_non_virtual_when_virtual_needed(self):
self.assert_multi_line_lint_re(
'''\
@@ -4397,6 +4402,9 @@ class WebKitStyleTest(CppStyleTestBase):
'gtk_style_context_get_style(context, "propertyName", &value, "otherName", &otherValue, NULL);',
'')
self.assert_lint(
+ 'gtk_style_context_get(context, static_cast<GtkStateFlags>(0), "property", &value, NULL);',
+ '')
+ self.assert_lint(
'gtk_widget_style_get_property(style, NULL, NULL);',
'Use 0 instead of NULL. [readability/null] [5]',
'foo.cpp')
@@ -4482,6 +4490,13 @@ class WebKitStyleTest(CppStyleTestBase):
" [build/using_std] [4]",
'foo.cpp')
+ def test_using_namespace(self):
+ self.assert_lint(
+ 'using namespace foo;',
+ "Do not use 'using namespace foo;'."
+ " [build/using_namespace] [4]",
+ 'foo.h')
+
def test_max_macro(self):
self.assert_lint(
'int i = MAX(0, 1);',
@@ -4778,50 +4793,16 @@ class WebKitStyleTest(CppStyleTestBase):
' [whitespace/comments] [5]')
def test_webkit_export_check(self):
- webkit_export_error_rules = ('-',
- '+readability/webkit_export')
+ webkit_export_error_rules = ('-', '+readability/webkit_export')
self.assertEqual('',
- self.perform_lint('WEBKIT_EXPORT int foo();\n',
- 'WebKit/chromium/public/test.h',
- webkit_export_error_rules))
- self.assertEqual('',
- self.perform_lint('WEBKIT_EXPORT int foo();\n',
- 'WebKit/chromium/tests/test.h',
- webkit_export_error_rules))
- self.assertEqual('WEBKIT_EXPORT should only be used in header files. [readability/webkit_export] [5]',
- self.perform_lint('WEBKIT_EXPORT int foo();\n',
- 'WebKit/chromium/public/test.cpp',
- webkit_export_error_rules))
- self.assertEqual('WEBKIT_EXPORT should only appear in the chromium public (or tests) directory. [readability/webkit_export] [5]',
- self.perform_lint('WEBKIT_EXPORT int foo();\n',
- 'WebKit/chromium/src/test.h',
- webkit_export_error_rules))
- self.assertEqual('WEBKIT_EXPORT should not be used on a function with a body. [readability/webkit_export] [5]',
- self.perform_lint('WEBKIT_EXPORT int foo() { }\n',
- 'WebKit/chromium/public/test.h',
- webkit_export_error_rules))
- self.assertEqual('WEBKIT_EXPORT should not be used on a function with a body. [readability/webkit_export] [5]',
- self.perform_lint('WEBKIT_EXPORT inline int foo()\n'
- '{\n'
- '}\n',
- 'WebKit/chromium/public/test.h',
- webkit_export_error_rules))
- self.assertEqual('WEBKIT_EXPORT should not be used with a pure virtual function. [readability/webkit_export] [5]',
- self.perform_lint('{}\n'
- 'WEBKIT_EXPORT\n'
- 'virtual\n'
- 'int\n'
- 'foo() = 0;\n',
- 'WebKit/chromium/public/test.h',
- webkit_export_error_rules))
- self.assertEqual('',
- self.perform_lint('{}\n'
- 'WEBKIT_EXPORT\n'
- 'virtual\n'
- 'int\n'
- 'foo() = 0;\n',
- 'test.h',
- webkit_export_error_rules))
+ self.perform_lint(
+ '{}\n'
+ 'WEBKIT_EXPORT\n'
+ 'virtual\n'
+ 'int\n'
+ 'foo() = 0;\n',
+ 'test.h',
+ webkit_export_error_rules))
def test_other(self):
# FIXME: Implement this.
@@ -4874,30 +4855,3 @@ class CppCheckerTest(unittest.TestCase):
# Thus, just check the distinguishing case to verify that the
# code defines __ne__.
self.assertFalse(checker1 != checker2)
-
-
-def tearDown():
- """A global check to make sure all error-categories have been tested.
-
- The main tearDown() routine is the only code we can guarantee will be
- run after all other tests have been executed.
- """
- try:
- if _run_verifyallcategoriesseen:
- ErrorCollector(None).verify_all_categories_are_seen()
- except NameError:
- # If nobody set the global _run_verifyallcategoriesseen, then
- # we assume we shouldn't run the test
- pass
-
-if __name__ == '__main__':
- import sys
- # We don't want to run the verify_all_categories_are_seen() test unless
- # we're running the full test suite: if we only run one test,
- # obviously we're not going to see all the error categories. So we
- # only run verify_all_categories_are_seen() when no commandline flags
- # are passed in.
- global _run_verifyallcategoriesseen
- _run_verifyallcategoriesseen = (len(sys.argv) == 1)
-
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py b/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
index e7fbbb42c..62a0793c4 100755..100644
--- a/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -24,7 +22,7 @@
"""Unit test for jsonchecker.py."""
-import unittest
+import unittest2 as unittest
import jsonchecker
@@ -69,7 +67,7 @@ class JSONCheckerTest(unittest.TestCase):
mock_error_handler.had_error = True
self.assertEqual(expected_line_number, line_number)
self.assertEqual(expected_category, category)
- self.assertTrue(category in jsonchecker.JSONChecker.categories)
+ self.assertIn(category, jsonchecker.JSONChecker.categories)
error_handler = MockErrorHandler(handle_style_error)
error_handler.had_error = False
@@ -111,6 +109,3 @@ class JSONCheckerTest(unittest.TestCase):
]
}
""")
-
-if __name__ == '__main__':
- unittest.main()
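The wholesale switch from unittest to unittest2 in these test files is what makes the richer Python 2.7 assertions (assertIn, assertIsNone, and friends) available on Python 2.6, where the standard library's unittest lacks them; the autoinstaller hook for unittest2 is added later in this patch. A minimal sketch of the idiom, assuming unittest2 is importable:

    import unittest2 as unittest

    class ExampleTest(unittest.TestCase):
        def test_backported_assertions(self):
            # Both assertions exist in unittest2 even on Python 2.6.
            self.assertIn('json/syntax', ['json/syntax', 'meta/filename'])
            self.assertIsNone(None)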
diff --git a/Tools/Scripts/webkitpy/style/checkers/png_unittest.py b/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
index 92b5ba448..a71a441f1 100644
--- a/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/png_unittest.py
@@ -23,7 +23,7 @@
"""Unit test for png.py."""
-import unittest
+import unittest2 as unittest
from png import PNGChecker
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.systemhost_mock import MockSystemHost
@@ -129,6 +129,3 @@ class PNGCheckerTest(unittest.TestCase):
checker.check()
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0], (0, 'image/png', 5, 'Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.'))
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/python.py b/Tools/Scripts/webkitpy/style/checkers/python.py
index 9fc436f3d..2066d9d51 100644
--- a/Tools/Scripts/webkitpy/style/checkers/python.py
+++ b/Tools/Scripts/webkitpy/style/checkers/python.py
@@ -95,6 +95,7 @@ class Pylinter(object):
"Instance of 'Popen' has no 'stdout' member",
"Instance of 'Popen' has no 'stderr' member",
"Instance of 'Popen' has no 'wait' member",
+ "Instance of 'Popen' has no 'pid' member",
]
def __init__(self):
diff --git a/Tools/Scripts/webkitpy/style/checkers/python_unittest.py b/Tools/Scripts/webkitpy/style/checkers/python_unittest.py
index 73bda76e2..2aa7cbe4c 100644
--- a/Tools/Scripts/webkitpy/style/checkers/python_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/python_unittest.py
@@ -23,7 +23,7 @@
"""Unit tests for python.py."""
import os
-import unittest
+import unittest2 as unittest
from python import PythonChecker
@@ -61,3 +61,20 @@ class PythonCheckerTest(unittest.TestCase):
(4, "pep8/W291", 5, "trailing whitespace"),
(4, "pylint/E0602", 5, "Undefined variable 'error'"),
])
+
+ def test_pylint_false_positives(self):
+ """Test that pylint false positives are suppressed."""
+ errors = []
+
+ def _mock_handle_style_error(line_number, category, confidence,
+ message):
+ error = (line_number, category, confidence, message)
+ errors.append(error)
+
+ current_dir = os.path.dirname(__file__)
+ file_path = os.path.join(current_dir, "python_unittest_falsepositives.py")
+
+ checker = PythonChecker(file_path, _mock_handle_style_error)
+ checker.check(lines=[])
+
+ self.assertEqual(errors, [])
diff --git a/Tools/Scripts/webkitpy/style/checkers/python_unittest_falsepositives.py b/Tools/Scripts/webkitpy/style/checkers/python_unittest_falsepositives.py
new file mode 100644
index 000000000..0ad66a502
--- /dev/null
+++ b/Tools/Scripts/webkitpy/style/checkers/python_unittest_falsepositives.py
@@ -0,0 +1,16 @@
+# This test verifies that the false positives generated by pylint are
+# correctly suppressed.
+
+import subprocess
+
+
+def test_popen(proc):
+ p = subprocess.Popen(proc, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ tmp1 = p.poll
+ tmp2 = p.returncode
+ tmp3 = p.stdin
+ tmp4 = p.stdout
+ tmp5 = p.stderr
+ tmp6 = p.wait
+ tmp7 = p.pid
diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
index 51b97bec5..67f38416d 100644
--- a/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
+++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations.py
@@ -62,7 +62,6 @@ class TestExpectationsChecker(object):
def __init__(self, file_path, handle_style_error, host=None):
self._file_path = file_path
self._handle_style_error = handle_style_error
- self._handle_style_error.turn_off_line_filtering()
self._tab_checker = TabChecker(file_path, handle_style_error)
# FIXME: host should be a required parameter, not an optional one.
diff --git a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
index b1d7f77f4..18648dfee 100644
--- a/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/test_expectations_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -29,7 +28,7 @@
import os
import sys
-import unittest
+import unittest2 as unittest
from test_expectations import TestExpectationsChecker
from webkitpy.common.host_mock import MockHost
@@ -71,12 +70,10 @@ class TestExpectationsTestCase(unittest.TestCase):
if port:
self.assertTrue(port.name().startswith(expected_port_implementation))
else:
- self.assertEqual(None, expected_port_implementation)
+ self.assertIsNone(expected_port_implementation)
def test_determine_port_from_expectations_path(self):
self._expect_port_for_expectations_path(None, '/')
- self._expect_port_for_expectations_path(None, 'LayoutTests/chromium-mac/TestExpectations')
- self._expect_port_for_expectations_path('chromium', 'LayoutTests/platform/chromium/TestExpectations')
self._expect_port_for_expectations_path(None, '/mock-checkout/LayoutTests/platform/win/TestExpectations')
self._expect_port_for_expectations_path('win', 'LayoutTests/platform/win/TestExpectations')
self._expect_port_for_expectations_path('efl', 'LayoutTests/platform/efl/TestExpectations')
@@ -95,7 +92,7 @@ class TestExpectationsTestCase(unittest.TestCase):
self._error_collector, host=host)
# We should have failed to find a valid port object for that path.
- self.assertEqual(checker._port_obj, None)
+ self.assertIsNone(checker._port_obj)
# Now use a test port so we can check the lines.
checker._port_obj = host.port_factory.get('test-mac-leopard')
@@ -108,10 +105,15 @@ class TestExpectationsTestCase(unittest.TestCase):
self.assertEqual(expected_output, self._error_collector.get_errors())
else:
self.assertNotEquals('', self._error_collector.get_errors())
- self.assertTrue(self._error_collector.turned_off_filtering)
+
+ # Note that a patch might change a line that introduces errors elsewhere, but we
+ # don't want to lint the whole file (it can unfairly punish patches for pre-existing errors).
+ # We rely on a separate lint-webkitpy step on the bots to keep the whole file okay.
+ # FIXME: See https://bugs.webkit.org/show_bug.cgi?id=104712 .
+ self.assertFalse(self._error_collector.turned_off_filtering)
def test_valid_expectations(self):
- self.assert_lines_lint(["crbug.com/1234 [ Mac ] passes/text.html [ Pass Failure ]"], should_pass=True)
+ self.assert_lines_lint(["webkit.org/b/1234 [ Mac ] passes/text.html [ Pass Failure ]"], should_pass=True)
def test_invalid_expectations(self):
self.assert_lines_lint(["Bug(me) passes/text.html [ Give Up]"], should_pass=False)
diff --git a/Tools/Scripts/webkitpy/style/checkers/text_unittest.py b/Tools/Scripts/webkitpy/style/checkers/text_unittest.py
index 18db6ad3d..01e373abc 100644
--- a/Tools/Scripts/webkitpy/style/checkers/text_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/text_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -29,7 +28,7 @@
"""Unit test for text_style.py."""
-import unittest
+import unittest2 as unittest
import text as text_style
from text import TextChecker
@@ -46,7 +45,7 @@ class TextStyleTestCase(unittest.TestCase):
self.had_error = True
text_style.process_file_data('', lines, error_for_test)
- self.assertTrue(not self.had_error, '%s should not have any errors.' % lines)
+ self.assertFalse(self.had_error, '%s should not have any errors.' % lines)
def assertError(self, lines, expected_line_number):
"""Asserts that the specified lines has an error."""
@@ -88,7 +87,3 @@ class TextCheckerTest(unittest.TestCase):
checker = TextChecker("foo.txt", self.mock_handle_style_error)
self.assertEqual(checker.file_path, "foo.txt")
self.assertEqual(checker.handle_style_error, self.mock_handle_style_error)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py b/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
index ff3a315f1..90950c9f9 100644
--- a/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/watchlist_unittest.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright (C) 2010 Apple Inc. All rights reserved.
# Copyright (C) 2011 Google Inc. All rights reserved.
#
@@ -33,7 +31,7 @@
'''Unit tests for watchlist.py.'''
-import unittest
+import unittest2 as unittest
import watchlist
diff --git a/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py b/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py
index 89c072d94..3de3d197f 100644
--- a/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py
+++ b/Tools/Scripts/webkitpy/style/checkers/xcodeproj.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
diff --git a/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py b/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
index 9713fd154..36a1a41da 100644
--- a/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/xcodeproj_unittest.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -26,7 +24,7 @@
"""Unit test for xcodeproj.py."""
import xcodeproj
-import unittest
+import unittest2 as unittest
class TestErrorHandler(object):
@@ -69,6 +67,3 @@ class XcodeProjectFileCheckerTest(unittest.TestCase):
self.assert_error([''], 'Missing "developmentRegion = English".')
self.assert_error(['developmentRegion = Japanese;'],
'developmentRegion is not English.')
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py b/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
index 7055a4f92..a224de3f0 100644
--- a/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
+++ b/Tools/Scripts/webkitpy/style/checkers/xml_unittest.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-#
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -24,7 +22,7 @@
"""Unit test for xml.py."""
-import unittest
+import unittest2 as unittest
import xml
@@ -87,6 +85,3 @@ class XMLCheckerTest(unittest.TestCase):
def test_no_error(self):
self.assert_no_error('<foo>\n</foo>')
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/style/error_handlers_unittest.py b/Tools/Scripts/webkitpy/style/error_handlers_unittest.py
index c02143178..0b4c35596 100644
--- a/Tools/Scripts/webkitpy/style/error_handlers_unittest.py
+++ b/Tools/Scripts/webkitpy/style/error_handlers_unittest.py
@@ -23,7 +23,7 @@
"""Unit tests for error_handlers.py."""
-import unittest
+import unittest2 as unittest
from checker import StyleProcessorConfiguration
from error_handlers import DefaultStyleErrorHandler
diff --git a/Tools/Scripts/webkitpy/style/filereader_unittest.py b/Tools/Scripts/webkitpy/style/filereader_unittest.py
index d728c463a..2c506ec20 100644
--- a/Tools/Scripts/webkitpy/style/filereader_unittest.py
+++ b/Tools/Scripts/webkitpy/style/filereader_unittest.py
@@ -20,7 +20,7 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.logtesting import LoggingTestCase
diff --git a/Tools/Scripts/webkitpy/style/filter_unittest.py b/Tools/Scripts/webkitpy/style/filter_unittest.py
index c20d9981a..b4dd7bc7c 100644
--- a/Tools/Scripts/webkitpy/style/filter_unittest.py
+++ b/Tools/Scripts/webkitpy/style/filter_unittest.py
@@ -22,7 +22,7 @@
"""Unit tests for filter.py."""
-import unittest
+import unittest2 as unittest
from filter import _CategoryFilter as CategoryFilter
from filter import validate_filter_rules
diff --git a/Tools/Scripts/webkitpy/style/main_unittest.py b/Tools/Scripts/webkitpy/style/main_unittest.py
index e0191687a..dbfc7c83a 100644
--- a/Tools/Scripts/webkitpy/style/main_unittest.py
+++ b/Tools/Scripts/webkitpy/style/main_unittest.py
@@ -20,7 +20,7 @@
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from main import change_directory
from webkitpy.common.system.filesystem_mock import MockFileSystem
diff --git a/Tools/Scripts/webkitpy/style/optparser_unittest.py b/Tools/Scripts/webkitpy/style/optparser_unittest.py
index 979b31d1a..0c9002adf 100644
--- a/Tools/Scripts/webkitpy/style/optparser_unittest.py
+++ b/Tools/Scripts/webkitpy/style/optparser_unittest.py
@@ -22,7 +22,7 @@
"""Unit tests for parser.py."""
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.logtesting import LoggingTestCase
from webkitpy.style.optparser import ArgumentParser
@@ -145,9 +145,9 @@ class ArgumentParserTest(LoggingTestCase):
self.assertEqual(files, [])
self.assertEqual(options.filter_rules, [])
- self.assertEqual(options.git_commit, None)
- self.assertEqual(options.diff_files, False)
- self.assertEqual(options.is_verbose, False)
+ self.assertIsNone(options.git_commit)
+ self.assertFalse(options.diff_files)
+ self.assertFalse(options.is_verbose)
self.assertEqual(options.min_confidence, 3)
self.assertEqual(options.output_format, 'vs7')
@@ -166,9 +166,9 @@ class ArgumentParserTest(LoggingTestCase):
(files, options) = parse(['--git-diff=commit'])
self.assertEqual(options.git_commit, 'commit')
(files, options) = parse(['--verbose'])
- self.assertEqual(options.is_verbose, True)
+ self.assertTrue(options.is_verbose)
(files, options) = parse(['--diff-files', 'file.txt'])
- self.assertEqual(options.diff_files, True)
+ self.assertTrue(options.diff_files)
# Pass user_rules.
(files, options) = parse(['--filter=+build,-whitespace'])
@@ -200,8 +200,8 @@ class CommandOptionValuesTest(unittest.TestCase):
# Check default parameters.
options = ProcessorOptions()
self.assertEqual(options.filter_rules, [])
- self.assertEqual(options.git_commit, None)
- self.assertEqual(options.is_verbose, False)
+ self.assertIsNone(options.git_commit)
+ self.assertFalse(options.is_verbose)
self.assertEqual(options.min_confidence, 1)
self.assertEqual(options.output_format, "emacs")
@@ -222,7 +222,7 @@ class CommandOptionValuesTest(unittest.TestCase):
output_format="vs7")
self.assertEqual(options.filter_rules, ["+"])
self.assertEqual(options.git_commit, "commit")
- self.assertEqual(options.is_verbose, True)
+ self.assertTrue(options.is_verbose)
self.assertEqual(options.min_confidence, 3)
self.assertEqual(options.output_format, "vs7")
diff --git a/Tools/Scripts/webkitpy/style/patchreader_unittest.py b/Tools/Scripts/webkitpy/style/patchreader_unittest.py
index 983b609e4..f7368ec2b 100644
--- a/Tools/Scripts/webkitpy/style/patchreader_unittest.py
+++ b/Tools/Scripts/webkitpy/style/patchreader_unittest.py
@@ -1,5 +1,3 @@
-#!/usr/bin/python
-#
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2009 Torch Mobile Inc.
# Copyright (C) 2009 Apple Inc. All rights reserved.
@@ -31,7 +29,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.style.patchreader import PatchReader
diff --git a/Tools/Scripts/webkitpy/test/finder_unittest.py b/Tools/Scripts/webkitpy/test/finder_unittest.py
index 694b3884a..f1259ef49 100644
--- a/Tools/Scripts/webkitpy/test/finder_unittest.py
+++ b/Tools/Scripts/webkitpy/test/finder_unittest.py
@@ -21,7 +21,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
@@ -112,7 +112,7 @@ class FinderTest(unittest.TestCase):
self.check_names(['/tmp/another_unittest.py'], [])
finally:
_, _, logs = oc.restore_output()
- self.assertTrue('another_unittest.py' in logs)
+ self.assertIn('another_unittest.py', logs)
# Paths that don't exist are errors.
oc.capture_output()
@@ -120,10 +120,7 @@ class FinderTest(unittest.TestCase):
self.check_names(['/foo/bar/notexist_unittest.py'], [])
finally:
_, _, logs = oc.restore_output()
- self.assertTrue('notexist_unittest.py' in logs)
+ self.assertIn('notexist_unittest.py', logs)
# Names that don't exist are caught later, at load time.
self.check_names(['bar.notexist_unittest'], ['bar.notexist_unittest'])
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/test/main.py b/Tools/Scripts/webkitpy/test/main.py
index 5f16beca6..4270ace5d 100644
--- a/Tools/Scripts/webkitpy/test/main.py
+++ b/Tools/Scripts/webkitpy/test/main.py
@@ -139,7 +139,7 @@ class Tester(object):
self._options.child_processes = 1
import webkitpy.thirdparty.autoinstalled.coverage as coverage
- cov = coverage.coverage(omit=["/usr/*", "*/webkitpy/thirdparty/autoinstalled/*"])
+ cov = coverage.coverage(omit=["/usr/*", "*/webkitpy/thirdparty/autoinstalled/*", "*/webkitpy/thirdparty/BeautifulSoup.py"])
cov.start()
self.printer.write_update("Checking imports ...")
diff --git a/Tools/Scripts/webkitpy/test/main_unittest.py b/Tools/Scripts/webkitpy/test/main_unittest.py
index 031abd65b..8a1b56d29 100644
--- a/Tools/Scripts/webkitpy/test/main_unittest.py
+++ b/Tools/Scripts/webkitpy/test/main_unittest.py
@@ -22,7 +22,7 @@
import logging
import sys
-import unittest
+import unittest2 as unittest
import StringIO
from webkitpy.common.system.filesystem import FileSystem
@@ -70,8 +70,8 @@ class TesterTest(unittest.TestCase):
_, _, logs = oc.restore_output()
root_logger.handlers = root_handlers
- self.assertTrue('No tests to run' in errors.getvalue())
- self.assertTrue('No tests to run' in logs)
+ self.assertIn('No tests to run', errors.getvalue())
+ self.assertIn('No tests to run', logs)
def _find_test_names(self, args):
tester = Tester()
@@ -114,4 +114,4 @@ class TesterTest(unittest.TestCase):
out, _ = proc.communicate()
retcode = proc.returncode
self.assertEqual(retcode, 0)
- self.assertTrue('Cover' in out)
+ self.assertIn('Cover', out)
diff --git a/Tools/Scripts/webkitpy/test/printer.py b/Tools/Scripts/webkitpy/test/printer.py
index 0ec3035b3..b5bea3f9f 100644
--- a/Tools/Scripts/webkitpy/test/printer.py
+++ b/Tools/Scripts/webkitpy/test/printer.py
@@ -24,7 +24,7 @@
import logging
import StringIO
-from webkitpy.common.system import outputcapture
+from webkitpy.common.system.systemhost import SystemHost
from webkitpy.layout_tests.views.metered_stream import MeteredStream
_log = logging.getLogger(__name__)
@@ -36,7 +36,7 @@ class Printer(object):
self.meter = None
self.options = options
self.num_tests = 0
- self.num_completed = 0
+ self.num_started = 0
self.num_errors = 0
self.num_failures = 0
self.running_tests = []
@@ -57,7 +57,8 @@ class Printer(object):
elif options.verbose == 2:
log_level = logging.DEBUG
- self.meter = MeteredStream(self.stream, (options.verbose == 2))
+ self.meter = MeteredStream(self.stream, (options.verbose == 2),
+ number_of_columns=SystemHost().platform.terminal_width())
handler = logging.StreamHandler(self.stream)
# We constrain the level on the handler rather than on the root
@@ -102,6 +103,8 @@ class Printer(object):
handler.addFilter(testing_filter)
if self.options.pass_through:
+ # FIXME: Can't import at top of file, as outputcapture needs unittest2
+ from webkitpy.common.system import outputcapture
outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream
def write_update(self, msg):
@@ -141,7 +144,7 @@ class Printer(object):
if self.options.timing:
suffix += ' %.4fs' % test_time
- self.num_completed += 1
+ self.num_started += 1
if test_name == self.running_tests[0]:
self.completed_tests.insert(0, [test_name, suffix, lines])
@@ -159,11 +162,23 @@ class Printer(object):
self.completed_tests = []
def _test_line(self, test_name, suffix):
- return '[%d/%d] %s%s' % (self.num_completed, self.num_tests, test_name, suffix)
+ format_string = '[%d/%d] %s%s'
+ status_line = format_string % (self.num_started, self.num_tests, test_name, suffix)
+ if len(status_line) > self.meter.number_of_columns():
+ overflow_columns = len(status_line) - self.meter.number_of_columns()
+ ellipsis = '...'
+ if len(test_name) < overflow_columns + len(ellipsis) + 3:
+ # We don't have enough space even if we elide, just show the test method name.
+ test_name = test_name.split('.')[-1]
+ else:
+ new_length = len(test_name) - overflow_columns - len(ellipsis)
+ prefix = int(new_length / 2)
+ test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
+ return format_string % (self.num_started, self.num_tests, test_name, suffix)
def print_result(self, run_time):
write = self.meter.writeln
- write('Ran %d test%s in %.3fs' % (self.num_completed, self.num_completed != 1 and "s" or "", run_time))
+ write('Ran %d test%s in %.3fs' % (self.num_started, self.num_started != 1 and "s" or "", run_time))
if self.num_failures or self.num_errors:
write('FAILED (failures=%d, errors=%d)\n' % (self.num_failures, self.num_errors))
else:
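The _test_line() change above keeps the one-line status within the terminal width reported by SystemHost().platform.terminal_width(), eliding the middle of long test names and falling back to just the method name when even that does not fit. A standalone sketch of the same truncation arithmetic, with a hypothetical 40-column terminal:

    def elide_test_name(test_name, suffix, num_started, num_tests, columns):
        # Mirrors Printer._test_line(): drop the middle of the name when the
        # full status line would not fit on one terminal row.
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (num_started, num_tests, test_name, suffix)
        if len(status_line) > columns:
            overflow_columns = len(status_line) - columns
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 3:
                # Not enough room even when elided; keep only the method name.
                test_name = test_name.split('.')[-1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
        return format_string % (num_started, num_tests, test_name, suffix)

    print elide_test_name('webkitpy.style.checkers.cpp_unittest.CppStyleTest.test_spacing',
                          ' passed', 12, 340, 40)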
diff --git a/Tools/Scripts/webkitpy/test/runner_unittest.py b/Tools/Scripts/webkitpy/test/runner_unittest.py
index e5be1921c..46a9507fd 100644
--- a/Tools/Scripts/webkitpy/test/runner_unittest.py
+++ b/Tools/Scripts/webkitpy/test/runner_unittest.py
@@ -23,7 +23,7 @@
import logging
import re
import StringIO
-import unittest
+import unittest2 as unittest
from webkitpy.tool.mocktool import MockOptions
from webkitpy.test.printer import Printer
@@ -95,7 +95,3 @@ class RunnerTest(unittest.TestCase):
self.assertEqual(runner.tests_run, 3)
self.assertEqual(len(runner.failures), 1)
self.assertEqual(len(runner.errors), 1)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/test/skip_unittest.py b/Tools/Scripts/webkitpy/test/skip_unittest.py
index f61a1bb47..bfcb6a67c 100644
--- a/Tools/Scripts/webkitpy/test/skip_unittest.py
+++ b/Tools/Scripts/webkitpy/test/skip_unittest.py
@@ -22,7 +22,7 @@
import StringIO
import logging
-import unittest
+import unittest2 as unittest
from webkitpy.test.skip import skip_if
@@ -72,6 +72,3 @@ class SkipTest(unittest.TestCase):
klass(self.foo_callback).test_foo()
self.assertEqual(self.log_stream.getvalue(), 'Skipping webkitpy.test.skip_unittest.TestSkipFixture: Should see this message.\n')
self.assertFalse(self.foo_was_called)
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/thirdparty/__init__.py b/Tools/Scripts/webkitpy/thirdparty/__init__.py
index ee5891122..601e2fbb3 100644
--- a/Tools/Scripts/webkitpy/thirdparty/__init__.py
+++ b/Tools/Scripts/webkitpy/thirdparty/__init__.py
@@ -109,7 +109,10 @@ class AutoinstallImportHook(object):
not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "logilab/astng")) or
not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "logilab/common"))):
installer = AutoInstaller(target_dir=_AUTOINSTALLED_DIR)
- did_install_something = installer.install("http://pypi.python.org/packages/source/l/logilab-common/logilab-common-0.58.1.tar.gz#md5=77298ab2d8bb8b4af9219791e7cee8ce", url_subpath="logilab-common-0.58.1", target_name="logilab/common")
+ files_to_remove = []
+ if sys.platform == 'win32':
+ files_to_remove = ['test/data/write_protected_file.txt']
+ did_install_something = installer.install("http://pypi.python.org/packages/source/l/logilab-common/logilab-common-0.58.1.tar.gz#md5=77298ab2d8bb8b4af9219791e7cee8ce", url_subpath="logilab-common-0.58.1", target_name="logilab/common", files_to_remove=files_to_remove)
did_install_something |= installer.install("http://pypi.python.org/packages/source/l/logilab-astng/logilab-astng-0.24.1.tar.gz#md5=ddaf66e4d85714d9c47a46d4bed406de", url_subpath="logilab-astng-0.24.1", target_name="logilab/astng")
did_install_something |= installer.install('http://pypi.python.org/packages/source/p/pylint/pylint-0.25.1.tar.gz#md5=728bbc2b339bc3749af013709a7f87a5', url_subpath="pylint-0.25.1", target_name="pylint")
return did_install_something
@@ -154,6 +157,10 @@ class AutoinstallImportHook(object):
url_subpath="ircbot.py")
return did_install_something
+ def _install_unittest2(self):
+ self._ensure_autoinstalled_dir_is_in_sys_path()
+ return self._install(url="http://pypi.python.org/packages/source/u/unittest2/unittest2-0.5.1.tar.gz#md5=a0af5cac92bbbfa0c3b0e99571390e0f", url_subpath="unittest2-0.5.1/unittest2")
+
def _install_webpagereplay(self):
did_install_something = False
if not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay")):
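The new _install_unittest2 hook follows the same convention as the sibling hooks in this file: the first import of the corresponding webkitpy.thirdparty.autoinstalled submodule downloads the package into the autoinstalled directory. A hedged sketch of the idiom; the autoinstalled.unittest2 import path is an assumption based on the hook's name, while the pep8 line mirrors the existing unit test below:

    # Importing an autoinstalled package triggers the matching _install_* hook.
    import webkitpy.thirdparty.autoinstalled.pep8        # existing hook, exercised by the unit test
    import webkitpy.thirdparty.autoinstalled.unittest2   # assumed to dispatch to _install_unittest2()

    # _install_unittest2() also calls _ensure_autoinstalled_dir_is_in_sys_path(),
    # so the plain spelling used throughout this patch then resolves:
    import unittest2 as unittest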
diff --git a/Tools/Scripts/webkitpy/thirdparty/__init___unittest.py b/Tools/Scripts/webkitpy/thirdparty/__init___unittest.py
index b3eb75f98..f687ae009 100644
--- a/Tools/Scripts/webkitpy/thirdparty/__init___unittest.py
+++ b/Tools/Scripts/webkitpy/thirdparty/__init___unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -28,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.thirdparty import AutoinstallImportHook
@@ -68,7 +67,3 @@ class ThirdpartyTest(unittest.TestCase):
import webkitpy.thirdparty.autoinstalled.pylint
import webkitpy.thirdparty.autoinstalled.webpagereplay
import webkitpy.thirdparty.autoinstalled.pep8
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py
index 60fb33d2c..8235666bb 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_base.py
@@ -39,6 +39,8 @@
# writing/reading.
+import socket
+
from mod_pywebsocket import util
@@ -109,20 +111,34 @@ class StreamBase(object):
ConnectionTerminatedException: when read returns empty string.
"""
- bytes = self._request.connection.read(length)
- if not bytes:
+ try:
+ read_bytes = self._request.connection.read(length)
+ if not read_bytes:
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. Peer (%r) closed connection' %
+ (length, (self._request.connection.remote_addr,)))
+ return read_bytes
+ except socket.error, e:
+ # Catch a socket.error. Because it's not a child class of the
+ # IOError prior to Python 2.6, we cannot omit this except clause.
+ # Use %s rather than %r for the exception to use human friendly
+ # format.
+ raise ConnectionTerminatedException(
+ 'Receiving %d byte failed. socket.error (%s) occurred' %
+ (length, e))
+ except IOError, e:
+ # Also catch an IOError because mod_python throws it.
raise ConnectionTerminatedException(
- 'Receiving %d byte failed. Peer (%r) closed connection' %
- (length, (self._request.connection.remote_addr,)))
- return bytes
+ 'Receiving %d byte failed. IOError (%s) occurred' %
+ (length, e))
- def _write(self, bytes):
+ def _write(self, bytes_to_write):
"""Writes given bytes to connection. In case we catch any exception,
prepends remote address to the exception message and raise again.
"""
try:
- self._request.connection.write(bytes)
+ self._request.connection.write(bytes_to_write)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
@@ -138,12 +154,12 @@ class StreamBase(object):
ConnectionTerminatedException: when read returns empty string.
"""
- bytes = []
+ read_bytes = []
while length > 0:
- new_bytes = self._read(length)
- bytes.append(new_bytes)
- length -= len(new_bytes)
- return ''.join(bytes)
+ new_read_bytes = self._read(length)
+ read_bytes.append(new_read_bytes)
+ length -= len(new_read_bytes)
+ return ''.join(read_bytes)
def _read_until(self, delim_char):
"""Reads bytes until we encounter delim_char. The result will not
@@ -153,13 +169,13 @@ class StreamBase(object):
ConnectionTerminatedException: when read returns empty string.
"""
- bytes = []
+ read_bytes = []
while True:
ch = self._read(1)
if ch == delim_char:
break
- bytes.append(ch)
- return ''.join(bytes)
+ read_bytes.append(ch)
+ return ''.join(read_bytes)
# vi:sts=4 sw=4 et
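The duplicated except clauses in _read() above exist because socket.error only became a subclass of IOError in Python 2.6; on older interpreters (which the comment in the hunk calls out) it has to be caught separately, and mod_python may raise a bare IOError of its own. A one-line check that makes the relationship visible, True on Python 2.6 and later and False before:

    import socket

    # False on Python 2.5 and earlier, which is why _read() keeps a dedicated
    # "except socket.error" clause alongside the IOError one.
    print issubclass(socket.error, IOError)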
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
index bd158fa6b..a8a49e3c3 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
@@ -280,7 +280,7 @@ def parse_frame(receive_bytes, logger=None,
if logger.isEnabledFor(common.LOGLEVEL_FINE):
unmask_start = time.time()
- bytes = masker.mask(raw_payload_bytes)
+ unmasked_bytes = masker.mask(raw_payload_bytes)
if logger.isEnabledFor(common.LOGLEVEL_FINE):
logger.log(
@@ -288,7 +288,7 @@ def parse_frame(receive_bytes, logger=None,
'Done unmasking payload data at %s MB/s',
payload_length / (time.time() - unmask_start) / 1000 / 1000)
- return opcode, bytes, fin, rsv1, rsv2, rsv3
+ return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3
class FragmentedFrameBuilder(object):
@@ -389,9 +389,6 @@ class StreamOptions(object):
def __init__(self):
"""Constructs StreamOptions."""
- # Enables deflate-stream extension.
- self.deflate_stream = False
-
# Filters applied to frames.
self.outgoing_frame_filters = []
self.incoming_frame_filters = []
@@ -403,9 +400,6 @@ class StreamOptions(object):
self.encode_text_message_to_utf8 = True
self.mask_send = False
self.unmask_receive = True
- # RFC6455 disallows fragmented control frames, but mux extension
- # relaxes the restriction.
- self.allow_fragmented_control_frame = False
class Stream(StreamBase):
@@ -426,10 +420,6 @@ class Stream(StreamBase):
self._options = options
- if self._options.deflate_stream:
- self._logger.debug('Setup filter for deflate-stream')
- self._request = util.DeflateRequest(self._request)
-
self._request.client_terminated = False
self._request.server_terminated = False
@@ -463,10 +453,10 @@ class Stream(StreamBase):
unmask_receive=self._options.unmask_receive)
def _receive_frame_as_frame_object(self):
- opcode, bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
+ opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
- opcode=opcode, payload=bytes)
+ opcode=opcode, payload=unmasked_bytes)
def receive_filtered_frame(self):
"""Receives a frame and applies frame filters and message filters.
@@ -602,8 +592,7 @@ class Stream(StreamBase):
else:
# Start of fragmentation frame
- if (not self._options.allow_fragmented_control_frame and
- common.is_control_opcode(frame.opcode)):
+ if common.is_control_opcode(frame.opcode):
raise InvalidFrameException(
'Control frames must not be fragmented')
@@ -647,8 +636,9 @@ class Stream(StreamBase):
self._request.ws_close_code,
self._request.ws_close_reason)
- # Drain junk data after the close frame if necessary.
- self._drain_received_data()
+ # As we've received a close frame, no more data is coming over the
+ # socket. We can now safely close the socket without worrying about
+        # sending an RST packet.
if self._request.server_terminated:
self._logger.debug(
@@ -672,7 +662,7 @@ class Stream(StreamBase):
reason = ''
self._send_closing_handshake(code, reason)
self._logger.debug(
- 'Sent ack for client-initiated closing handshake '
+ 'Acknowledged closing handshake initiated by the peer '
'(code=%r, reason=%r)', code, reason)
def _process_ping_message(self, message):
@@ -815,13 +805,15 @@ class Stream(StreamBase):
self._write(frame)
- def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
+ def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='',
+ wait_response=True):
"""Closes a WebSocket connection.
Args:
code: Status code for close frame. If code is None, a close
frame with empty body will be sent.
reason: string representing close reason.
+            wait_response: True when the caller wants to wait for the response.
Raises:
BadOperationException: when reason is specified with code None
or reason is not an instance of both str and unicode.
@@ -844,11 +836,11 @@ class Stream(StreamBase):
self._send_closing_handshake(code, reason)
self._logger.debug(
- 'Sent server-initiated closing handshake (code=%r, reason=%r)',
+ 'Initiated closing handshake (code=%r, reason=%r)',
code, reason)
if (code == common.STATUS_GOING_AWAY or
- code == common.STATUS_PROTOCOL_ERROR):
+ code == common.STATUS_PROTOCOL_ERROR) or not wait_response:
# It doesn't make sense to wait for a close frame if the reason is
# protocol error or that the server is going away. For some of
# other reasons, it might not make sense to wait for a close frame,
@@ -891,25 +883,5 @@ class Stream(StreamBase):
return self._original_opcode
- def _drain_received_data(self):
- """Drains unread data in the receive buffer to avoid sending out TCP
- RST packet. This is because when deflate-stream is enabled, some
- DEFLATE block for flushing data may follow a close frame. If any data
- remains in the receive buffer of a socket when the socket is closed,
- it sends out TCP RST packet to the other peer.
-
- Since mod_python's mp_conn object doesn't support non-blocking read,
- we perform this only when pywebsocket is running in standalone mode.
- """
-
- # If self._options.deflate_stream is true, self._request is
- # DeflateRequest, so we can get wrapped request object by
- # self._request._request.
- #
- # Only _StandaloneRequest has _drain_received_data method.
- if (self._options.deflate_stream and
- ('_drain_received_data' in dir(self._request._request))):
- self._request._request._drain_received_data()
-
# vi:sts=4 sw=4 et
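With deflate-stream gone, close_connection() no longer needs to drain leftover DEFLATE data, and the new wait_response argument lets a handler start the closing handshake without blocking for the peer's close frame. A hedged usage sketch from inside a standard pywebsocket handler (the handler body and reason string are made up):

    from mod_pywebsocket import common

    def web_socket_transfer_data(request):
        # ... exchange application messages over request.ws_stream ...
        # Initiate the closing handshake but skip the blocking wait for the
        # peer's close frame (wait_response=False).
        request.ws_stream.close_connection(
            code=common.STATUS_NORMAL_CLOSURE, reason='done',
            wait_response=False)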
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py
index 2388379c0..afa123368 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/common.py
@@ -101,10 +101,10 @@ SEC_WEBSOCKET_KEY2_HEADER = 'Sec-WebSocket-Key2'
SEC_WEBSOCKET_LOCATION_HEADER = 'Sec-WebSocket-Location'
# Extensions
-DEFLATE_STREAM_EXTENSION = 'deflate-stream'
DEFLATE_FRAME_EXTENSION = 'deflate-frame'
PERFRAME_COMPRESSION_EXTENSION = 'perframe-compress'
PERMESSAGE_COMPRESSION_EXTENSION = 'permessage-compress'
+PERMESSAGE_DEFLATE_EXTENSION = 'permessage-deflate'
X_WEBKIT_DEFLATE_FRAME_EXTENSION = 'x-webkit-deflate-frame'
X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION = 'x-webkit-permessage-compress'
MUX_EXTENSION = 'mux_DO_NOT_USE'
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py
index 25905f180..96c91e0c9 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/dispatch.py
@@ -255,6 +255,9 @@ class Dispatcher(object):
try:
do_extra_handshake_(request)
except handshake.AbortedByUserException, e:
+ # Re-raise to tell the caller of this function to finish this
+ # connection without sending any error.
+ self._logger.debug('%s', util.get_stack_trace())
raise
except Exception, e:
util.prepend_message_to_exception(
@@ -294,11 +297,12 @@ class Dispatcher(object):
request.ws_stream.close_connection()
# Catch non-critical exceptions the handler didn't handle.
except handshake.AbortedByUserException, e:
- self._logger.debug('%s', e)
+ self._logger.debug('%s', util.get_stack_trace())
raise
except msgutil.BadOperationException, e:
self._logger.debug('%s', e)
- request.ws_stream.close_connection(common.STATUS_ABNORMAL_CLOSURE)
+ request.ws_stream.close_connection(
+ common.STATUS_INTERNAL_ENDPOINT_ERROR)
except msgutil.InvalidFrameException, e:
# InvalidFrameException must be caught before
# ConnectionTerminatedException that catches InvalidFrameException.
@@ -314,6 +318,8 @@ class Dispatcher(object):
except msgutil.ConnectionTerminatedException, e:
self._logger.debug('%s', e)
except Exception, e:
+ # Any other exceptions are forwarded to the caller of this
+ # function.
util.prepend_message_to_exception(
'%s raised exception for %s: ' % (
_TRANSFER_DATA_HANDLER_NAME, request.ws_resource),
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py
index 03dbf9ee1..552d2c072 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/extensions.py
@@ -34,83 +34,114 @@ from mod_pywebsocket.http_header_util import quote_if_necessary
_available_processors = {}
+_compression_extension_names = []
class ExtensionProcessorInterface(object):
- def name(self):
- return None
+ def __init__(self, request):
+ self._request = request
+ self._active = True
- def get_extension_response(self):
+ def request(self):
+ return self._request
+
+ def name(self):
return None
- def setup_stream_options(self, stream_options):
+ def check_consistency_with_other_processors(self, processors):
pass
+ def set_active(self, active):
+ self._active = active
-class DeflateStreamExtensionProcessor(ExtensionProcessorInterface):
- """WebSocket DEFLATE stream extension processor.
-
- Specification:
- Section 9.2.1 in
- http://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10
- """
-
- def __init__(self, request):
- self._logger = util.get_class_logger(self)
-
- self._request = request
+ def is_active(self):
+ return self._active
- def name(self):
- return common.DEFLATE_STREAM_EXTENSION
+ def _get_extension_response_internal(self):
+ return None
def get_extension_response(self):
- if len(self._request.get_parameter_names()) != 0:
- return None
-
- self._logger.debug(
- 'Enable %s extension', common.DEFLATE_STREAM_EXTENSION)
+ if self._active:
+ response = self._get_extension_response_internal()
+ if response is None:
+ self._active = False
+ return response
+ return None
- return common.ExtensionParameter(common.DEFLATE_STREAM_EXTENSION)
+ def _setup_stream_options_internal(self, stream_options):
+ pass
def setup_stream_options(self, stream_options):
- stream_options.deflate_stream = True
+ if self._active:
+ self._setup_stream_options_internal(stream_options)
-_available_processors[common.DEFLATE_STREAM_EXTENSION] = (
- DeflateStreamExtensionProcessor)
-
-
-def _log_compression_ratio(logger, original_bytes, total_original_bytes,
- filtered_bytes, total_filtered_bytes):
+def _log_outgoing_compression_ratio(
+ logger, original_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
- average_ratio = float('inf')
if original_bytes != 0:
ratio = float(filtered_bytes) / original_bytes
- if total_original_bytes != 0:
- average_ratio = (
- float(total_filtered_bytes) / total_original_bytes)
- logger.debug('Outgoing compress ratio: %f (average: %f)' %
- (ratio, average_ratio))
+ logger.debug('Outgoing compression ratio: %f (average: %f)' %
+ (ratio, average_ratio))
-def _log_decompression_ratio(logger, received_bytes, total_received_bytes,
- filtered_bytes, total_filtered_bytes):
+
+def _log_incoming_compression_ratio(
+ logger, received_bytes, filtered_bytes, average_ratio):
# Print inf when ratio is not available.
ratio = float('inf')
- average_ratio = float('inf')
- if received_bytes != 0:
+ if filtered_bytes != 0:
ratio = float(received_bytes) / filtered_bytes
- if total_filtered_bytes != 0:
- average_ratio = (
- float(total_received_bytes) / total_filtered_bytes)
- logger.debug('Incoming compress ratio: %f (average: %f)' %
- (ratio, average_ratio))
+
+ logger.debug('Incoming compression ratio: %f (average: %f)' %
+ (ratio, average_ratio))
+
+
+def _parse_window_bits(bits):
+ """Return parsed integer value iff the given string conforms to the
+ grammar of the window bits extension parameters.
+ """
+
+ if bits is None:
+ raise ValueError('Value is required')
+
+ # For non integer values such as "10.0", ValueError will be raised.
+ int_bits = int(bits)
+
+ # First condition is to drop leading zero case e.g. "08".
+ if bits != str(int_bits) or int_bits < 8 or int_bits > 15:
+ raise ValueError('Invalid value: %r' % bits)
+
+ return int_bits
+
+
+class _AverageRatioCalculator(object):
+ """Stores total bytes of original and result data, and calculates average
+ result / original ratio.
+ """
+
+ def __init__(self):
+ self._total_original_bytes = 0
+ self._total_result_bytes = 0
+
+ def add_original_bytes(self, value):
+ self._total_original_bytes += value
+
+ def add_result_bytes(self, value):
+ self._total_result_bytes += value
+
+ def get_average_ratio(self):
+ if self._total_original_bytes != 0:
+ return (float(self._total_result_bytes) /
+ self._total_original_bytes)
+ else:
+ return float('inf')
class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
- """WebSocket Per-frame DEFLATE extension processor.
+ """deflate-frame extension processor.
Specification:
http://tools.ietf.org/html/draft-tyoshino-hybi-websocket-perframe-deflate
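The _parse_window_bits() helper introduced earlier in this hunk accepts only the strings '8' through '15', rejecting None, leading zeros and fractional values, so a malformed window-bits parameter causes the extension response to be declined. A few concrete accept/reject cases (assuming the function is in scope, e.g. run inside mod_pywebsocket.extensions):

    assert _parse_window_bits('8') == 8
    assert _parse_window_bits('15') == 15
    for bad in (None, '08', '10.0', '7', '16'):
        try:
            _parse_window_bits(bad)
        except ValueError:
            pass
        else:
            raise AssertionError('%r should have been rejected' % (bad,))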
@@ -120,34 +151,38 @@ class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
_NO_CONTEXT_TAKEOVER_PARAM = 'no_context_takeover'
def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
- self._request = request
-
self._response_window_bits = None
self._response_no_context_takeover = False
self._bfinal = False
- # Counters for statistics.
-
- # Total number of outgoing bytes supplied to this filter.
- self._total_outgoing_payload_bytes = 0
- # Total number of bytes sent to the network after applying this filter.
- self._total_filtered_outgoing_payload_bytes = 0
+ # Calculates
+ # (Total outgoing bytes supplied to this filter) /
+ # (Total bytes sent to the network after applying this filter)
+ self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
- # Total number of bytes received from the network.
- self._total_incoming_payload_bytes = 0
- # Total number of incoming bytes obtained after applying this filter.
- self._total_filtered_incoming_payload_bytes = 0
+ # Calculates
+ # (Total bytes received from the network) /
+ # (Total incoming bytes obtained after applying this filter)
+ self._incoming_average_ratio_calculator = _AverageRatioCalculator()
def name(self):
return common.DEFLATE_FRAME_EXTENSION
- def get_extension_response(self):
+ def _get_extension_response_internal(self):
# Any unknown parameter will be just ignored.
- window_bits = self._request.get_parameter_value(
- self._WINDOW_BITS_PARAM)
+ window_bits = None
+ if self._request.has_parameter(self._WINDOW_BITS_PARAM):
+ window_bits = self._request.get_parameter_value(
+ self._WINDOW_BITS_PARAM)
+ try:
+ window_bits = _parse_window_bits(window_bits)
+ except ValueError, e:
+ return None
+
no_context_takeover = self._request.has_parameter(
self._NO_CONTEXT_TAKEOVER_PARAM)
if (no_context_takeover and
@@ -155,18 +190,10 @@ class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
self._NO_CONTEXT_TAKEOVER_PARAM) is not None):
return None
- if window_bits is not None:
- try:
- window_bits = int(window_bits)
- except ValueError, e:
- return None
- if window_bits < 8 or window_bits > 15:
- return None
-
- self._deflater = util._RFC1979Deflater(
+ self._rfc1979_deflater = util._RFC1979Deflater(
window_bits, no_context_takeover)
- self._inflater = util._RFC1979Inflater()
+ self._rfc1979_inflater = util._RFC1979Inflater()
self._compress_outgoing = True
@@ -191,7 +218,7 @@ class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
return response
- def setup_stream_options(self, stream_options):
+ def _setup_stream_options_internal(self, stream_options):
class _OutgoingFilter(object):
@@ -235,25 +262,28 @@ class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
"""
original_payload_size = len(frame.payload)
- self._total_outgoing_payload_bytes += original_payload_size
+ self._outgoing_average_ratio_calculator.add_original_bytes(
+ original_payload_size)
if (not self._compress_outgoing or
common.is_control_opcode(frame.opcode)):
- self._total_filtered_outgoing_payload_bytes += (
- original_payload_size)
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ original_payload_size)
return
- frame.payload = self._deflater.filter(
+ frame.payload = self._rfc1979_deflater.filter(
frame.payload, bfinal=self._bfinal)
frame.rsv1 = 1
filtered_payload_size = len(frame.payload)
- self._total_filtered_outgoing_payload_bytes += filtered_payload_size
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ filtered_payload_size)
- _log_compression_ratio(self._logger, original_payload_size,
- self._total_outgoing_payload_bytes,
- filtered_payload_size,
- self._total_filtered_outgoing_payload_bytes)
+ _log_outgoing_compression_ratio(
+ self._logger,
+ original_payload_size,
+ filtered_payload_size,
+ self._outgoing_average_ratio_calculator.get_average_ratio())
def _incoming_filter(self, frame):
"""Transform incoming frames. This method is called only by
@@ -261,33 +291,35 @@ class DeflateFrameExtensionProcessor(ExtensionProcessorInterface):
"""
received_payload_size = len(frame.payload)
- self._total_incoming_payload_bytes += received_payload_size
+ self._incoming_average_ratio_calculator.add_result_bytes(
+ received_payload_size)
if frame.rsv1 != 1 or common.is_control_opcode(frame.opcode):
- self._total_filtered_incoming_payload_bytes += (
- received_payload_size)
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ received_payload_size)
return
- frame.payload = self._inflater.filter(frame.payload)
+ frame.payload = self._rfc1979_inflater.filter(frame.payload)
frame.rsv1 = 0
filtered_payload_size = len(frame.payload)
- self._total_filtered_incoming_payload_bytes += filtered_payload_size
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ filtered_payload_size)
- _log_decompression_ratio(self._logger, received_payload_size,
- self._total_incoming_payload_bytes,
- filtered_payload_size,
- self._total_filtered_incoming_payload_bytes)
+ _log_incoming_compression_ratio(
+ self._logger,
+ received_payload_size,
+ filtered_payload_size,
+ self._incoming_average_ratio_calculator.get_average_ratio())
_available_processors[common.DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
+_compression_extension_names.append(common.DEFLATE_FRAME_EXTENSION)
-
-# Adding vendor-prefixed deflate-frame extension.
-# TODO(bashi): Remove this after WebKit stops using vendor prefix.
_available_processors[common.X_WEBKIT_DEFLATE_FRAME_EXTENSION] = (
DeflateFrameExtensionProcessor)
+_compression_extension_names.append(common.X_WEBKIT_DEFLATE_FRAME_EXTENSION)
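# For illustration only (a hedged sketch, not code from the patch): the
# processors above validate window-bits offers through _parse_window_bits(),
# which is defined earlier in this file, outside this hunk. Assuming it keeps
# the semantics of the code it replaces (an optional decimal value that must
# fall in 8..15), a minimal stand-alone equivalent could look like:
def _parse_window_bits_sketch(bits):
    """Returns None for an absent parameter; otherwise an int in 8..15.

    Raises ValueError for anything else, which the callers above translate
    into declining the extension offer.
    """
    if bits is None:
        return None
    try:
        bits = int(bits)
    except (TypeError, ValueError):
        raise ValueError('Invalid window bits: %r' % bits)
    if bits < 8 or bits > 15:
        raise ValueError('Window bits out of range: %d' % bits)
    return bits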
def _parse_compression_method(data):
@@ -306,13 +338,13 @@ def _create_accepted_method_desc(method_name, method_params):
class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
- """Base class for Per-frame and Per-message compression extension."""
+ """Base class for perframe-compress and permessage-compress extension."""
_METHOD_PARAM = 'method'
def __init__(self, request):
+ ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
- self._request = request
self._compression_method_name = None
self._compression_processor = None
self._compression_processor_hook = None
@@ -357,7 +389,7 @@ class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
self._compression_processor = compression_processor
return processor_response
- def get_extension_response(self):
+ def _get_extension_response_internal(self):
processor_response = self._get_compression_processor_response()
if processor_response is None:
return None
@@ -372,7 +404,7 @@ class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
(self._request.name(), self._compression_method_name))
return response
- def setup_stream_options(self, stream_options):
+ def _setup_stream_options_internal(self, stream_options):
if self._compression_processor is None:
return
self._compression_processor.setup_stream_options(stream_options)
@@ -384,8 +416,8 @@ class CompressionExtensionProcessorBase(ExtensionProcessorInterface):
return self._compression_processor
-class PerFrameCompressionExtensionProcessor(CompressionExtensionProcessorBase):
- """WebSocket Per-frame compression extension processor.
+class PerFrameCompressExtensionProcessor(CompressionExtensionProcessorBase):
+ """perframe-compress processor.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-perframe-compression
@@ -406,56 +438,66 @@ class PerFrameCompressionExtensionProcessor(CompressionExtensionProcessorBase):
_available_processors[common.PERFRAME_COMPRESSION_EXTENSION] = (
- PerFrameCompressionExtensionProcessor)
+ PerFrameCompressExtensionProcessor)
+_compression_extension_names.append(common.PERFRAME_COMPRESSION_EXTENSION)
-class DeflateMessageProcessor(ExtensionProcessorInterface):
- """Per-message deflate processor."""
+class PerMessageDeflateExtensionProcessor(ExtensionProcessorInterface):
+ """permessage-deflate extension processor. It's also used for
+ permessage-compress extension when the deflate method is chosen.
+
+ Specification:
+ http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression-08
+ """
_S2C_MAX_WINDOW_BITS_PARAM = 's2c_max_window_bits'
_S2C_NO_CONTEXT_TAKEOVER_PARAM = 's2c_no_context_takeover'
_C2S_MAX_WINDOW_BITS_PARAM = 'c2s_max_window_bits'
_C2S_NO_CONTEXT_TAKEOVER_PARAM = 'c2s_no_context_takeover'
- def __init__(self, request):
- self._request = request
+ def __init__(self, request, draft08=True):
+ """Construct PerMessageDeflateExtensionProcessor
+
+ Args:
+ draft08: Follow the constraints on the parameters that are not
+ specified for permessage-compress but are specified for
+ permessage-deflate, as in
+ draft-ietf-hybi-permessage-compression-08.
+ """
+
+ ExtensionProcessorInterface.__init__(self, request)
self._logger = util.get_class_logger(self)
self._c2s_max_window_bits = None
self._c2s_no_context_takeover = False
- self._bfinal = False
-
- self._compress_outgoing_enabled = False
- # True if a message is fragmented and compression is ongoing.
- self._compress_ongoing = False
-
- # Counters for statistics.
-
- # Total number of outgoing bytes supplied to this filter.
- self._total_outgoing_payload_bytes = 0
- # Total number of bytes sent to the network after applying this filter.
- self._total_filtered_outgoing_payload_bytes = 0
-
- # Total number of bytes received from the network.
- self._total_incoming_payload_bytes = 0
- # Total number of incoming bytes obtained after applying this filter.
- self._total_filtered_incoming_payload_bytes = 0
+ self._draft08 = draft08
def name(self):
return 'deflate'
- def get_extension_response(self):
- # Any unknown parameter will be just ignored.
+ def _get_extension_response_internal(self):
+ if self._draft08:
+ for name in self._request.get_parameter_names():
+ if name not in [self._S2C_MAX_WINDOW_BITS_PARAM,
+ self._S2C_NO_CONTEXT_TAKEOVER_PARAM,
+ self._C2S_MAX_WINDOW_BITS_PARAM]:
+ self._logger.debug('Unknown parameter: %r', name)
+ return None
+ else:
+ # Any unknown parameter will be just ignored.
+ pass
- s2c_max_window_bits = self._request.get_parameter_value(
- self._S2C_MAX_WINDOW_BITS_PARAM)
- if s2c_max_window_bits is not None:
+ s2c_max_window_bits = None
+ if self._request.has_parameter(self._S2C_MAX_WINDOW_BITS_PARAM):
+ s2c_max_window_bits = self._request.get_parameter_value(
+ self._S2C_MAX_WINDOW_BITS_PARAM)
try:
- s2c_max_window_bits = int(s2c_max_window_bits)
+ s2c_max_window_bits = _parse_window_bits(s2c_max_window_bits)
except ValueError, e:
- return None
- if s2c_max_window_bits < 8 or s2c_max_window_bits > 15:
+ self._logger.debug('Bad %s parameter: %r',
+ self._S2C_MAX_WINDOW_BITS_PARAM,
+ e)
return None
s2c_no_context_takeover = self._request.has_parameter(
@@ -463,14 +505,32 @@ class DeflateMessageProcessor(ExtensionProcessorInterface):
if (s2c_no_context_takeover and
self._request.get_parameter_value(
self._S2C_NO_CONTEXT_TAKEOVER_PARAM) is not None):
+ self._logger.debug('%s parameter must not have a value: %r',
+ self._S2C_NO_CONTEXT_TAKEOVER_PARAM,
+ s2c_no_context_takeover)
return None
- self._deflater = util._RFC1979Deflater(
+ c2s_max_window_bits = self._request.has_parameter(
+ self._C2S_MAX_WINDOW_BITS_PARAM)
+ if (self._draft08 and
+ c2s_max_window_bits and
+ self._request.get_parameter_value(
+ self._C2S_MAX_WINDOW_BITS_PARAM) is not None):
+ self._logger.debug('%s parameter must not have a value in a '
+ 'client\'s opening handshake: %r',
+ self._C2S_MAX_WINDOW_BITS_PARAM,
+ c2s_max_window_bits)
+ return None
+
+ self._rfc1979_deflater = util._RFC1979Deflater(
s2c_max_window_bits, s2c_no_context_takeover)
- self._inflater = util._RFC1979Inflater()
+ self._rfc1979_inflater = util._RFC1979Inflater()
- self._compress_outgoing_enabled = True
+ self._framer = _PerMessageDeflateFramer(
+ s2c_max_window_bits, s2c_no_context_takeover)
+ self._framer.set_bfinal(False)
+ self._framer.set_compress_outgoing_enabled(True)
response = common.ExtensionParameter(self._request.name())
@@ -483,9 +543,15 @@ class DeflateMessageProcessor(ExtensionProcessorInterface):
self._S2C_NO_CONTEXT_TAKEOVER_PARAM, None)
if self._c2s_max_window_bits is not None:
+ if self._draft08 and c2s_max_window_bits:
+ self._logger.debug('Processor is configured to use %s but '
+ 'the client cannot accept it',
+ self._C2S_MAX_WINDOW_BITS_PARAM)
+ return None
response.add_parameter(
self._C2S_MAX_WINDOW_BITS_PARAM,
str(self._c2s_max_window_bits))
+
if self._c2s_no_context_takeover:
response.add_parameter(
self._C2S_NO_CONTEXT_TAKEOVER_PARAM, None)
@@ -502,100 +568,99 @@ class DeflateMessageProcessor(ExtensionProcessorInterface):
return response
- def setup_stream_options(self, stream_options):
- class _OutgoingMessageFilter(object):
-
- def __init__(self, parent):
- self._parent = parent
+ def _setup_stream_options_internal(self, stream_options):
+ self._framer.setup_stream_options(stream_options)
- def filter(self, message, end=True, binary=False):
- return self._parent._process_outgoing_message(
- message, end, binary)
+ def set_c2s_max_window_bits(self, value):
+ """If this option is specified, this class adds the c2s_max_window_bits
+ extension parameter to the handshake response, but doesn't reduce the
+ LZ77 sliding window size of its inflater. I.e., you can use this for
+ testing client implementation but cannot reduce memory usage of this
+ class.
+
+ If this method has been called with True and an offer without the
+ c2s_max_window_bits extension parameter is received,
+ - (When processing the permessage-deflate extension) this processor
+ declines the request.
+ - (When processing the permessage-compress extension) this processor
+ accepts the request.
+ """
- class _IncomingMessageFilter(object):
+ self._c2s_max_window_bits = value
- def __init__(self, parent):
- self._parent = parent
- self._decompress_next_message = False
+ def set_c2s_no_context_takeover(self, value):
+ """If this option is specified, this class adds the
+ c2s_no_context_takeover extension parameter to the handshake response,
+ but doesn't reset inflater for each message. I.e., you can use this for
+ testing client implementation but cannot reduce memory usage of this
+ class.
+ """
- def decompress_next_message(self):
- self._decompress_next_message = True
+ self._c2s_no_context_takeover = value
- def filter(self, message):
- message = self._parent._process_incoming_message(
- message, self._decompress_next_message)
- self._decompress_next_message = False
- return message
+ def set_bfinal(self, value):
+ self._framer.set_bfinal(value)
- self._outgoing_message_filter = _OutgoingMessageFilter(self)
- self._incoming_message_filter = _IncomingMessageFilter(self)
- stream_options.outgoing_message_filters.append(
- self._outgoing_message_filter)
- stream_options.incoming_message_filters.append(
- self._incoming_message_filter)
+ def enable_outgoing_compression(self):
+ self._framer.set_compress_outgoing_enabled(True)
- class _OutgoingFrameFilter(object):
+ def disable_outgoing_compression(self):
+ self._framer.set_compress_outgoing_enabled(False)
- def __init__(self, parent):
- self._parent = parent
- self._set_compression_bit = False
- def set_compression_bit(self):
- self._set_compression_bit = True
+class _PerMessageDeflateFramer(object):
+ """A framer for extensions with per-message DEFLATE feature."""
- def filter(self, frame):
- self._parent._process_outgoing_frame(
- frame, self._set_compression_bit)
- self._set_compression_bit = False
+ def __init__(self, deflate_max_window_bits, deflate_no_context_takeover):
+ self._logger = util.get_class_logger(self)
- class _IncomingFrameFilter(object):
+ self._rfc1979_deflater = util._RFC1979Deflater(
+ deflate_max_window_bits, deflate_no_context_takeover)
- def __init__(self, parent):
- self._parent = parent
+ self._rfc1979_inflater = util._RFC1979Inflater()
- def filter(self, frame):
- self._parent._process_incoming_frame(frame)
+ self._bfinal = False
- self._outgoing_frame_filter = _OutgoingFrameFilter(self)
- self._incoming_frame_filter = _IncomingFrameFilter(self)
- stream_options.outgoing_frame_filters.append(
- self._outgoing_frame_filter)
- stream_options.incoming_frame_filters.append(
- self._incoming_frame_filter)
+ self._compress_outgoing_enabled = False
- stream_options.encode_text_message_to_utf8 = False
+ # True if a message is fragmented and compression is ongoing.
+ self._compress_ongoing = False
- def set_c2s_max_window_bits(self, value):
- self._c2s_max_window_bits = value
+ # Calculates
+ # (Total outgoing bytes supplied to this filter) /
+ # (Total bytes sent to the network after applying this filter)
+ self._outgoing_average_ratio_calculator = _AverageRatioCalculator()
- def set_c2s_no_context_takeover(self, value):
- self._c2s_no_context_takeover = value
+ # Calculates
+ # (Total bytes received from the network) /
+ # (Total incoming bytes obtained after applying this filter)
+ self._incoming_average_ratio_calculator = _AverageRatioCalculator()
def set_bfinal(self, value):
self._bfinal = value
- def enable_outgoing_compression(self):
- self._compress_outgoing_enabled = True
-
- def disable_outgoing_compression(self):
- self._compress_outgoing_enabled = False
+ def set_compress_outgoing_enabled(self, value):
+ self._compress_outgoing_enabled = value
def _process_incoming_message(self, message, decompress):
if not decompress:
return message
received_payload_size = len(message)
- self._total_incoming_payload_bytes += received_payload_size
+ self._incoming_average_ratio_calculator.add_result_bytes(
+ received_payload_size)
- message = self._inflater.filter(message)
+ message = self._rfc1979_inflater.filter(message)
filtered_payload_size = len(message)
- self._total_filtered_incoming_payload_bytes += filtered_payload_size
+ self._incoming_average_ratio_calculator.add_original_bytes(
+ filtered_payload_size)
- _log_decompression_ratio(self._logger, received_payload_size,
- self._total_incoming_payload_bytes,
- filtered_payload_size,
- self._total_filtered_incoming_payload_bytes)
+ _log_incoming_compression_ratio(
+ self._logger,
+ received_payload_size,
+ filtered_payload_size,
+ self._incoming_average_ratio_calculator.get_average_ratio())
return message
@@ -607,18 +672,21 @@ class DeflateMessageProcessor(ExtensionProcessorInterface):
return message
original_payload_size = len(message)
- self._total_outgoing_payload_bytes += original_payload_size
+ self._outgoing_average_ratio_calculator.add_original_bytes(
+ original_payload_size)
- message = self._deflater.filter(
+ message = self._rfc1979_deflater.filter(
message, flush=end, bfinal=self._bfinal)
filtered_payload_size = len(message)
- self._total_filtered_outgoing_payload_bytes += filtered_payload_size
+ self._outgoing_average_ratio_calculator.add_result_bytes(
+ filtered_payload_size)
- _log_compression_ratio(self._logger, original_payload_size,
- self._total_outgoing_payload_bytes,
- filtered_payload_size,
- self._total_filtered_outgoing_payload_bytes)
+ _log_outgoing_compression_ratio(
+ self._logger,
+ original_payload_size,
+ filtered_payload_size,
+ self._outgoing_average_ratio_calculator.get_average_ratio())
if not self._compress_ongoing:
self._outgoing_frame_filter.set_compression_bit()
@@ -637,10 +705,81 @@ class DeflateMessageProcessor(ExtensionProcessorInterface):
frame.rsv1 = 1
+ def setup_stream_options(self, stream_options):
+ """Creates filters and sets them to the StreamOptions."""
+
+ class _OutgoingMessageFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, message, end=True, binary=False):
+ return self._parent._process_outgoing_message(
+ message, end, binary)
+
+ class _IncomingMessageFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+ self._decompress_next_message = False
+
+ def decompress_next_message(self):
+ self._decompress_next_message = True
+
+ def filter(self, message):
+ message = self._parent._process_incoming_message(
+ message, self._decompress_next_message)
+ self._decompress_next_message = False
+ return message
+
+ self._outgoing_message_filter = _OutgoingMessageFilter(self)
+ self._incoming_message_filter = _IncomingMessageFilter(self)
+ stream_options.outgoing_message_filters.append(
+ self._outgoing_message_filter)
+ stream_options.incoming_message_filters.append(
+ self._incoming_message_filter)
+
+ class _OutgoingFrameFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+ self._set_compression_bit = False
+
+ def set_compression_bit(self):
+ self._set_compression_bit = True
+
+ def filter(self, frame):
+ self._parent._process_outgoing_frame(
+ frame, self._set_compression_bit)
+ self._set_compression_bit = False
+
+ class _IncomingFrameFilter(object):
+
+ def __init__(self, parent):
+ self._parent = parent
+
+ def filter(self, frame):
+ self._parent._process_incoming_frame(frame)
+
+ self._outgoing_frame_filter = _OutgoingFrameFilter(self)
+ self._incoming_frame_filter = _IncomingFrameFilter(self)
+ stream_options.outgoing_frame_filters.append(
+ self._outgoing_frame_filter)
+ stream_options.incoming_frame_filters.append(
+ self._incoming_frame_filter)
+
+ stream_options.encode_text_message_to_utf8 = False
+
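# For illustration only (a hedged sketch, not code from the patch): the framer
# above and the deflate-frame processor feed byte counts into an
# _AverageRatioCalculator, which is defined earlier in this file, outside this
# hunk, and log per-frame plus running-average compression ratios. A minimal
# stand-alone equivalent of that bookkeeping, with hypothetical names and an
# assumed result/original ratio convention, might be:
class _AverageRatioCalculatorSketch(object):
    """Accumulates original and result byte totals and yields their ratio."""

    def __init__(self):
        self._total_original_bytes = 0
        self._total_result_bytes = 0

    def add_original_bytes(self, value):
        self._total_original_bytes += value

    def add_result_bytes(self, value):
        self._total_result_bytes += value

    def get_average_ratio(self):
        # The real class may define the ratio the other way around; the
        # callers above simply pass the returned value to the log helpers.
        if self._total_original_bytes == 0:
            return float('nan')
        return (float(self._total_result_bytes) /
                self._total_original_bytes)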
-class PerMessageCompressionExtensionProcessor(
+_available_processors[common.PERMESSAGE_DEFLATE_EXTENSION] = (
+ PerMessageDeflateExtensionProcessor)
+# TODO(tyoshino): Reorganize class names.
+_compression_extension_names.append('deflate')
+
+
+class PerMessageCompressExtensionProcessor(
CompressionExtensionProcessorBase):
- """WebSocket Per-message compression extension processor.
+ """permessage-compress extension processor.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-permessage-compression
@@ -656,18 +795,13 @@ class PerMessageCompressionExtensionProcessor(
def _lookup_compression_processor(self, method_desc):
if method_desc.name() == self._DEFLATE_METHOD:
- return DeflateMessageProcessor(method_desc)
+ return PerMessageDeflateExtensionProcessor(method_desc, False)
return None
_available_processors[common.PERMESSAGE_COMPRESSION_EXTENSION] = (
- PerMessageCompressionExtensionProcessor)
-
-
-# Adding vendor-prefixed permessage-compress extension.
-# TODO(bashi): Remove this after WebKit stops using vendor prefix.
-_available_processors[common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION] = (
- PerMessageCompressionExtensionProcessor)
+ PerMessageCompressExtensionProcessor)
+_compression_extension_names.append(common.PERMESSAGE_COMPRESSION_EXTENSION)
class MuxExtensionProcessor(ExtensionProcessorInterface):
@@ -676,52 +810,85 @@ class MuxExtensionProcessor(ExtensionProcessorInterface):
_QUOTA_PARAM = 'quota'
def __init__(self, request):
- self._request = request
+ ExtensionProcessorInterface.__init__(self, request)
+ self._quota = 0
+ self._extensions = []
def name(self):
return common.MUX_EXTENSION
- def get_extension_response(self, ws_request,
- logical_channel_extensions):
- # Mux extension cannot be used after extensions that depend on
- # frame boundary, extension data field, or any reserved bits
- # which are attributed to each frame.
- for extension in logical_channel_extensions:
- name = extension.name()
- if (name == common.PERFRAME_COMPRESSION_EXTENSION or
- name == common.DEFLATE_FRAME_EXTENSION or
- name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
- return None
-
+ def check_consistency_with_other_processors(self, processors):
+ before_mux = True
+ for processor in processors:
+ name = processor.name()
+ if name == self.name():
+ before_mux = False
+ continue
+ if not processor.is_active():
+ continue
+ if before_mux:
+ # Mux extension cannot be used after extensions
+ # that depend on frame boundary, extension data field, or any
+ # reserved bits which are attributed to each frame.
+ if (name == common.PERFRAME_COMPRESSION_EXTENSION or
+ name == common.DEFLATE_FRAME_EXTENSION or
+ name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION):
+ self.set_active(False)
+ return
+ else:
+ # Mux extension should not be applied before any history-based
+ # compression extension.
+ if (name == common.PERFRAME_COMPRESSION_EXTENSION or
+ name == common.DEFLATE_FRAME_EXTENSION or
+ name == common.X_WEBKIT_DEFLATE_FRAME_EXTENSION or
+ name == common.PERMESSAGE_COMPRESSION_EXTENSION or
+ name == common.X_WEBKIT_PERMESSAGE_COMPRESSION_EXTENSION):
+ self.set_active(False)
+ return
+
+ def _get_extension_response_internal(self):
+ self._active = False
quota = self._request.get_parameter_value(self._QUOTA_PARAM)
- if quota is None:
- ws_request.mux_quota = 0
- else:
+ if quota is not None:
try:
quota = int(quota)
except ValueError, e:
return None
if quota < 0 or quota >= 2 ** 32:
return None
- ws_request.mux_quota = quota
+ self._quota = quota
- ws_request.mux = True
- ws_request.mux_extensions = logical_channel_extensions
+ self._active = True
return common.ExtensionParameter(common.MUX_EXTENSION)
- def setup_stream_options(self, stream_options):
+ def _setup_stream_options_internal(self, stream_options):
pass
+ def set_quota(self, quota):
+ self._quota = quota
+
+ def quota(self):
+ return self._quota
+
+ def set_extensions(self, extensions):
+ self._extensions = extensions
+
+ def extensions(self):
+ return self._extensions
+
_available_processors[common.MUX_EXTENSION] = MuxExtensionProcessor
def get_extension_processor(extension_request):
- global _available_processors
processor_class = _available_processors.get(extension_request.name())
if processor_class is None:
return None
return processor_class(extension_request)
+def is_compression_extension(extension_name):
+ return extension_name in _compression_extension_names
+
+
# vi:sts=4 sw=4 et
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py
index e5c94ca90..c993a584b 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/_base.py
@@ -84,42 +84,29 @@ def get_default_port(is_secure):
return common.DEFAULT_WEB_SOCKET_PORT
-def validate_subprotocol(subprotocol, hixie):
+def validate_subprotocol(subprotocol):
"""Validate a value in the Sec-WebSocket-Protocol field.
- See
- - RFC 6455: Section 4.1., 4.2.2., and 4.3.
- - HyBi 00: Section 4.1. Opening handshake
-
- Args:
- hixie: if True, checks if characters in subprotocol are in range
- between U+0020 and U+007E. It's required by HyBi 00 but not by
- RFC 6455.
+ See Sections 4.1., 4.2.2., and 4.3. of RFC 6455.
"""
if not subprotocol:
raise HandshakeException('Invalid subprotocol name: empty')
- if hixie:
- # Parameter should be in the range U+0020 to U+007E.
- for c in subprotocol:
- if not 0x20 <= ord(c) <= 0x7e:
- raise HandshakeException(
- 'Illegal character in subprotocol name: %r' % c)
- else:
- # Parameter should be encoded HTTP token.
- state = http_header_util.ParsingState(subprotocol)
- token = http_header_util.consume_token(state)
- rest = http_header_util.peek(state)
- # If |rest| is not None, |subprotocol| is not one token or invalid. If
- # |rest| is None, |token| must not be None because |subprotocol| is
- # concatenation of |token| and |rest| and is not None.
- if rest is not None:
- raise HandshakeException('Invalid non-token string in subprotocol '
- 'name: %r' % rest)
+
+ # Parameter should be encoded HTTP token.
+ state = http_header_util.ParsingState(subprotocol)
+ token = http_header_util.consume_token(state)
+ rest = http_header_util.peek(state)
+ # If |rest| is not None, |subprotocol| is not one token or invalid. If
+ # |rest| is None, |token| must not be None because |subprotocol| is
+ # concatenation of |token| and |rest| and is not None.
+ if rest is not None:
+ raise HandshakeException('Invalid non-token string in subprotocol '
+ 'name: %r' % rest)
def parse_host_header(request):
- fields = request.headers_in['Host'].split(':', 1)
+ fields = request.headers_in[common.HOST_HEADER].split(':', 1)
if len(fields) == 1:
return fields[0], get_default_port(request.is_https())
try:
@@ -132,27 +119,6 @@ def format_header(name, value):
return '%s: %s\r\n' % (name, value)
-def build_location(request):
- """Build WebSocket location for request."""
- location_parts = []
- if request.is_https():
- location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
- else:
- location_parts.append(common.WEB_SOCKET_SCHEME)
- location_parts.append('://')
- host, port = parse_host_header(request)
- connection_port = request.connection.local_addr[1]
- if port != connection_port:
- raise HandshakeException('Header/connection port mismatch: %d/%d' %
- (port, connection_port))
- location_parts.append(host)
- if (port != get_default_port(request.is_https())):
- location_parts.append(':')
- location_parts.append(str(port))
- location_parts.append(request.uri)
- return ''.join(location_parts)
-
-
def get_mandatory_header(request, key):
value = request.headers_in.get(key)
if value is None:
@@ -180,16 +146,6 @@ def check_request_line(request):
request.protocol)
-def check_header_lines(request, mandatory_headers):
- check_request_line(request)
-
- # The expected field names, and the meaning of their corresponding
- # values, are as follows.
- # |Upgrade| and |Connection|
- for key, expected_value in mandatory_headers:
- validate_mandatory_header(request, key, expected_value)
-
-
def parse_token_list(data):
"""Parses a header value which follows 1#token and returns parsed elements
as a list of strings.
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py
index fc0e2a096..1d54a662d 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi.py
@@ -49,6 +49,7 @@ import re
from mod_pywebsocket import common
from mod_pywebsocket.extensions import get_extension_processor
+from mod_pywebsocket.extensions import is_compression_extension
from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
from mod_pywebsocket.handshake._base import get_mandatory_header
@@ -180,43 +181,59 @@ class Handshaker(object):
processors.append(processor)
self._request.ws_extension_processors = processors
+ # List of extra headers. The extra handshake handler may add header
+ # data as name/value pairs to this list and pywebsocket appends
+ # them to the WebSocket handshake.
+ self._request.extra_headers = []
+
# Extra handshake handler may modify/remove processors.
self._dispatcher.do_extra_handshake(self._request)
processors = filter(lambda processor: processor is not None,
self._request.ws_extension_processors)
+ # Ask each processor whether there are extensions on the request that
+ # cannot co-exist with it. When a processor decides that other
+ # processors cannot co-exist with it, it marks them (or itself) as
+ # "inactive". The first extension processor has the right to
+ # make the final call.
+ for processor in reversed(processors):
+ if processor.is_active():
+ processor.check_consistency_with_other_processors(
+ processors)
+ processors = filter(lambda processor: processor.is_active(),
+ processors)
+
accepted_extensions = []
- # We need to take care of mux extension here. Extensions that
- # are placed before mux should be applied to logical channels.
+ # We need to take the mux extension into account here.
+ # If the mux extension exists:
+ # - Remove processors of extensions for logical channels,
+ # i.e. processors located before the mux processor
+ # - Pass extension requests for logical channels to the mux processor
+ # - Attach the mux processor to the request. The dispatcher will refer
+ # to it to decide whether to use the mux handler or not.
mux_index = -1
for i, processor in enumerate(processors):
if processor.name() == common.MUX_EXTENSION:
mux_index = i
break
if mux_index >= 0:
- mux_processor = processors[mux_index]
- logical_channel_processors = processors[:mux_index]
- processors = processors[mux_index+1:]
-
- for processor in logical_channel_processors:
- extension_response = processor.get_extension_response()
- if extension_response is None:
- # Rejected.
- continue
- accepted_extensions.append(extension_response)
- # Pass a shallow copy of accepted_extensions as extensions for
- # logical channels.
- mux_response = mux_processor.get_extension_response(
- self._request, accepted_extensions[:])
- if mux_response is not None:
- accepted_extensions.append(mux_response)
+ logical_channel_extensions = []
+ for processor in processors[:mux_index]:
+ logical_channel_extensions.append(processor.request())
+ processor.set_active(False)
+ self._request.mux_processor = processors[mux_index]
+ self._request.mux_processor.set_extensions(
+ logical_channel_extensions)
+ processors = filter(lambda processor: processor.is_active(),
+ processors)
stream_options = StreamOptions()
- # When there is mux extension, here, |processors| contain only
- # prosessors for extensions placed after mux.
- for processor in processors:
+ for index, processor in enumerate(processors):
+ if not processor.is_active():
+ continue
extension_response = processor.get_extension_response()
if extension_response is None:
@@ -227,6 +244,14 @@ class Handshaker(object):
processor.setup_stream_options(stream_options)
+ if not is_compression_extension(processor.name()):
+ continue
+
+ # Inactivate all of the following compression extensions.
+ for j in xrange(index + 1, len(processors)):
+ if is_compression_extension(processors[j].name()):
+ processors[j].set_active(False)
+
if len(accepted_extensions) > 0:
self._request.ws_extensions = accepted_extensions
self._logger.debug(
@@ -242,7 +267,7 @@ class Handshaker(object):
raise HandshakeException(
'do_extra_handshake must choose one subprotocol from '
'ws_requested_protocols and set it to ws_protocol')
- validate_subprotocol(self._request.ws_protocol, hixie=False)
+ validate_subprotocol(self._request.ws_protocol)
self._logger.debug(
'Subprotocol accepted: %r',
@@ -375,6 +400,7 @@ class Handshaker(object):
response.append('HTTP/1.1 101 Switching Protocols\r\n')
+ # WebSocket headers
response.append(format_header(
common.UPGRADE_HEADER, common.WEBSOCKET_UPGRADE_TYPE))
response.append(format_header(
@@ -390,6 +416,11 @@ class Handshaker(object):
response.append(format_header(
common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(self._request.ws_extensions)))
+
+ # Headers not specific to WebSocket
+ for name, value in self._request.extra_headers:
+ response.append(format_header(name, value))
+
response.append('\r\n')
return ''.join(response)
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py
index cc6f8dc43..8757717a6 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/handshake/hybi00.py
@@ -51,11 +51,12 @@ from mod_pywebsocket import common
from mod_pywebsocket.stream import StreamHixie75
from mod_pywebsocket import util
from mod_pywebsocket.handshake._base import HandshakeException
-from mod_pywebsocket.handshake._base import build_location
-from mod_pywebsocket.handshake._base import check_header_lines
+from mod_pywebsocket.handshake._base import check_request_line
from mod_pywebsocket.handshake._base import format_header
+from mod_pywebsocket.handshake._base import get_default_port
from mod_pywebsocket.handshake._base import get_mandatory_header
-from mod_pywebsocket.handshake._base import validate_subprotocol
+from mod_pywebsocket.handshake._base import parse_host_header
+from mod_pywebsocket.handshake._base import validate_mandatory_header
_MANDATORY_HEADERS = [
@@ -65,6 +66,56 @@ _MANDATORY_HEADERS = [
]
+def _validate_subprotocol(subprotocol):
+ """Checks if characters in subprotocol are in range between U+0020 and
+ U+007E. A value in the Sec-WebSocket-Protocol field need to satisfy this
+ requirement.
+
+ See Section 4.1. Opening handshake of the spec.
+ """
+
+ if not subprotocol:
+ raise HandshakeException('Invalid subprotocol name: empty')
+
+ # Parameter should be in the range U+0020 to U+007E.
+ for c in subprotocol:
+ if not 0x20 <= ord(c) <= 0x7e:
+ raise HandshakeException(
+ 'Illegal character in subprotocol name: %r' % c)
+
+
+def _check_header_lines(request, mandatory_headers):
+ check_request_line(request)
+
+ # The expected field names, and the meaning of their corresponding
+ # values, are as follows.
+ # |Upgrade| and |Connection|
+ for key, expected_value in mandatory_headers:
+ validate_mandatory_header(request, key, expected_value)
+
+
+def _build_location(request):
+ """Build WebSocket location for request."""
+
+ location_parts = []
+ if request.is_https():
+ location_parts.append(common.WEB_SOCKET_SECURE_SCHEME)
+ else:
+ location_parts.append(common.WEB_SOCKET_SCHEME)
+ location_parts.append('://')
+ host, port = parse_host_header(request)
+ connection_port = request.connection.local_addr[1]
+ if port != connection_port:
+ raise HandshakeException('Header/connection port mismatch: %d/%d' %
+ (port, connection_port))
+ location_parts.append(host)
+ if (port != get_default_port(request.is_https())):
+ location_parts.append(':')
+ location_parts.append(str(port))
+ location_parts.append(request.unparsed_uri)
+ return ''.join(location_parts)
+
+
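# For illustration only (a worked example, not code from the patch): for a
# hypothetical non-secure request to resource '/echo' whose Host header names
# example.com on port 8880 (matching the connection's local port),
# _build_location() above assembles 'ws://example.com:8880/echo'; the port
# suffix is omitted when the port equals the scheme's default. The string
# assembly reduces to roughly:
def _build_location_example():
    scheme, host, port, resource = 'ws', 'example.com', 8880, '/echo'
    location = scheme + '://' + host
    if port != 80:  # 80 is the default port for the ws scheme
        location += ':' + str(port)
    return location + resource  # 'ws://example.com:8880/echo'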
class Handshaker(object):
"""Opening handshake processor for the WebSocket protocol version HyBi 00.
"""
@@ -101,7 +152,7 @@ class Handshaker(object):
# 5.1 Reading the client's opening handshake.
# dispatcher sets it in self._request.
- check_header_lines(self._request, _MANDATORY_HEADERS)
+ _check_header_lines(self._request, _MANDATORY_HEADERS)
self._set_resource()
self._set_subprotocol()
self._set_location()
@@ -121,14 +172,14 @@ class Handshaker(object):
subprotocol = self._request.headers_in.get(
common.SEC_WEBSOCKET_PROTOCOL_HEADER)
if subprotocol is not None:
- validate_subprotocol(subprotocol, hixie=True)
+ _validate_subprotocol(subprotocol)
self._request.ws_protocol = subprotocol
def _set_location(self):
# |Host|
host = self._request.headers_in.get(common.HOST_HEADER)
if host is not None:
- self._request.ws_location = build_location(self._request)
+ self._request.ws_location = _build_location(self._request)
# TODO(ukai): check host is this host.
def _set_origin(self):
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py
index 2cc62de04..c244421cf 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/headerparserhandler.py
@@ -167,7 +167,9 @@ def _create_dispatcher():
handler_root, handler_scan, allow_handlers_outside_root)
for warning in dispatcher.source_warnings():
- apache.log_error('mod_pywebsocket: %s' % warning, apache.APLOG_WARNING)
+ apache.log_error(
+ 'mod_pywebsocket: Warning in source loading: %s' % warning,
+ apache.APLOG_WARNING)
return dispatcher
@@ -191,12 +193,16 @@ def headerparserhandler(request):
# Fallback to default http handler for request paths for which
# we don't have request handlers.
if not _dispatcher.get_handler_suite(request.uri):
- request.log_error('No handler for resource: %r' % request.uri,
- apache.APLOG_INFO)
- request.log_error('Fallback to Apache', apache.APLOG_INFO)
+ request.log_error(
+ 'mod_pywebsocket: No handler for resource: %r' % request.uri,
+ apache.APLOG_INFO)
+ request.log_error(
+ 'mod_pywebsocket: Fallback to Apache', apache.APLOG_INFO)
return apache.DECLINED
except dispatch.DispatchException, e:
- request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ request.log_error(
+ 'mod_pywebsocket: Dispatch failed for error: %s' % e,
+ apache.APLOG_INFO)
if not handshake_is_done:
return e.status
@@ -210,26 +216,30 @@ def headerparserhandler(request):
handshake.do_handshake(
request, _dispatcher, allowDraft75=allow_draft75)
except handshake.VersionException, e:
- request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ request.log_error(
+ 'mod_pywebsocket: Handshake failed for version error: %s' % e,
+ apache.APLOG_INFO)
request.err_headers_out.add(common.SEC_WEBSOCKET_VERSION_HEADER,
e.supported_versions)
return apache.HTTP_BAD_REQUEST
except handshake.HandshakeException, e:
# Handshake for ws/wss failed.
# Send http response with error status.
- request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ request.log_error(
+ 'mod_pywebsocket: Handshake failed for error: %s' % e,
+ apache.APLOG_INFO)
return e.status
handshake_is_done = True
request._dispatcher = _dispatcher
_dispatcher.transfer_data(request)
except handshake.AbortedByUserException, e:
- request.log_error('mod_pywebsocket: %s' % e, apache.APLOG_INFO)
+ request.log_error('mod_pywebsocket: Aborted: %s' % e, apache.APLOG_INFO)
except Exception, e:
# DispatchException can also be thrown if something is wrong in
# pywebsocket code. It's caught here, then.
- request.log_error('mod_pywebsocket: %s\n%s' %
+ request.log_error('mod_pywebsocket: Exception occurred: %s\n%s' %
(e, util.get_stack_trace()),
apache.APLOG_ERR)
# Unknown exceptions before handshake mean Apache must handle its
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py
index f0bdd2461..76334685b 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/mux.py
@@ -50,6 +50,7 @@ from mod_pywebsocket import handshake
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
+from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
@@ -94,10 +95,12 @@ _DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
+_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
-_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 3002
_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
+_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
_DROP_CODE_ACKNOWLEDGED = 3008
+_DROP_CODE_BAD_FRAGMENTATION = 3009
class MuxUnexpectedException(Exception):
@@ -158,8 +161,7 @@ def _encode_number(number):
def _create_add_channel_response(channel_id, encoded_handshake,
- encoding=0, rejected=False,
- outer_frame_mask=False):
+ encoding=0, rejected=False):
if encoding != 0 and encoding != 1:
raise ValueError('Invalid encoding %d' % encoding)
@@ -169,12 +171,10 @@ def _create_add_channel_response(channel_id, encoded_handshake,
_encode_channel_id(channel_id) +
_encode_number(len(encoded_handshake)) +
encoded_handshake)
- payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
- return create_binary_frame(payload, mask=outer_frame_mask)
+ return block
-def _create_drop_channel(channel_id, code=None, message='',
- outer_frame_mask=False):
+def _create_drop_channel(channel_id, code=None, message=''):
if len(message) > 0 and code is None:
raise ValueError('Code must be specified if message is specified')
@@ -187,36 +187,31 @@ def _create_drop_channel(channel_id, code=None, message='',
reason_size = _encode_number(len(reason))
block += reason_size + reason
- payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
- return create_binary_frame(payload, mask=outer_frame_mask)
+ return block
-def _create_flow_control(channel_id, replenished_quota,
- outer_frame_mask=False):
+def _create_flow_control(channel_id, replenished_quota):
first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(replenished_quota))
- payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
- return create_binary_frame(payload, mask=outer_frame_mask)
+ return block
-def _create_new_channel_slot(slots, send_quota, outer_frame_mask=False):
+def _create_new_channel_slot(slots, send_quota):
if slots < 0 or send_quota < 0:
raise ValueError('slots and send_quota must be non-negative.')
first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
block = (chr(first_byte) +
_encode_number(slots) +
_encode_number(send_quota))
- payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
- return create_binary_frame(payload, mask=outer_frame_mask)
+ return block
-def _create_fallback_new_channel_slot(outer_frame_mask=False):
+def _create_fallback_new_channel_slot():
first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
- payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + block
- return create_binary_frame(payload, mask=outer_frame_mask)
+ return block
def _parse_request_text(request_text):
@@ -318,44 +313,34 @@ class _MuxFramePayloadParser(object):
def _read_number(self):
if self._read_position + 1 > len(self._data):
- raise PhysicalConnectionError(
- _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ raise ValueError(
'Cannot read the first byte of number field')
number = ord(self._data[self._read_position])
if number & 0x80 == 0x80:
- raise PhysicalConnectionError(
- _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ raise ValueError(
'The most significant bit of the first byte of number should '
'be unset')
self._read_position += 1
pos = self._read_position
if number == 127:
if pos + 8 > len(self._data):
- raise PhysicalConnectionError(
- _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
- 'Invalid number field')
+ raise ValueError('Invalid number field')
self._read_position += 8
number = struct.unpack('!Q', self._data[pos:pos+8])[0]
if number > 0x7FFFFFFFFFFFFFFF:
- raise PhysicalConnectionError(
- _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
- 'Encoded number >= 2^63')
+ raise ValueError('Encoded number(%d) >= 2^63' % number)
if number <= 0xFFFF:
- raise PhysicalConnectionError(
- _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ raise ValueError(
'%d should not be encoded by 9 bytes encoding' % number)
return number
if number == 126:
if pos + 2 > len(self._data):
- raise PhysicalConnectionError(
- _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
- 'Invalid number field')
+ raise ValueError('Invalid number field')
self._read_position += 2
number = struct.unpack('!H', self._data[pos:pos+2])[0]
if number <= 125:
- raise PhysicalConnectionError(
- _DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ raise ValueError(
'%d should not be encoded by 3 bytes encoding' % number)
return number
@@ -366,7 +351,11 @@ class _MuxFramePayloadParser(object):
- the contents.
"""
- size = self._read_number()
+ try:
+ size = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
pos = self._read_position
if pos + size > len(self._data):
raise PhysicalConnectionError(
@@ -419,9 +408,11 @@ class _MuxFramePayloadParser(object):
try:
control_block.channel_id = self.read_channel_id()
+ control_block.send_quota = self._read_number()
except ValueError, e:
- raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
- control_block.send_quota = self._read_number()
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
+
return control_block
def _read_drop_channel(self, first_byte, control_block):
@@ -455,8 +446,12 @@ class _MuxFramePayloadParser(object):
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.fallback = first_byte & 1
- control_block.slots = self._read_number()
- control_block.send_quota = self._read_number()
+ try:
+ control_block.slots = self._read_number()
+ control_block.send_quota = self._read_number()
+ except ValueError, e:
+ raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
+ str(e))
return control_block
def read_control_blocks(self):
@@ -549,8 +544,12 @@ class _LogicalConnection(object):
self._mux_handler = mux_handler
self._channel_id = channel_id
self._incoming_data = ''
+
+ # - Protects _waiting_write_completion
+ # - Signals the thread waiting for completion of write by mux handler
self._write_condition = threading.Condition()
self._waiting_write_completion = False
+
self._read_condition = threading.Condition()
self._read_state = self.STATE_ACTIVE
@@ -594,6 +593,7 @@ class _LogicalConnection(object):
self._waiting_write_completion = True
self._mux_handler.send_data(self._channel_id, data)
self._write_condition.wait()
+ # TODO(tyoshino): Raise an exception if woken up by on_writer_done.
finally:
self._write_condition.release()
@@ -607,20 +607,31 @@ class _LogicalConnection(object):
self._mux_handler.send_control_data(data)
- def notify_write_done(self):
+ def on_write_data_done(self):
"""Called when sending data is completed."""
try:
self._write_condition.acquire()
if not self._waiting_write_completion:
raise MuxUnexpectedException(
- 'Invalid call of notify_write_done for logical connection'
- ' %d' % self._channel_id)
+ 'Invalid call of on_write_data_done for logical '
+ 'connection %d' % self._channel_id)
+ self._waiting_write_completion = False
+ self._write_condition.notify()
+ finally:
+ self._write_condition.release()
+
+ def on_writer_done(self):
+ """Called by the mux handler when the writer thread has finished."""
+
+ try:
+ self._write_condition.acquire()
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
+
def append_frame_data(self, frame_data):
"""Appends incoming frame data. Called when mux_handler dispatches
frame data to the corresponding application.
@@ -686,37 +697,162 @@ class _LogicalConnection(object):
self._read_condition.release()
+class _InnerMessage(object):
+ """Holds the result of _InnerMessageBuilder.build().
+ """
+
+ def __init__(self, opcode, payload):
+ self.opcode = opcode
+ self.payload = payload
+
+
+class _InnerMessageBuilder(object):
+ """A class that holds the context of inner message fragmentation and
+ builds a message from fragmented inner frame(s).
+ """
+
+ def __init__(self):
+ self._control_opcode = None
+ self._pending_control_fragments = []
+ self._message_opcode = None
+ self._pending_message_fragments = []
+ self._frame_handler = self._handle_first
+
+ def _handle_first(self, frame):
+ if frame.opcode == common.OPCODE_CONTINUATION:
+ raise InvalidFrameException('Sending invalid continuation opcode')
+
+ if common.is_control_opcode(frame.opcode):
+ return self._process_first_fragmented_control(frame)
+ else:
+ return self._process_first_fragmented_message(frame)
+
+ def _process_first_fragmented_control(self, frame):
+ self._control_opcode = frame.opcode
+ self._pending_control_fragments.append(frame.payload)
+ if not frame.fin:
+ self._frame_handler = self._handle_fragmented_control
+ return None
+ return self._reassemble_fragmented_control()
+
+ def _process_first_fragmented_message(self, frame):
+ self._message_opcode = frame.opcode
+ self._pending_message_fragments.append(frame.payload)
+ if not frame.fin:
+ self._frame_handler = self._handle_fragmented_message
+ return None
+ return self._reassemble_fragmented_message()
+
+ def _handle_fragmented_control(self, frame):
+ if frame.opcode != common.OPCODE_CONTINUATION:
+ raise InvalidFrameException(
+ 'Sending invalid opcode %d while sending fragmented control '
+ 'message' % frame.opcode)
+ self._pending_control_fragments.append(frame.payload)
+ if not frame.fin:
+ return None
+ return self._reassemble_fragmented_control()
+
+ def _reassemble_fragmented_control(self):
+ opcode = self._control_opcode
+ payload = ''.join(self._pending_control_fragments)
+ self._control_opcode = None
+ self._pending_control_fragments = []
+ if self._message_opcode is not None:
+ self._frame_handler = self._handle_fragmented_message
+ else:
+ self._frame_handler = self._handle_first
+ return _InnerMessage(opcode, payload)
+
+ def _handle_fragmented_message(self, frame):
+ # Sender can interleave a control message while sending fragmented
+ # messages.
+ if common.is_control_opcode(frame.opcode):
+ if self._control_opcode is not None:
+ raise MuxUnexpectedException(
+ 'Should not reach here (bug in builder)')
+ return self._process_first_fragmented_control(frame)
+
+ if frame.opcode != common.OPCODE_CONTINUATION:
+ raise InvalidFrameException(
+ 'Sending invalid opcode %d while sending fragmented message' %
+ frame.opcode)
+ self._pending_message_fragments.append(frame.payload)
+ if not frame.fin:
+ return None
+ return self._reassemble_fragmented_message()
+
+ def _reassemble_fragmented_message(self):
+ opcode = self._message_opcode
+ payload = ''.join(self._pending_message_fragments)
+ self._message_opcode = None
+ self._pending_message_fragments = []
+ self._frame_handler = self._handle_first
+ return _InnerMessage(opcode, payload)
+
+ def build(self, frame):
+ """Build an inner message. Returns an _InnerMessage instance when
+ the given frame is the last fragmented frame. Returns None otherwise.
+
+ Args:
+ frame: an inner frame.
+ Raises:
+ InvalidFrameException: when an invalid opcode is received (e.g.
+ a non-continuation data opcode is received while the fin flag of
+ the previous inner frame was not set).
+ """
+
+ return self._frame_handler(frame)
+
+
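# For illustration only (a hedged sketch, not code from the patch): the
# builder is fed inner frames one at a time and returns an _InnerMessage only
# when the final fragment (fin=1) of a message has arrived; control messages
# may be interleaved between data fragments. Using the Frame class imported
# at the top of this file, usage looks roughly like:
def _inner_message_builder_example():
    builder = _InnerMessageBuilder()
    # First fragment of a text message: nothing to deliver yet.
    assert builder.build(Frame(fin=0, opcode=common.OPCODE_TEXT,
                               payload='Hel')) is None
    # Final fragment: the reassembled message is returned.
    message = builder.build(Frame(fin=1, opcode=common.OPCODE_CONTINUATION,
                                  payload='lo'))
    assert message.opcode == common.OPCODE_TEXT
    assert message.payload == 'Hello'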
class _LogicalStream(Stream):
"""Mimics the Stream class. This class interprets multiplexed WebSocket
frames.
"""
- def __init__(self, request, send_quota, receive_quota):
+ def __init__(self, request, stream_options, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
+ stream_options: StreamOptions instance.
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
- # TODO(bashi): Support frame filters.
- stream_options = StreamOptions()
# Physical stream is responsible for masking.
stream_options.unmask_receive = False
- # Control frames can be fragmented on logical channel.
- stream_options.allow_fragmented_control_frame = True
Stream.__init__(self, request, stream_options)
+
+ self._send_closed = False
self._send_quota = send_quota
- self._send_quota_condition = threading.Condition()
+ # - Protects _send_closed and _send_quota
+ # - Signals the thread waiting for send quota replenished
+ self._send_condition = threading.Condition()
+
+ # The opcode of the first frame in messages.
+ self._message_opcode = common.OPCODE_TEXT
+ # True when the last message was fragmented.
+ self._last_message_was_fragmented = False
+
self._receive_quota = receive_quota
self._write_inner_frame_semaphore = threading.Semaphore()
+ self._inner_message_builder = _InnerMessageBuilder()
+
def _create_inner_frame(self, opcode, payload, end=True):
- # TODO(bashi): Support extensions that use reserved bits.
- first_byte = (end << 7) | opcode
- return (_encode_channel_id(self._request.channel_id) +
- chr(first_byte) + payload)
+ frame = Frame(fin=end, opcode=opcode, payload=payload)
+ for frame_filter in self._options.outgoing_frame_filters:
+ frame_filter.filter(frame)
+
+ if len(payload) != len(frame.payload):
+ raise MuxUnexpectedException(
+ 'Mux extension must not be used after extensions which change '
+ 'frame boundaries')
+
+ first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
+ (frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
+ return chr(first_byte) + frame.payload
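# For illustration (not from the patch): the first byte packs
# fin/rsv1/rsv2/rsv3/opcode as in RFC 6455. A final text frame (fin=1,
# opcode=0x1) encodes as (1 << 7) | 0x1 == 0x81; the same frame with rsv1 set
# by a compression filter encodes as (1 << 7) | (1 << 6) | 0x1 == 0xC1.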
def _write_inner_frame(self, opcode, payload, end=True):
payload_length = len(payload)
@@ -730,14 +866,36 @@ class _LogicalStream(Stream):
# multiplexing control blocks can be inserted between fragmented
# inner frames on the physical channel.
self._write_inner_frame_semaphore.acquire()
+
+ # Consume an octet quota when this is the first fragmented frame.
+ if opcode != common.OPCODE_CONTINUATION:
+ try:
+ self._send_condition.acquire()
+ while (not self._send_closed) and self._send_quota == 0:
+ self._send_condition.wait()
+
+ if self._send_closed:
+ raise BadOperationException(
+ 'Logical connection %d is closed' %
+ self._request.channel_id)
+
+ self._send_quota -= 1
+ finally:
+ self._send_condition.release()
+
while write_position < payload_length:
try:
- self._send_quota_condition.acquire()
- while self._send_quota == 0:
+ self._send_condition.acquire()
+ while (not self._send_closed) and self._send_quota == 0:
self._logger.debug(
'No quota. Waiting FlowControl message for %d.' %
self._request.channel_id)
- self._send_quota_condition.wait()
+ self._send_condition.wait()
+
+ if self._send_closed:
+ raise BadOperationException(
+ 'Logical connection %d is closed' %
+ self._request.channel_id)
remaining = payload_length - write_position
write_length = min(self._send_quota, remaining)
@@ -749,18 +907,16 @@ class _LogicalStream(Stream):
opcode,
payload[write_position:write_position+write_length],
inner_frame_end)
- frame_data = self._writer.build(
- inner_frame, end=True, binary=True)
self._send_quota -= write_length
self._logger.debug('Consumed quota=%d, remaining=%d' %
(write_length, self._send_quota))
finally:
- self._send_quota_condition.release()
+ self._send_condition.release()
# Writing data will block the worker so we need to release
- # _send_quota_condition before writing.
- self._logger.debug('Sending inner frame: %r' % frame_data)
- self._request.connection.write(frame_data)
+ # _send_condition before writing.
+ self._logger.debug('Sending inner frame: %r' % inner_frame)
+ self._request.connection.write(inner_frame)
write_position += write_length
opcode = common.OPCODE_CONTINUATION
@@ -773,12 +929,18 @@ class _LogicalStream(Stream):
def replenish_send_quota(self, send_quota):
"""Replenish send quota."""
- self._send_quota_condition.acquire()
- self._send_quota += send_quota
- self._logger.debug('Replenished send quota for channel id %d: %d' %
- (self._request.channel_id, self._send_quota))
- self._send_quota_condition.notify()
- self._send_quota_condition.release()
+ try:
+ self._send_condition.acquire()
+ if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
+ self._send_quota = 0
+ raise LogicalChannelError(
+ self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
+ self._send_quota += send_quota
+ self._logger.debug('Replenished send quota for channel id %d: %d' %
+ (self._request.channel_id, self._send_quota))
+ finally:
+ self._send_condition.notify()
+ self._send_condition.release()
def consume_receive_quota(self, amount):
"""Consumes receive quota. Returns False on failure."""
@@ -808,7 +970,19 @@ class _LogicalStream(Stream):
opcode = common.OPCODE_TEXT
message = message.encode('utf-8')
+ for message_filter in self._options.outgoing_message_filters:
+ message = message_filter.filter(message, end, binary)
+
+ if self._last_message_was_fragmented:
+ if opcode != self._message_opcode:
+ raise BadOperationException('Message types are different in '
+ 'frames for the same message')
+ opcode = common.OPCODE_CONTINUATION
+ else:
+ self._message_opcode = opcode
+
self._write_inner_frame(opcode, message, end)
+ self._last_message_was_fragmented = not end
def _receive_frame(self):
"""Overrides Stream._receive_frame.
@@ -821,6 +995,9 @@ class _LogicalStream(Stream):
opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
amount = len(payload)
+ # Replenish one extra octet when receiving the first fragmented frame.
+ if opcode != common.OPCODE_CONTINUATION:
+ amount += 1
self._receive_quota += amount
frame_data = _create_flow_control(self._request.channel_id,
amount)
@@ -829,6 +1006,21 @@ class _LogicalStream(Stream):
self._request.connection.write_control_data(frame_data)
return opcode, payload, fin, rsv1, rsv2, rsv3
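# Note (not from the patch): the receive-side accounting above mirrors
# _write_inner_frame(). A sender spends one quota octet when it starts a new
# message plus one octet per payload byte, so the receiver replenishes
# len(payload) + 1 octets for the first frame of a message and len(payload)
# octets for each continuation frame.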
+ def _get_message_from_frame(self, frame):
+ """Overrides Stream._get_message_from_frame.
+ """
+
+ try:
+ inner_message = self._inner_message_builder.build(frame)
+ except InvalidFrameException:
+ raise LogicalChannelError(
+ self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
+
+ if inner_message is None:
+ return None
+ self._original_opcode = inner_message.opcode
+ return inner_message.payload
+
def receive_message(self):
"""Overrides Stream.receive_message."""
@@ -875,12 +1067,13 @@ class _LogicalStream(Stream):
self._request.channel_id)
self._request.server_terminated = True
- def _drain_received_data(self):
- """Overrides Stream._drain_received_data. Nothing need to be done for
- logical channel.
- """
+ def stop_sending(self):
+ """Stops accepting new send operation (_write_inner_frame)."""
- pass
+ self._send_condition.acquire()
+ self._send_closed = True
+ self._send_condition.notify()
+ self._send_condition.release()
class _OutgoingData(object):
@@ -911,8 +1104,17 @@ class _PhysicalConnectionWriter(threading.Thread):
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
+
+ # When set, make this thread stop accepting new data, flush pending
+ # data and exit.
self._stop_requested = False
+ # The close code of the physical connection.
+ self._close_code = common.STATUS_NORMAL_CLOSURE
+ # Deque for passing write data. It's protected by _deque_condition
+ # until _stop_requested is set.
self._deque = collections.deque()
+ # - Protects _deque, _stop_requested and _close_code
+ # - Signals threads waiting for them to be available
self._deque_condition = threading.Condition()
def put_outgoing_data(self, data):
@@ -937,8 +1139,11 @@ class _PhysicalConnectionWriter(threading.Thread):
self._deque_condition.release()
def _write_data(self, outgoing_data):
+ message = (_encode_channel_id(outgoing_data.channel_id) +
+ outgoing_data.data)
try:
- self._mux_handler.physical_connection.write(outgoing_data.data)
+ self._mux_handler.physical_stream.send_message(
+ message=message, end=True, binary=True)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
@@ -948,33 +1153,51 @@ class _PhysicalConnectionWriter(threading.Thread):
# TODO(bashi): It would be better to block the thread that sends
# control data as well.
if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
- self._mux_handler.notify_write_done(outgoing_data.channel_id)
+ self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
def run(self):
- self._deque_condition.acquire()
- while not self._stop_requested:
- if len(self._deque) == 0:
- self._deque_condition.wait()
- continue
-
- outgoing_data = self._deque.popleft()
- self._deque_condition.release()
- self._write_data(outgoing_data)
+ try:
self._deque_condition.acquire()
+ while not self._stop_requested:
+ if len(self._deque) == 0:
+ self._deque_condition.wait()
+ continue
- # Flush deque
- try:
- while len(self._deque) > 0:
outgoing_data = self._deque.popleft()
+
+ self._deque_condition.release()
self._write_data(outgoing_data)
+ self._deque_condition.acquire()
+
+ # Flush deque.
+ #
+ # At this point, self._deque_condition is always acquired.
+ try:
+ while len(self._deque) > 0:
+ outgoing_data = self._deque.popleft()
+ self._write_data(outgoing_data)
+ finally:
+ self._deque_condition.release()
+
+ # Close physical connection.
+ try:
+ # Don't wait for the response here. The response will be read
+ # by the reader thread.
+ self._mux_handler.physical_stream.close_connection(
+ self._close_code, wait_response=False)
+ except Exception, e:
+ util.prepend_message_to_exception(
+ 'Failed to close the physical connection: %r' % e)
+ raise
finally:
- self._deque_condition.release()
+ self._mux_handler.notify_writer_done()
- def stop(self):
+ def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
"""Stops the writer thread."""
self._deque_condition.acquire()
self._stop_requested = True
+ self._close_code = close_code
self._deque_condition.notify()
self._deque_condition.release()
@@ -1055,6 +1278,9 @@ class _Worker(threading.Thread):
try:
# Non-critical exceptions will be handled by dispatcher.
self._mux_handler.dispatcher.transfer_data(self._request)
+ except LogicalChannelError, e:
+ self._mux_handler.fail_logical_channel(
+ e.channel_id, e.drop_code, e.message)
finally:
self._mux_handler.notify_worker_done(self._request.channel_id)
@@ -1083,8 +1309,6 @@ class _MuxHandshaker(hybi.Handshaker):
# these headers are included already.
request.headers_in[common.UPGRADE_HEADER] = (
common.WEBSOCKET_UPGRADE_TYPE)
- request.headers_in[common.CONNECTION_HEADER] = (
- common.UPGRADE_CONNECTION_TYPE)
request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
str(common.VERSION_HYBI_LATEST))
request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
@@ -1095,8 +1319,9 @@ class _MuxHandshaker(hybi.Handshaker):
self._logger.debug('Creating logical stream for %d' %
self._request.channel_id)
- return _LogicalStream(self._request, self._send_quota,
- self._receive_quota)
+ return _LogicalStream(
+ self._request, stream_options, self._send_quota,
+ self._receive_quota)
def _create_handshake_response(self, accept):
"""Override hybi._create_handshake_response."""
@@ -1105,7 +1330,9 @@ class _MuxHandshaker(hybi.Handshaker):
response.append('HTTP/1.1 101 Switching Protocols\r\n')
- # Upgrade, Connection and Sec-WebSocket-Accept should be excluded.
+ # Upgrade and Sec-WebSocket-Accept should be excluded.
+ response.append('%s: %s\r\n' % (
+ common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
if self._request.ws_protocol is not None:
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
@@ -1169,8 +1396,6 @@ class _HandshakeDeltaBase(object):
del headers[key]
else:
headers[key] = value
- # TODO(bashi): Support extensions
- headers['Sec-WebSocket-Extensions'] = ''
return headers
@@ -1232,8 +1457,12 @@ class _MuxHandler(object):
# Create "Implicitly Opened Connection".
logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
- self._handshake_base = _HandshakeDeltaBase(
- self.original_request.headers_in)
+ headers = copy.copy(self.original_request.headers_in)
+ # Add extensions for logical channel.
+ headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
+ common.format_extensions(
+ self.original_request.mux_processor.extensions()))
+ self._handshake_base = _HandshakeDeltaBase(headers)
logical_request = _LogicalRequest(
_DEFAULT_CHANNEL_ID,
self.original_request.method,
@@ -1245,8 +1474,9 @@ class _MuxHandler(object):
# but we will send FlowControl later so set the initial quota to
# _INITIAL_QUOTA_FOR_CLIENT.
self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
+ send_quota = self.original_request.mux_processor.quota()
if not self._do_handshake_for_logical_request(
- logical_request, send_quota=self.original_request.mux_quota):
+ logical_request, send_quota=send_quota):
raise MuxUnexpectedException(
'Failed handshake on the default channel id')
self._add_logical_channel(logical_request)
@@ -1287,7 +1517,6 @@ class _MuxHandler(object):
if not self._worker_done_notify_received:
self._logger.debug('Waiting worker(s) timed out')
return False
-
finally:
self._logical_channels_condition.release()
@@ -1297,7 +1526,7 @@ class _MuxHandler(object):
return True
- def notify_write_done(self, channel_id):
+ def notify_write_data_done(self, channel_id):
"""Called by the writer thread when a write operation has done.
Args:
@@ -1308,7 +1537,7 @@ class _MuxHandler(object):
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
- channel_data.request.connection.notify_write_done()
+ channel_data.request.connection.on_write_data_done()
else:
self._logger.debug('Seems that logical channel for %d has gone'
% channel_id)
@@ -1469,9 +1698,11 @@ class _MuxHandler(object):
return
channel_data = self._logical_channels[block.channel_id]
channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
+
# Close the logical channel
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
+ channel_data.request.ws_stream.stop_sending()
finally:
self._logical_channels_condition.release()
@@ -1506,8 +1737,11 @@ class _MuxHandler(object):
return
channel_data = self._logical_channels[channel_id]
fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
+ consuming_byte = len(payload)
+ if opcode != common.OPCODE_CONTINUATION:
+ consuming_byte += 1
if not channel_data.request.ws_stream.consume_receive_quota(
- len(payload)):
+ consuming_byte):
# The client violates quota. Close logical channel.
raise LogicalChannelError(
channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
@@ -1569,15 +1803,32 @@ class _MuxHandler(object):
finished.
"""
- # Terminate all logical connections
- self._logger.debug('termiating all logical connections...')
+ self._logger.debug(
+ 'Terminating all logical connections waiting for incoming data '
+ '...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
except Exception:
- pass
+ self._logger.debug(traceback.format_exc())
+ self._logical_channels_condition.release()
+
+ def notify_writer_done(self):
+ """This method is called by the writer thread when the writer has
+ finished.
+ """
+
+ self._logger.debug(
+ 'Terminating all logical connections waiting for write '
+ 'completion ...')
+ self._logical_channels_condition.acquire()
+ for channel_data in self._logical_channels.values():
+ try:
+ channel_data.request.connection.on_writer_done()
+ except Exception:
+ self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def fail_physical_connection(self, code, message):
@@ -1590,8 +1841,7 @@ class _MuxHandler(object):
self._logger.debug('Failing the physical connection...')
self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
- self.physical_stream.close_connection(
- common.STATUS_INTERNAL_ENDPOINT_ERROR)
+ self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
def fail_logical_channel(self, channel_id, code, message):
"""Fail a logical channel.
@@ -1611,8 +1861,10 @@ class _MuxHandler(object):
# called later and it will send DropChannel.
channel_data.drop_code = code
channel_data.drop_message = message
+
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
+ channel_data.request.ws_stream.stop_sending()
else:
self._send_drop_channel(channel_id, code, message)
finally:
@@ -1620,7 +1872,8 @@ class _MuxHandler(object):
def use_mux(request):
- return hasattr(request, 'mux') and request.mux
+ return hasattr(request, 'mux_processor') and (
+ request.mux_processor.is_active())
def start(request, dispatcher):
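
The logical-channel changes above revolve around a per-channel send quota guarded by a threading.Condition: writes block until the peer replenishes quota with a FlowControl message, and stop_sending() wakes any blocked writer so the channel can shut down. A minimal, self-contained sketch of that pattern follows; the names SendQuota, replenish, consume and close are illustrative, not mod_pywebsocket's.

import threading

class SendQuota(object):
    """Condition-guarded send quota, in the spirit of _LogicalStream."""

    def __init__(self, initial=0):
        self._quota = initial
        self._closed = False
        self._condition = threading.Condition()

    def replenish(self, amount):
        # Called when a FlowControl-style message arrives from the peer.
        with self._condition:
            self._quota += amount
            self._condition.notify()

    def consume(self, amount):
        # Block until enough quota is available; return False once closed.
        with self._condition:
            while self._quota < amount and not self._closed:
                self._condition.wait()
            if self._closed:
                return False
            self._quota -= amount
            return True

    def close(self):
        # Analogous to stop_sending(): refuse new sends, wake blocked writers.
        with self._condition:
            self._closed = True
            self._condition.notify_all()

A writer thread would call consume(len(payload)) before framing data, while the thread that parses control messages calls replenish().
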
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py
index 07a33d9c9..2bf3b0c28 100755
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/standalone.py
@@ -76,6 +76,9 @@ SUPPORTING TLS
To support TLS, run standalone.py with -t, -k, and -c options.
+Note that when the ssl module is used and the key/cert location is incorrect,
+the TLS connection fails silently, while pyOpenSSL fails on startup.
+
SUPPORTING CLIENT AUTHENTICATION
@@ -140,18 +143,6 @@ import sys
import threading
import time
-_HAS_SSL = False
-_HAS_OPEN_SSL = False
-try:
- import ssl
- _HAS_SSL = True
-except ImportError:
- try:
- import OpenSSL.SSL
- _HAS_OPEN_SSL = True
- except ImportError:
- pass
-
from mod_pywebsocket import common
from mod_pywebsocket import dispatch
from mod_pywebsocket import handshake
@@ -168,6 +159,10 @@ _DEFAULT_REQUEST_QUEUE_SIZE = 128
# 1024 is practically large enough to contain WebSocket handshake lines.
_MAX_MEMORIZED_LINES = 1024
+# Constants for the --tls_module flag.
+_TLS_BY_STANDARD_MODULE = 'ssl'
+_TLS_BY_PYOPENSSL = 'pyopenssl'
+
class _StandaloneConnection(object):
"""Mimic mod_python mp_conn."""
@@ -231,11 +226,23 @@ class _StandaloneRequest(object):
self.headers_in = request_handler.headers
def get_uri(self):
- """Getter to mimic request.uri."""
+ """Getter to mimic request.uri.
+
+ This method returns the raw data at the Request-URI part of the
+ Request-Line, while the uri method on the request object of mod_python
+ returns the path portion after parsing the raw data. This behavior is
+ kept for compatibility.
+ """
return self._request_handler.path
uri = property(get_uri)
+ def get_unparsed_uri(self):
+ """Getter to mimic request.unparsed_uri."""
+
+ return self._request_handler.path
+ unparsed_uri = property(get_unparsed_uri)
+
def get_method(self):
"""Getter to mimic request.method."""
@@ -253,39 +260,68 @@ class _StandaloneRequest(object):
return self._use_tls
- def _drain_received_data(self):
- """Don't use this method from WebSocket handler. Drains unread data
- in the receive buffer.
- """
- raw_socket = self._request_handler.connection
- drained_data = util.drain_received_data(raw_socket)
+def _import_ssl():
+ global ssl
+ try:
+ import ssl
+ return True
+ except ImportError:
+ return False
+
- if drained_data:
- self._logger.debug(
- 'Drained data following close frame: %r', drained_data)
+def _import_pyopenssl():
+ global OpenSSL
+ try:
+ import OpenSSL.SSL
+ return True
+ except ImportError:
+ return False
class _StandaloneSSLConnection(object):
- """A wrapper class for OpenSSL.SSL.Connection to provide makefile method
- which is not supported by the class.
+ """A wrapper class for OpenSSL.SSL.Connection to
+ - provide makefile method which is not supported by the class
+ - tweak shutdown method since OpenSSL.SSL.Connection.shutdown doesn't
+ accept the "how" argument.
+ - convert SysCallError exceptions that its recv method may raise into a
+ return value of '', meaning EOF. We cannot overwrite the recv method on
+ self._connection since it's immutable.
"""
+ _OVERRIDDEN_ATTRIBUTES = ['_connection', 'makefile', 'shutdown', 'recv']
+
def __init__(self, connection):
self._connection = connection
def __getattribute__(self, name):
- if name in ('_connection', 'makefile'):
+ if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
return object.__getattribute__(self, name)
return self._connection.__getattribute__(name)
def __setattr__(self, name, value):
- if name in ('_connection', 'makefile'):
+ if name in _StandaloneSSLConnection._OVERRIDDEN_ATTRIBUTES:
return object.__setattr__(self, name, value)
return self._connection.__setattr__(name, value)
def makefile(self, mode='r', bufsize=-1):
- return socket._fileobject(self._connection, mode, bufsize)
+ return socket._fileobject(self, mode, bufsize)
+
+ def shutdown(self, unused_how):
+ self._connection.shutdown()
+
+ def recv(self, bufsize, flags=0):
+ if flags != 0:
+ raise ValueError('Non-zero flags not allowed')
+
+ try:
+ return self._connection.recv(bufsize)
+ except OpenSSL.SSL.SysCallError, (err, message):
+ if err == -1:
+ # Suppress "unexpected EOF" exception. See the OpenSSL document
+ # for SSL_get_error.
+ return ''
+ raise
def _alias_handlers(dispatcher, websock_handlers_map_file):
@@ -340,7 +376,7 @@ class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
warnings = options.dispatcher.source_warnings()
if warnings:
for warning in warnings:
- logging.warning('mod_pywebsocket: %s' % warning)
+ logging.warning('Warning in source loading: %s' % warning)
self._logger = util.get_class_logger(self)
@@ -387,25 +423,25 @@ class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
except Exception, e:
self._logger.info('Skip by failure: %r', e)
continue
- if self.websocket_server_options.use_tls:
- if _HAS_SSL:
- if self.websocket_server_options.tls_client_auth:
- client_cert_ = ssl.CERT_REQUIRED
+ server_options = self.websocket_server_options
+ if server_options.use_tls:
+ # For the case of _TLS_BY_PYOPENSSL, we do the wrapper setup after
+ # accept.
+ if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
+ if server_options.tls_client_auth:
+ if server_options.tls_client_cert_optional:
+ client_cert_ = ssl.CERT_OPTIONAL
+ else:
+ client_cert_ = ssl.CERT_REQUIRED
else:
client_cert_ = ssl.CERT_NONE
socket_ = ssl.wrap_socket(socket_,
- keyfile=self.websocket_server_options.private_key,
- certfile=self.websocket_server_options.certificate,
+ keyfile=server_options.private_key,
+ certfile=server_options.certificate,
ssl_version=ssl.PROTOCOL_SSLv23,
- ca_certs=self.websocket_server_options.tls_client_ca,
- cert_reqs=client_cert_)
- if _HAS_OPEN_SSL:
- ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
- ctx.use_privatekey_file(
- self.websocket_server_options.private_key)
- ctx.use_certificate_file(
- self.websocket_server_options.certificate)
- socket_ = OpenSSL.SSL.Connection(ctx, socket_)
+ ca_certs=server_options.tls_client_ca,
+ cert_reqs=client_cert_,
+ do_handshake_on_connect=False)
self._sockets.append((socket_, addrinfo))
def server_bind(self):
@@ -479,7 +515,7 @@ class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
self._logger.critical('Not supported: fileno')
return self._sockets[0][0].fileno()
- def handle_error(self, rquest, client_address):
+ def handle_error(self, request, client_address):
"""Override SocketServer.handle_error."""
self._logger.error(
@@ -496,8 +532,63 @@ class WebSocketServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""
accepted_socket, client_address = self.socket.accept()
- if self.websocket_server_options.use_tls and _HAS_OPEN_SSL:
- accepted_socket = _StandaloneSSLConnection(accepted_socket)
+
+ server_options = self.websocket_server_options
+ if server_options.use_tls:
+ if server_options.tls_module == _TLS_BY_STANDARD_MODULE:
+ try:
+ accepted_socket.do_handshake()
+ except ssl.SSLError, e:
+ self._logger.debug('%r', e)
+ raise
+
+ # Print cipher in use. Handshake is done on accept.
+ self._logger.debug('Cipher: %s', accepted_socket.cipher())
+ self._logger.debug('Client cert: %r',
+ accepted_socket.getpeercert())
+ elif server_options.tls_module == _TLS_BY_PYOPENSSL:
+ # We cannot print the cipher in use. pyOpenSSL doesn't provide
+ # any method to fetch that.
+
+ ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
+ ctx.use_privatekey_file(server_options.private_key)
+ ctx.use_certificate_file(server_options.certificate)
+
+ def default_callback(conn, cert, errnum, errdepth, ok):
+ return ok == 1
+
+ # See the OpenSSL document for SSL_CTX_set_verify.
+ if server_options.tls_client_auth:
+ verify_mode = OpenSSL.SSL.VERIFY_PEER
+ if not server_options.tls_client_cert_optional:
+ verify_mode |= OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT
+ ctx.set_verify(verify_mode, default_callback)
+ ctx.load_verify_locations(server_options.tls_client_ca,
+ None)
+ else:
+ ctx.set_verify(OpenSSL.SSL.VERIFY_NONE, default_callback)
+
+ accepted_socket = OpenSSL.SSL.Connection(ctx, accepted_socket)
+ accepted_socket.set_accept_state()
+
+ # Convert SSL-related errors into socket.error so that
+ # SocketServer ignores them and keeps running.
+ #
+ # TODO(tyoshino): Convert all kinds of errors.
+ try:
+ accepted_socket.do_handshake()
+ except OpenSSL.SSL.Error, e:
+ # Set errno part to 1 (SSL_ERROR_SSL) like the ssl module
+ # does.
+ self._logger.debug('%r', e)
+ raise socket.error(1, '%r' % e)
+ cert = accepted_socket.get_peer_certificate()
+ self._logger.debug('Client cert subject: %r',
+ cert.get_subject().get_components())
+ accepted_socket = _StandaloneSSLConnection(accepted_socket)
+ else:
+ raise ValueError('No TLS support module is available')
+
return accepted_socket, client_address
def serve_forever(self, poll_interval=0.5):
@@ -636,7 +727,7 @@ class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
self._logger.info('Fallback to CGIHTTPRequestHandler')
return True
except dispatch.DispatchException, e:
- self._logger.info('%s', e)
+ self._logger.info('Dispatch failed for error: %s', e)
self.send_error(e.status)
return False
@@ -652,7 +743,7 @@ class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
allowDraft75=self._options.allow_draft75,
strict=self._options.strict)
except handshake.VersionException, e:
- self._logger.info('%s', e)
+ self._logger.info('Handshake failed for version error: %s', e)
self.send_response(common.HTTP_STATUS_BAD_REQUEST)
self.send_header(common.SEC_WEBSOCKET_VERSION_HEADER,
e.supported_versions)
@@ -660,14 +751,14 @@ class WebSocketRequestHandler(CGIHTTPServer.CGIHTTPRequestHandler):
return False
except handshake.HandshakeException, e:
# Handshake for ws(s) failed.
- self._logger.info('%s', e)
+ self._logger.info('Handshake failed for error: %s', e)
self.send_error(e.status)
return False
request._dispatcher = self._options.dispatcher
self._options.dispatcher.transfer_data(request)
except handshake.AbortedByUserException, e:
- self._logger.info('%s', e)
+ self._logger.info('Aborted: %s', e)
return False
def log_request(self, code='-', size='-'):
@@ -799,6 +890,12 @@ def _build_option_parser():
'as CGI programs. Must be executable.'))
parser.add_option('-t', '--tls', dest='use_tls', action='store_true',
default=False, help='use TLS (wss://)')
+ parser.add_option('--tls-module', '--tls_module', dest='tls_module',
+ type='choice',
+ choices = [_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL],
+ help='Use ssl module if "%s" is specified. '
+ 'Use pyOpenSSL module if "%s" is specified' %
+ (_TLS_BY_STANDARD_MODULE, _TLS_BY_PYOPENSSL))
parser.add_option('-k', '--private-key', '--private_key',
dest='private_key',
default='', help='TLS private key file.')
@@ -806,7 +903,12 @@ def _build_option_parser():
default='', help='TLS certificate file.')
parser.add_option('--tls-client-auth', dest='tls_client_auth',
action='store_true', default=False,
- help='Requires TLS client auth on every connection.')
+ help='Requests TLS client auth on every connection.')
+ parser.add_option('--tls-client-cert-optional',
+ dest='tls_client_cert_optional',
+ action='store_true', default=False,
+ help=('Makes client certificate optional even though '
+ 'TLS client auth is enabled.'))
parser.add_option('--tls-client-ca', dest='tls_client_ca', default='',
help=('Specifies a pem file which contains a set of '
'concatenated CA certificates which are used to '
@@ -933,6 +1035,12 @@ def _main(args=None):
_configure_logging(options)
+ if options.allow_draft75:
+ logging.warning('--allow_draft75 option is obsolete.')
+
+ if options.strict:
+ logging.warning('--strict option is obsolete.')
+
# TODO(tyoshino): Clean up initialization of CGI related values. Move some
# of code here to WebSocketRequestHandler class if it's better.
options.cgi_directories = []
@@ -955,20 +1063,53 @@ def _main(args=None):
options.is_executable_method = __check_script
if options.use_tls:
- if not (_HAS_SSL or _HAS_OPEN_SSL):
- logging.critical('TLS support requires ssl or pyOpenSSL module.')
+ if options.tls_module is None:
+ if _import_ssl():
+ options.tls_module = _TLS_BY_STANDARD_MODULE
+ logging.debug('Using ssl module')
+ elif _import_pyopenssl():
+ options.tls_module = _TLS_BY_PYOPENSSL
+ logging.debug('Using pyOpenSSL module')
+ else:
+ logging.critical(
+ 'TLS support requires ssl or pyOpenSSL module.')
+ sys.exit(1)
+ elif options.tls_module == _TLS_BY_STANDARD_MODULE:
+ if not _import_ssl():
+ logging.critical('ssl module is not available')
+ sys.exit(1)
+ elif options.tls_module == _TLS_BY_PYOPENSSL:
+ if not _import_pyopenssl():
+ logging.critical('pyOpenSSL module is not available')
+ sys.exit(1)
+ else:
+ logging.critical('Invalid --tls-module option: %r',
+ options.tls_module)
sys.exit(1)
+
if not options.private_key or not options.certificate:
logging.critical(
'To use TLS, specify private_key and certificate.')
sys.exit(1)
- if options.tls_client_auth:
- if not options.use_tls:
+ if (options.tls_client_cert_optional and
+ not options.tls_client_auth):
+ logging.critical('Client authentication must be enabled to '
+ 'specify tls_client_cert_optional')
+ sys.exit(1)
+ else:
+ if options.tls_module is not None:
+ logging.critical('Use --tls-module option only together with '
+ '--tls option.')
+ sys.exit(1)
+
+ if options.tls_client_auth:
+ logging.critical('TLS must be enabled for client authentication.')
+ sys.exit(1)
+
+ if options.tls_client_cert_optional:
logging.critical('TLS must be enabled for client authentication.')
sys.exit(1)
- if not _HAS_SSL:
- logging.critical('Client authentication requires ssl module.')
if not options.scan_dir:
options.scan_dir = options.websock_handlers
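
The standalone.py changes defer the TLS handshake: the socket is wrapped with do_handshake_on_connect=False and do_handshake() is called explicitly after accept, so a bad client handshake can be logged and rejected without killing the accept loop. A rough sketch of that accept-then-handshake shape using only the legacy module-level ssl API seen in the patch; the keyfile/certfile arguments and the logger are placeholders, and the patch itself wraps the listening socket while this sketch wraps the accepted one.

import ssl

def accept_one_tls_connection(listener, keyfile, certfile, logger):
    """Accept a connection and complete the TLS handshake explicitly."""
    raw_socket, address = listener.accept()
    tls_socket = ssl.wrap_socket(raw_socket,
                                 keyfile=keyfile,
                                 certfile=certfile,
                                 server_side=True,
                                 ssl_version=ssl.PROTOCOL_SSLv23,
                                 do_handshake_on_connect=False)
    try:
        # Handshake failures surface here, per connection, instead of
        # somewhere inside the server's accept machinery.
        tls_socket.do_handshake()
    except ssl.SSLError, e:
        logger.debug('TLS handshake failed: %r', e)
        raise
    return tls_socket, address
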
diff --git a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py
index 7bb0b5d9e..adaca3a08 100644
--- a/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py
+++ b/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/util.py
@@ -56,6 +56,11 @@ import socket
import traceback
import zlib
+try:
+ from mod_pywebsocket import fast_masking
+except ImportError:
+ pass
+
def get_stack_trace():
"""Get the current stack trace as string.
@@ -169,45 +174,39 @@ class RepeatedXorMasker(object):
ended and resumes from that point on the next mask method call.
"""
- def __init__(self, mask):
- self._mask = map(ord, mask)
- self._mask_size = len(self._mask)
- self._count = 0
+ def __init__(self, masking_key):
+ self._masking_key = masking_key
+ self._masking_key_index = 0
- def mask(self, s):
+ def _mask_using_swig(self, s):
+ masked_data = fast_masking.mask(
+ s, self._masking_key, self._masking_key_index)
+ self._masking_key_index = (
+ (self._masking_key_index + len(s)) % len(self._masking_key))
+ return masked_data
+
+ def _mask_using_array(self, s):
result = array.array('B')
result.fromstring(s)
+
# Use temporary local variables to eliminate the cost to access
# attributes
- count = self._count
- mask = self._mask
- mask_size = self._mask_size
- for i in xrange(len(result)):
- result[i] ^= mask[count]
- count = (count + 1) % mask_size
- self._count = count
+ masking_key = map(ord, self._masking_key)
+ masking_key_size = len(masking_key)
+ masking_key_index = self._masking_key_index
- return result.tostring()
-
-
-class DeflateRequest(object):
- """A wrapper class for request object to intercept send and recv to perform
- deflate compression and decompression transparently.
- """
+ for i in xrange(len(result)):
+ result[i] ^= masking_key[masking_key_index]
+ masking_key_index = (masking_key_index + 1) % masking_key_size
- def __init__(self, request):
- self._request = request
- self.connection = DeflateConnection(request.connection)
+ self._masking_key_index = masking_key_index
- def __getattribute__(self, name):
- if name in ('_request', 'connection'):
- return object.__getattribute__(self, name)
- return self._request.__getattribute__(name)
+ return result.tostring()
- def __setattr__(self, name, value):
- if name in ('_request', 'connection'):
- return object.__setattr__(self, name, value)
- return self._request.__setattr__(name, value)
+ if 'fast_masking' in globals():
+ mask = _mask_using_swig
+ else:
+ mask = _mask_using_array
# By making wbits option negative, we can suppress CMF/FLG (2 octet) and
@@ -252,6 +251,7 @@ class _Deflater(object):
self._logger.debug('Compress result %r', compressed_bytes)
return compressed_bytes
+
class _Inflater(object):
def __init__(self):
@@ -346,6 +346,7 @@ class _RFC1979Deflater(object):
return self._deflater.compress_and_flush(bytes)[:-4]
return self._deflater.compress(bytes)
+
class _RFC1979Inflater(object):
"""A decompressor class for byte sequence compressed and flushed following
the algorithm described in the RFC1979 section 2.1.
@@ -405,111 +406,4 @@ class DeflateSocket(object):
return len(bytes)
-class DeflateConnection(object):
- """A wrapper class for request object to intercept write and read to
- perform deflate compression and decompression transparently.
- """
-
- def __init__(self, connection):
- self._connection = connection
-
- self._logger = get_class_logger(self)
-
- self._deflater = _Deflater(zlib.MAX_WBITS)
- self._inflater = _Inflater()
-
- def get_remote_addr(self):
- return self._connection.remote_addr
- remote_addr = property(get_remote_addr)
-
- def put_bytes(self, bytes):
- self.write(bytes)
-
- def read(self, size=-1):
- """Reads at most size bytes. Blocks until there's at least one byte
- available.
- """
-
- # TODO(tyoshino): Allow call with size=0.
- if not (size == -1 or size > 0):
- raise Exception('size must be -1 or positive')
-
- data = ''
- while True:
- if size == -1:
- data += self._inflater.decompress(-1)
- else:
- data += self._inflater.decompress(size - len(data))
-
- if size >= 0 and len(data) != 0:
- break
-
- # TODO(tyoshino): Make this read efficient by some workaround.
- #
- # In 3.0.3 and prior of mod_python, read blocks until length bytes
- # was read. We don't know the exact size to read while using
- # deflate, so read byte-by-byte.
- #
- # _StandaloneRequest.read that ultimately performs
- # socket._fileobject.read also blocks until length bytes was read
- read_data = self._connection.read(1)
- if not read_data:
- break
- self._inflater.append(read_data)
- return data
-
- def write(self, bytes):
- self._connection.write(self._deflater.compress_and_flush(bytes))
-
-
-def _is_ewouldblock_errno(error_number):
- """Returns True iff error_number indicates that receive operation would
- block. To make this portable, we check availability of errno and then
- compare them.
- """
-
- for error_name in ['WSAEWOULDBLOCK', 'EWOULDBLOCK', 'EAGAIN']:
- if (error_name in dir(errno) and
- error_number == getattr(errno, error_name)):
- return True
- return False
-
-
-def drain_received_data(raw_socket):
- # Set the socket non-blocking.
- original_timeout = raw_socket.gettimeout()
- raw_socket.settimeout(0.0)
-
- drained_data = []
-
- # Drain until the socket is closed or no data is immediately
- # available for read.
- while True:
- try:
- data = raw_socket.recv(1)
- if not data:
- break
- drained_data.append(data)
- except socket.error, e:
- # e can be either a pair (errno, string) or just a string (or
- # something else) telling what went wrong. We suppress only
- # the errors that indicates that the socket blocks. Those
- # exceptions can be parsed as a pair (errno, string).
- try:
- error_number, message = e
- except:
- # Failed to parse socket.error.
- raise e
-
- if _is_ewouldblock_errno(error_number):
- break
- else:
- raise e
-
- # Rollback timeout value.
- raw_socket.settimeout(original_timeout)
-
- return ''.join(drained_data)
-
-
# vi:sts=4 sw=4 et
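
The RepeatedXorMasker rewrite keeps the raw masking key plus a resumable index into it, so masking can continue across message fragments, and delegates to the optional SWIG fast_masking extension when it imports. A small pure-Python sketch of the fallback path; the function name and the sample key are made up for illustration.

import array

def xor_mask(data, masking_key, start_index=0):
    # Equivalent in spirit to _mask_using_array: returns the masked bytes and
    # the index to resume from on the next fragment.
    result = array.array('B')
    result.fromstring(data)
    key = map(ord, masking_key)
    key_size = len(key)
    index = start_index
    for i in xrange(len(result)):
        result[i] ^= key[index]
        index = (index + 1) % key_size
    return result.tostring(), index

# XOR masking is an involution: applying the same key twice restores the input.
masked, index = xor_mask('hello', '\x01\x02\x03\x04')
unmasked, _ = xor_mask(masked, '\x01\x02\x03\x04')
assert unmasked == 'hello'
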
diff --git a/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests.py b/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests.py
index 68c2fb7f5..c7082c84a 100755..100644
--- a/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests.py
+++ b/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -157,7 +155,3 @@ def main():
return 2
return 0
-
-
-if __name__ == "__main__":
- sys.exit(main())
diff --git a/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests_unittest.py b/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests_unittest.py
index b3b4d583e..028a4c6f2 100644
--- a/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests_unittest.py
+++ b/Tools/Scripts/webkitpy/to_be_moved/update_webgl_conformance_tests_unittest.py
@@ -1,4 +1,3 @@
-#!/usr/bin/python
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
@@ -29,7 +28,7 @@
"""Unit tests for update_webgl_conformance_tests."""
-import unittest
+import unittest2 as unittest
from webkitpy.to_be_moved import update_webgl_conformance_tests as webgl
@@ -96,7 +95,3 @@ class TestTranslation(unittest.TestCase):
input_text = head + input_text + foot
output_text = head + output_text + foot
self.assert_translate(input_text, output_text)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/bot/botinfo.py b/Tools/Scripts/webkitpy/tool/bot/botinfo.py
index b9fd938aa..11a3d40f4 100644
--- a/Tools/Scripts/webkitpy/tool/bot/botinfo.py
+++ b/Tools/Scripts/webkitpy/tool/bot/botinfo.py
@@ -29,11 +29,12 @@
# FIXME: We should consider hanging one of these off the tool object.
class BotInfo(object):
- def __init__(self, tool):
+ def __init__(self, tool, port_name):
self._tool = tool
+ self._port_name = port_name
def summary_text(self):
# bot_id is also stored on the options dictionary on the tool.
bot_id = self._tool.status_server.bot_id
bot_id_string = "Bot: %s " % (bot_id) if bot_id else ""
- return "%sPort: %s Platform: %s" % (bot_id_string, self._tool.port().name(), self._tool.platform.display_name())
+ return "%sPort: %s Platform: %s" % (bot_id_string, self._port_name, self._tool.platform.display_name())
diff --git a/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py b/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py
index 820ff559e..04861f452 100644
--- a/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/botinfo_unittest.py
@@ -26,11 +26,12 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.tool.bot.botinfo import BotInfo
from webkitpy.tool.mocktool import MockTool
from webkitpy.common.net.statusserver_mock import MockStatusServer
+from webkitpy.port.test import TestPort
class BotInfoTest(unittest.TestCase):
@@ -38,4 +39,4 @@ class BotInfoTest(unittest.TestCase):
def test_summary_text(self):
tool = MockTool()
tool.status_server = MockStatusServer("MockBotId")
- self.assertEqual(BotInfo(tool).summary_text(), "Bot: MockBotId Port: MockPort Platform: MockPlatform 1.0")
+ self.assertEqual(BotInfo(tool, 'port-name').summary_text(), "Bot: MockBotId Port: port-name Platform: MockPlatform 1.0")
diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
index 491ba79da..a95c7b103 100644
--- a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
+++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask.py
@@ -55,6 +55,7 @@ class CommitQueueTask(PatchAnalysisTask):
def _validate_changelog(self):
return self._run_command([
"validate-changelog",
+ "--check-oops",
"--non-interactive",
self._patch.id(),
],
@@ -88,7 +89,7 @@ class CommitQueueTask(PatchAnalysisTask):
# no one has set commit-queue- since we started working on the patch.)
if not self.validate():
return False
- # FIXME: We should understand why the land failure occured and retry if possible.
+ # FIXME: We should understand why the land failure occurred and retry if possible.
if not self._land():
return self.report_failure()
return True
diff --git a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
index 2211b1de0..1eabde1b5 100644
--- a/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py
@@ -28,7 +28,7 @@
from datetime import datetime
import logging
-import unittest
+import unittest2 as unittest
from webkitpy.common.net import bugzilla
from webkitpy.common.net.layouttestresults import LayoutTestResults
@@ -124,6 +124,7 @@ class GoldenScriptError(ScriptError):
class CommitQueueTaskTest(unittest.TestCase):
def _run_through_task(self, commit_queue, expected_logs, expected_exception=None, expect_retry=False):
+ self.maxDiff = None
tool = MockTool(log_executive=True)
patch = tool.bugs.fetch_attachment(10000)
task = CommitQueueTask(commit_queue, patch)
@@ -140,7 +141,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
@@ -160,7 +161,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
@@ -218,7 +219,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_failed: failure_message='ChangeLog did not pass validation' script_error='MOCK validate failure' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
@@ -237,7 +238,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
@@ -261,7 +262,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
@@ -280,7 +281,7 @@ command_failed: failure_message='Unable to build without patch' script_error='MO
ScriptError("MOCK tests failure"),
])
# CommitQueueTask will only report flaky tests if we successfully parsed
- # results.html and returned a LayoutTestResults object, so we fake one.
+ # results.json and returned a LayoutTestResults object, so we fake one.
commit_queue.test_results = lambda: LayoutTestResults([])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
@@ -288,7 +289,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
@@ -322,7 +323,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
@@ -358,7 +359,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
@@ -372,7 +373,7 @@ command_failed: failure_message='Patch does not pass tests' script_error='MOCK t
patch = tool.bugs.fetch_attachment(10000)
task = CommitQueueTask(commit_queue, patch)
success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs)
- self.assertEqual(success, False)
+ self.assertFalse(success)
def test_test_failure(self):
commit_queue = MockCommitQueue([
@@ -390,7 +391,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
@@ -429,7 +430,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
@@ -472,7 +473,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
@@ -511,7 +512,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
@@ -545,7 +546,7 @@ run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
-run_webkit_patch: ['validate-changelog', '--non-interactive', 10000]
+run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
diff --git a/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py b/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
index 3cee3f059..b639856f3 100644
--- a/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/expectedfailures_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.tool.bot.expectedfailures import ExpectedFailures
diff --git a/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py b/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py
index 9d0b71408..b70a6371e 100644
--- a/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/feeders_unittest.py
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
@@ -40,7 +40,7 @@ class FeedersTest(unittest.TestCase):
feeder = CommitQueueFeeder(MockTool())
expected_logs = """Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
-MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.' and additional comment 'non-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
+MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.\n\nnon-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
index 7be4a4a30..b9fdf669c 100644
--- a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
+++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter.py
@@ -42,7 +42,8 @@ class FlakyTestReporter(object):
def __init__(self, tool, bot_name):
self._tool = tool
self._bot_name = bot_name
- self._bot_info = BotInfo(tool)
+ # FIXME: Use the real port object
+ self._bot_info = BotInfo(tool, tool.deprecated_port().name())
def _author_emails_for_test(self, flaky_test):
test_path = path_for_layout_test(flaky_test)
@@ -139,12 +140,8 @@ If you would like to track this test fix with another bug, please close this bug
bug = self._tool.bugs.fetch_bug(bug.duplicate_of())
return bug
- # Maybe this logic should move into Bugzilla? a reopen=True arg to post_comment?
def _update_bug_for_flaky_test(self, bug, latest_flake_message):
- if bug.is_closed():
- self._tool.bugs.reopen_bug(bug.id(), latest_flake_message)
- else:
- self._tool.bugs.post_comment_to_bug(bug.id(), latest_flake_message)
+ self._tool.bugs.post_comment_to_bug(bug.id(), latest_flake_message)
# This method is needed because our archive paths include a leading tmp/layout-test-results
def _find_in_archive(self, path, archive):
diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
index 48c511281..5e30a66aa 100644
--- a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.config.committers import Committer
from webkitpy.common.system.filesystem_mock import MockFileSystem
@@ -46,7 +46,7 @@ class MockCommitInfo(object):
def author(self):
# It's definitely possible to have commits with authors who
- # are not in our committers.py list.
+ # are not in our contributors.json list.
if not self._author_email:
return None
return Committer("Mock Committer", self._author_email)
diff --git a/Tools/Scripts/webkitpy/tool/bot/irc_command.py b/Tools/Scripts/webkitpy/tool/bot/irc_command.py
index 1c061a8db..9b9915769 100644
--- a/Tools/Scripts/webkitpy/tool/bot/irc_command.py
+++ b/Tools/Scripts/webkitpy/tool/bot/irc_command.py
@@ -33,6 +33,7 @@ import re
from webkitpy.common.config import irc as config_irc
from webkitpy.common.config import urls
from webkitpy.common.config.committers import CommitterList
+from webkitpy.common.net.web import Web
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.queueengine import TerminateQueue
from webkitpy.tool.grammar import join_with_separators
@@ -48,19 +49,121 @@ def _post_error_and_check_for_bug_url(tool, nicks_string, exception):
# FIXME: Merge with Command?
class IRCCommand(object):
+ usage_string = None
+ help_string = None
+
+ def execute(self, nick, args, tool, sheriff):
+ raise NotImplementedError("subclasses must implement")
+
+ @classmethod
+ def usage(cls, nick):
+ return "%s: Usage: %s" % (nick, cls.usage_string)
+
+ @classmethod
+ def help(cls, nick):
+ return "%s: %s" % (nick, cls.help_string)
+
+
+class CreateBug(IRCCommand):
+ usage_string = "create-bug BUG_TITLE"
+ help_string = "Creates a Bugzilla bug with the given title."
+
+ def execute(self, nick, args, tool, sheriff):
+ if not args:
+ return self.usage(nick)
+
+ bug_title = " ".join(args)
+ bug_description = "%s\nRequested by %s on %s." % (bug_title, nick, config_irc.channel)
+
+ # There happens to be a committers list hung off of Bugzilla, so
+ # re-using that one makes things easiest for now.
+ requester = tool.bugs.committers.contributor_by_irc_nickname(nick)
+ requester_email = requester.bugzilla_email() if requester else None
+
+ try:
+ bug_id = tool.bugs.create_bug(bug_title, bug_description, cc=requester_email, assignee=requester_email)
+ bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
+ return "%s: Created bug: %s" % (nick, bug_url)
+ except Exception, e:
+ return "%s: Failed to create bug:\n%s" % (nick, e)
+
+
+class Help(IRCCommand):
+ usage_string = "help [COMMAND]"
+ help_string = "Provides help on my individual commands."
+
+ def execute(self, nick, args, tool, sheriff):
+ if args:
+ for command_name in args:
+ if command_name in commands:
+ self._post_command_help(nick, tool, commands[command_name])
+ else:
+ tool.irc().post("%s: Available commands: %s" % (nick, ", ".join(sorted(visible_commands.keys()))))
+ tool.irc().post('%s: Type "%s: help COMMAND" for help on my individual commands.' % (nick, sheriff.name()))
+
+ def _post_command_help(self, nick, tool, command):
+ tool.irc().post(command.usage(nick))
+ tool.irc().post(command.help(nick))
+ aliases = " ".join(sorted(filter(lambda alias: commands[alias] == command and alias not in visible_commands, commands)))
+ if aliases:
+ tool.irc().post("%s: Aliases: %s" % (nick, aliases))
+
+
+class Hi(IRCCommand):
+ usage_string = "hi"
+ help_string = "Responds with hi."
+
+ def execute(self, nick, args, tool, sheriff):
+ if len(args) and re.match(sheriff.name() + r'_*\s*!\s*', ' '.join(args)):
+ return "%s: hi %s!" % (nick, nick)
+ quips = tool.bugs.quips()
+ quips.append('"Only you can prevent forest fires." -- Smokey the Bear')
+ return random.choice(quips)
+
+
+class PingPong(IRCCommand):
+ usage_string = "ping"
+ help_string = "Responds with pong."
+
def execute(self, nick, args, tool, sheriff):
- raise NotImplementedError, "subclasses must implement"
+ return nick + ": pong"
+
+
+class YouThere(IRCCommand):
+ usage_string = "yt?"
+ help_string = "Responds with yes."
+
+ def execute(self, nick, args, tool, sheriff):
+ return "%s: yes" % nick
class Restart(IRCCommand):
+ usage_string = "restart"
+ help_string = "Restarts sherrifbot. Will update its WebKit checkout, and re-join the channel momentarily."
+
def execute(self, nick, args, tool, sheriff):
tool.irc().post("Restarting...")
raise TerminateQueue()
+class RollChromiumDEPS(IRCCommand):
+ usage_string = "roll-chromium-deps REVISION"
+ help_string = "Rolls WebKit's Chromium DEPS to the given revision???"
+
+ def execute(self, nick, args, tool, sheriff):
+ if not len(args):
+ return self.usage(nick)
+ tool.irc().post("%s: Will roll Chromium DEPS to %s" % (nick, ' '.join(args)))
+ tool.irc().post("%s: Rolling Chromium DEPS to %s" % (nick, ' '.join(args)))
+ tool.irc().post("%s: Rolled Chromium DEPS to %s" % (nick, ' '.join(args)))
+ tool.irc().post("%s: Thank You" % nick)
+
+
class Rollout(IRCCommand):
- def _extract_revisions(self, arg):
+ usage_string = "rollout SVN_REVISION [SVN_REVISIONS] REASON"
+ help_string = "Opens a rollout bug, CCing author + reviewer, and attaching the reverse-diff of the given revisions marked as commit-queue=?."
+ def _extract_revisions(self, arg):
revision_list = []
possible_revisions = arg.split(",")
for revision in possible_revisions:
@@ -110,15 +213,28 @@ class Rollout(IRCCommand):
return ", ".join(target_nicks)
def _update_working_copy(self, tool):
- tool.scm().ensure_clean_working_directory(force_clean=True)
- tool.executive.run_and_throw_if_fail(tool.port().update_webkit_command(), quiet=True, cwd=tool.scm().checkout_root)
+ tool.scm().discard_local_changes()
+ tool.executive.run_and_throw_if_fail(tool.deprecated_port().update_webkit_command(), quiet=True, cwd=tool.scm().checkout_root)
+
+ def _check_diff_failure(self, error_log, tool):
+ if not error_log:
+ return None
+
+ revert_failure_message_start = error_log.find("Failed to apply reverse diff for revision")
+ if revert_failure_message_start == -1:
+ return None
+
+ lines = error_log[revert_failure_message_start:].split('\n')[1:]
+ files = itertools.takewhile(lambda line: tool.filesystem.exists(tool.scm().absolute_path(line)), lines)
+ if files:
+ return "Failed to apply reverse diff for file(s): %s" % ", ".join(files)
+ return None
def execute(self, nick, args, tool, sheriff):
svn_revision_list, rollout_reason = self._parse_args(args)
if (not svn_revision_list or not rollout_reason):
- # return is equivalent to an irc().post(), but makes for easier unit testing.
- return "%s: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON" % nick
+ return self.usage(nick)
revision_urls_string = join_with_separators([urls.view_revision_url(revision) for revision in svn_revision_list])
tool.irc().post("%s: Preparing rollout for %s ..." % (nick, revision_urls_string))
@@ -137,109 +253,60 @@ class Rollout(IRCCommand):
tool.irc().post("%s: Created rollout: %s" % (nicks_string, bug_url))
except ScriptError, e:
tool.irc().post("%s: Failed to create rollout patch:" % nicks_string)
+ diff_failure = self._check_diff_failure(e.output, tool)
+ if diff_failure:
+ return "%s: %s" % (nicks_string, diff_failure)
_post_error_and_check_for_bug_url(tool, nicks_string, e)
-class RollChromiumDEPS(IRCCommand):
- def _parse_args(self, args):
- if not args:
- return
- revision = args[0].lstrip("r")
- if not revision.isdigit():
- return
- return revision
-
- def execute(self, nick, args, tool, sheriff):
- revision = self._parse_args(args)
-
- roll_target = "r%s" % revision if revision else "last-known good revision"
- tool.irc().post("%s: Rolling Chromium DEPS to %s" % (nick, roll_target))
-
- try:
- bug_id = sheriff.post_chromium_deps_roll(revision, roll_target)
- bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
- tool.irc().post("%s: Created DEPS roll: %s" % (nick, bug_url))
- except ScriptError, e:
- match = re.search(r"Current Chromium DEPS revision \d+ is newer than \d+\.", e.output)
- if match:
- tool.irc().post("%s: %s" % (nick, match.group(0)))
- return
- tool.irc().post("%s: Failed to create DEPS roll:" % nick)
- _post_error_and_check_for_bug_url(tool, nick, e)
-
-
-class Help(IRCCommand):
- def execute(self, nick, args, tool, sheriff):
- return "%s: Available commands: %s" % (nick, ", ".join(sorted(visible_commands.keys())))
+class Whois(IRCCommand):
+ usage_string = "whois SEARCH_STRING"
+ help_string = "Searches known contributors and returns any matches with irc, email and full name. Wild card * permitted."
+ def _full_record_and_nick(self, contributor):
+ result = ''
-class Hi(IRCCommand):
- def execute(self, nick, args, tool, sheriff):
- quips = tool.bugs.quips()
- quips.append('"Only you can prevent forest fires." -- Smokey the Bear')
- return random.choice(quips)
+ if contributor.irc_nicknames:
+ result += ' (:%s)' % ', :'.join(contributor.irc_nicknames)
+ if contributor.can_review:
+ result += ' (r)'
+ elif contributor.can_commit:
+ result += ' (c)'
-class Whois(IRCCommand):
- def _nick_or_full_record(self, contributor):
- if contributor.irc_nicknames:
- return ', '.join(contributor.irc_nicknames)
- return unicode(contributor)
+ return unicode(contributor) + result
def execute(self, nick, args, tool, sheriff):
- if len(args) != 1:
- return "%s: Usage: whois SEARCH_STRING" % nick
- search_string = args[0]
+ if not args:
+ return self.usage(nick)
+ search_string = unicode(" ".join(args))
# FIXME: We should get the ContributorList off the tool somewhere.
contributors = CommitterList().contributors_by_search_string(search_string)
if not contributors:
- return "%s: Sorry, I don't know any contributors matching '%s'." % (nick, search_string)
+ return unicode("%s: Sorry, I don't know any contributors matching '%s'.") % (nick, search_string)
if len(contributors) > 5:
- return "%s: More than 5 contributors match '%s', could you be more specific?" % (nick, search_string)
+ return unicode("%s: More than 5 contributors match '%s', could you be more specific?") % (nick, search_string)
if len(contributors) == 1:
contributor = contributors[0]
if not contributor.irc_nicknames:
- return "%s: %s hasn't told me their nick. Boo hoo :-(" % (nick, contributor)
- if contributor.emails and search_string.lower() not in map(lambda email: email.lower(), contributor.emails):
- formattedEmails = ', '.join(contributor.emails)
- return "%s: %s is %s (%s). Why do you ask?" % (nick, search_string, self._nick_or_full_record(contributor), formattedEmails)
- else:
- return "%s: %s is %s. Why do you ask?" % (nick, search_string, self._nick_or_full_record(contributor))
- contributor_nicks = map(self._nick_or_full_record, contributors)
+ return unicode("%s: %s hasn't told me their nick. Boo hoo :-(") % (nick, contributor)
+ return unicode("%s: %s is %s. Why do you ask?") % (nick, search_string, self._full_record_and_nick(contributor))
+ contributor_nicks = map(self._full_record_and_nick, contributors)
contributors_string = join_with_separators(contributor_nicks, only_two_separator=" or ", last_separator=', or ')
- return "%s: I'm not sure who you mean? %s could be '%s'." % (nick, contributors_string, search_string)
-
-
-class CreateBug(IRCCommand):
- def execute(self, nick, args, tool, sheriff):
- if not args:
- return "%s: Usage: create-bug BUG_TITLE" % nick
-
- bug_title = " ".join(args)
- bug_description = "%s\nRequested by %s on %s." % (bug_title, nick, config_irc.channel)
-
- # There happens to be a committers list hung off of Bugzilla, so
- # re-using that one makes things easiest for now.
- requester = tool.bugs.committers.contributor_by_irc_nickname(nick)
- requester_email = requester.bugzilla_email() if requester else None
-
- try:
- bug_id = tool.bugs.create_bug(bug_title, bug_description, cc=requester_email, assignee=requester_email)
- bug_url = tool.bugs.bug_url_for_bug_id(bug_id)
- return "%s: Created bug: %s" % (nick, bug_url)
- except Exception, e:
- return "%s: Failed to create bug:\n%s" % (nick, e)
+ return unicode("%s: I'm not sure who you mean? %s could be '%s'.") % (nick, contributors_string, search_string)
# FIXME: Lame. We should have an auto-registering CommandCenter.
visible_commands = {
+ "create-bug": CreateBug,
"help": Help,
"hi": Hi,
+ "ping": PingPong,
"restart": Restart,
+ "roll-chromium-deps": RollChromiumDEPS,
"rollout": Rollout,
"whois": Whois,
- "create-bug": CreateBug,
- "roll-chromium-deps": RollChromiumDEPS,
+ "yt?": YouThere,
}
# Add revert as an "easter egg" command. Why?
@@ -248,3 +315,5 @@ visible_commands = {
# people to use and it seems silly to have them hunt around for "rollout" instead.
commands = visible_commands.copy()
commands["revert"] = Rollout
+# "hello" Alias for "hi" command for the purposes of testing aliases
+commands["hello"] = Hi
diff --git a/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py b/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py
index e307e6ea9..1bf26a158 100644
--- a/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py
@@ -26,12 +26,15 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import os
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.bot.irc_command import *
from webkitpy.tool.mocktool import MockTool
+from webkitpy.common.net.web_mock import MockWeb
from webkitpy.common.system.executive_mock import MockExecutive
+from webkitpy.common.system.filesystem_mock import MockFileSystem
class IRCCommandTest(unittest.TestCase):
@@ -39,21 +42,23 @@ class IRCCommandTest(unittest.TestCase):
whois = Whois()
self.assertEqual("tom: Usage: whois SEARCH_STRING",
whois.execute("tom", [], None, None))
- self.assertEqual("tom: Usage: whois SEARCH_STRING",
+ self.assertEqual('tom: Adam Barth is "Adam Barth" <abarth@webkit.org> (:abarth) (r). Why do you ask?',
whois.execute("tom", ["Adam", "Barth"], None, None))
self.assertEqual("tom: Sorry, I don't know any contributors matching 'unknown@example.com'.",
whois.execute("tom", ["unknown@example.com"], None, None))
- self.assertEqual("tom: tonyg@chromium.org is tonyg-cr. Why do you ask?",
+ self.assertEqual('tom: tonyg@chromium.org is "Tony Gentilcore" <tonyg@chromium.org> (:tonyg-cr) (r). Why do you ask?',
whois.execute("tom", ["tonyg@chromium.org"], None, None))
- self.assertEqual("tom: TonyG@Chromium.org is tonyg-cr. Why do you ask?",
+ self.assertEqual('tom: TonyG@Chromium.org is "Tony Gentilcore" <tonyg@chromium.org> (:tonyg-cr) (r). Why do you ask?',
whois.execute("tom", ["TonyG@Chromium.org"], None, None))
- self.assertEqual("tom: rniwa is rniwa (rniwa@webkit.org). Why do you ask?",
+ self.assertEqual('tom: rniwa is "Ryosuke Niwa" <rniwa@webkit.org> (:rniwa) (r). Why do you ask?',
whois.execute("tom", ["rniwa"], None, None))
- self.assertEqual("tom: lopez is xan (xan.lopez@gmail.com, xan@gnome.org, xan@webkit.org, xlopez@igalia.com). Why do you ask?",
+ self.assertEqual('tom: lopez is "Xan Lopez" <xan.lopez@gmail.com> (:xan) (r). Why do you ask?',
whois.execute("tom", ["lopez"], None, None))
+ self.assertEqual(u'tom: Osztrogon\u00e1c is "Csaba Osztrogon\u00e1c" <ossy@webkit.org> (:ossy) (r). Why do you ask?',
+ whois.execute("tom", [u'Osztrogon\u00e1c'], None, None))
self.assertEqual('tom: "Vicki Murley" <vicki@apple.com> hasn\'t told me their nick. Boo hoo :-(',
whois.execute("tom", ["vicki@apple.com"], None, None))
- self.assertEqual('tom: I\'m not sure who you mean? gavinp or gbarra could be \'Gavin\'.',
+ self.assertEqual('tom: I\'m not sure who you mean? "Gavin Peters" <gavinp@chromium.org> (:gavinp) (c) or "Gavin Barraclough" <barraclough@apple.com> (:gbarra) (r) could be \'Gavin\'.',
whois.execute("tom", ["Gavin"], None, None))
self.assertEqual('tom: More than 5 contributors match \'david\', could you be more specific?',
whois.execute("tom", ["david"], None, None))
@@ -77,11 +82,6 @@ class IRCCommandTest(unittest.TestCase):
self.assertEqual("tom: Failed to create bug:\nException from bugzilla!",
create_bug.execute("tom", example_args, tool, None))
- def test_roll_chromium_deps(self):
- roll = RollChromiumDEPS()
- self.assertEqual(None, roll._parse_args([]))
- self.assertEqual("1234", roll._parse_args(["1234"]))
-
def test_rollout_updates_working_copy(self):
rollout = Rollout()
tool = MockTool()
@@ -114,4 +114,30 @@ class IRCCommandTest(unittest.TestCase):
self.assertEqual("tom: Usage: rollout SVN_REVISION [SVN_REVISIONS] REASON",
rollout.execute("tom", [], None, None))
+ tool = MockTool()
+ tool.filesystem.files["/mock-checkout/test/file/one"] = ""
+ tool.filesystem.files["/mock-checkout/test/file/two"] = ""
+ self.assertEqual("Failed to apply reverse diff for file(s): test/file/one, test/file/two",
+ rollout._check_diff_failure("""
+Preparing rollout for bug 123456.
+Updating working directory
+Failed to apply reverse diff for revision 123456 because of the following conflicts:
+test/file/one
+test/file/two
+Failed to apply reverse diff for revision 123456 because of the following conflicts:
+test/file/one
+test/file/two
+Updating OpenSource
+Current branch master is up to date.
+ """, tool))
+ self.assertEqual(None, rollout._check_diff_failure("""
+Preparing rollout for bug 123456.
+Updating working directory
+Some other error report involving file paths:
+test/file/one
+test/file/two
+Updating OpenSource
+Current branch master is up to date.
+ """, tool))
+
# FIXME: We need a better way to test IRCCommands which call tool.irc().post()
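
The new rollout assertions exercise a _check_diff_failure helper that scans the ScriptError output for paths listed under the "because of the following conflicts:" marker and reports the ones that actually exist in the checkout. The helper's body is not part of this hunk; a rough sketch of that scan, assuming the caller passes the checkout-relative paths that exist (check_diff_failure and existing_files are illustrative names):

    def check_diff_failure(error_log, existing_files):
        # Illustrative sketch: collect paths that follow the conflicts marker,
        # keep the ones present in the working copy, de-duplicate them, and
        # format the message the unit test above expects.
        conflicts = []
        collecting = False
        for line in error_log.splitlines():
            line = line.strip()
            if line.endswith("because of the following conflicts:"):
                collecting = True
                continue
            if collecting:
                if line in existing_files:
                    if line not in conflicts:
                        conflicts.append(line)
                else:
                    collecting = False
        if not conflicts:
            return None
        return "Failed to apply reverse diff for file(s): %s" % ", ".join(conflicts)

    # check_diff_failure(output, ["test/file/one", "test/file/two"])
    # -> "Failed to apply reverse diff for file(s): test/file/one, test/file/two"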
diff --git a/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py b/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py
index f96b7b6b5..7e1767023 100644
--- a/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
import random
from webkitpy.common.system.outputcapture import OutputCapture
@@ -88,8 +88,11 @@ class IRCBotTest(unittest.TestCase):
OutputCapture().assert_outputs(self, run, args=["hi"], expected_logs=expected_logs)
def test_help(self):
- expected_logs = "MOCK: irc.post: mock_nick: Available commands: create-bug, help, hi, restart, roll-chromium-deps, rollout, whois\n"
+ expected_logs = 'MOCK: irc.post: mock_nick: Available commands: create-bug, help, hi, ping, restart, roll-chromium-deps, rollout, whois, yt?\nMOCK: irc.post: mock_nick: Type "mock-sheriff-bot: help COMMAND" for help on my individual commands.\n'
OutputCapture().assert_outputs(self, run, args=["help"], expected_logs=expected_logs)
+ expected_logs = 'MOCK: irc.post: mock_nick: Usage: hi\nMOCK: irc.post: mock_nick: Responds with hi.\nMOCK: irc.post: mock_nick: Aliases: hello\n'
+ OutputCapture().assert_outputs(self, run, args=["help hi"], expected_logs=expected_logs)
+ OutputCapture().assert_outputs(self, run, args=["help hello"], expected_logs=expected_logs)
def test_restart(self):
expected_logs = "MOCK: irc.post: Restarting...\n"
@@ -103,14 +106,6 @@ class IRCBotTest(unittest.TestCase):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["revert 21654 This patch broke the world"], expected_logs=expected_logs)
- def test_roll_chromium_deps(self):
- expected_logs = "MOCK: irc.post: mock_nick: Rolling Chromium DEPS to r21654\nMOCK: irc.post: mock_nick: Created DEPS roll: http://example.com/36936\n"
- OutputCapture().assert_outputs(self, run, args=["roll-chromium-deps 21654"], expected_logs=expected_logs)
-
- def test_roll_chromium_deps_to_lkgr(self):
- expected_logs = "MOCK: irc.post: mock_nick: Rolling Chromium DEPS to last-known good revision\nMOCK: irc.post: mock_nick: Created DEPS roll: http://example.com/36936\n"
- OutputCapture().assert_outputs(self, run, args=["roll-chromium-deps"], expected_logs=expected_logs)
-
def test_multi_rollout(self):
expected_logs = "MOCK: irc.post: mock_nick: Preparing rollout for http://trac.webkit.org/changeset/21654, http://trac.webkit.org/changeset/21655, and http://trac.webkit.org/changeset/21656 ...\nMOCK: irc.post: mock_nick, abarth, darin, eseidel: Created rollout: http://example.com/36936\n"
OutputCapture().assert_outputs(self, run, args=["rollout 21654 21655 21656 This 21654 patch broke the world"], expected_logs=expected_logs)
diff --git a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py
index 4e09f896f..a9e53ddb2 100644
--- a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py
+++ b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2011 Google Inc. All rights reserved.
+# Copyright (c) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -35,31 +35,32 @@ from webkitpy.tool.steps.runtests import RunTests
_log = logging.getLogger(__name__)
+# FIXME: This class no longer has a clear purpose, and should probably
+# be made part of Port, or renamed to LayoutTestResultsArchiver or something more fitting?
class LayoutTestResultsReader(object):
- def __init__(self, tool, archive_directory):
- self._tool = tool
+ def __init__(self, host, results_directory, archive_directory):
+ self._host = host
+ self._results_directory = results_directory
self._archive_directory = archive_directory
# FIXME: This exists for mocking, but should instead be mocked via
- # tool.filesystem.read_text_file. They have different error handling at the moment.
+ # host.filesystem.read_text_file. They have different error handling at the moment.
def _read_file_contents(self, path):
try:
- return self._tool.filesystem.read_text_file(path)
+ return self._host.filesystem.read_text_file(path)
except IOError, e: # File does not exist or can't be read.
return None
# FIXME: This logic should move to the port object.
def _create_layout_test_results(self):
- results_path = self._tool.port().layout_tests_results_path()
+ results_path = self._host.filesystem.join(self._results_directory, "full_results.json")
results_html = self._read_file_contents(results_path)
if not results_html:
return None
return LayoutTestResults.results_from_string(results_html)
def _create_unit_test_results(self):
- results_path = self._tool.port().unit_tests_results_path()
- if not results_path:
- return None
+ results_path = self._host.filesystem.join(self._results_directory, "webkit_unit_tests_output.xml")
results_xml = self._read_file_contents(results_path)
if not results_xml:
return None
@@ -69,36 +70,28 @@ class LayoutTestResultsReader(object):
layout_test_results = self._create_layout_test_results()
unit_test_results = self._create_unit_test_results()
if layout_test_results:
- # FIXME: We should not have to set failure_limit_count, but we
- # do until run-webkit-tests can be updated save off the value
- # of --exit-after-N-failures in results.html/results.json.
- # https://bugs.webkit.org/show_bug.cgi?id=58481
+ # FIXME: This is used to detect if we had N failures due to
+ # N tests failing, or if we hit the "exit-after-n-failures" limit.
+ # These days we could just check for the "interrupted" key in results.json instead!
layout_test_results.set_failure_limit_count(RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT)
if unit_test_results:
layout_test_results.add_unit_test_failures(unit_test_results)
return layout_test_results
- def _results_directory(self):
- results_path = self._tool.port().layout_tests_results_path()
- # FIXME: This is wrong in two ways:
- # 1. It assumes that results.html is at the top level of the results tree.
- # 2. This uses the "old" ports.py infrastructure instead of the new layout_tests/port
- # which will not support Chromium. However the new arch doesn't work with old-run-webkit-tests
- # so we have to use this for now.
- return self._tool.filesystem.dirname(results_path)
-
def archive(self, patch):
- results_directory = self._results_directory()
- results_name, _ = self._tool.filesystem.splitext(self._tool.filesystem.basename(results_directory))
+ filesystem = self._host.filesystem
+ workspace = self._host.workspace
+ results_directory = self._results_directory
+ results_name, _ = filesystem.splitext(filesystem.basename(results_directory))
# Note: We name the zip with the bug_id instead of patch_id to match work_item_log_path().
- zip_path = self._tool.workspace.find_unused_filename(self._archive_directory, "%s-%s" % (patch.bug_id(), results_name), "zip")
+ zip_path = workspace.find_unused_filename(self._archive_directory, "%s-%s" % (patch.bug_id(), results_name), "zip")
if not zip_path:
return None
- if not self._tool.filesystem.isdir(results_directory):
+ if not filesystem.isdir(results_directory):
_log.info("%s does not exist, not archiving." % results_directory)
return None
- archive = self._tool.workspace.create_zip(zip_path, results_directory)
+ archive = workspace.create_zip(filesystem.abspath(zip_path), filesystem.abspath(results_directory))
# Remove the results directory to prevent http logs, etc. from getting huge between runs.
# We could have create_zip remove the original, but this is more explicit.
- self._tool.filesystem.rmtree(results_directory)
+ filesystem.rmtree(results_directory)
return archive
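
LayoutTestResultsReader now receives the results directory explicitly and joins it with full_results.json and webkit_unit_tests_output.xml itself, instead of asking the tool's port where results.html lives. A hedged usage sketch, assuming webkitpy is importable and using the mock paths from the unit tests below (they are test values, not defaults):

    from webkitpy.common.host_mock import MockHost
    from webkitpy.tool.bot.layouttestresultsreader import LayoutTestResultsReader

    host = MockHost()
    results_directory = "/mock-results"    # where run-webkit-tests wrote full_results.json
    archive_directory = "/var/logs"        # where archive() drops the per-bug zip

    reader = LayoutTestResultsReader(host, results_directory, archive_directory)
    results = reader.results()             # None when full_results.json is missing or empty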
diff --git a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py
index 6079632bd..c779bb8df 100644
--- a/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/layouttestresultsreader_unittest.py
@@ -26,32 +26,33 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.layouttestresults import LayoutTestResults
-from webkitpy.tool.bot.layouttestresultsreader import *
-from webkitpy.tool.mocktool import MockTool
+from webkitpy.common.host_mock import MockHost
+
+from .layouttestresultsreader import LayoutTestResultsReader
class LayoutTestResultsReaderTest(unittest.TestCase):
def test_missing_layout_test_results(self):
- tool = MockTool()
- reader = LayoutTestResultsReader(tool, "/var/logs")
+ host = MockHost()
+ reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
layout_tests_results_path = '/mock-results/full_results.json'
unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
- tool.filesystem = MockFileSystem({layout_tests_results_path: None,
+ host.filesystem = MockFileSystem({layout_tests_results_path: None,
unit_tests_results_path: None})
# Make sure that our filesystem mock functions as we expect.
- self.assertRaises(IOError, tool.filesystem.read_text_file, layout_tests_results_path)
- self.assertRaises(IOError, tool.filesystem.read_text_file, unit_tests_results_path)
- # layout_test_results shouldn't raise even if the results.html file is missing.
- self.assertEqual(reader.results(), None)
+ self.assertRaises(IOError, host.filesystem.read_text_file, layout_tests_results_path)
+ self.assertRaises(IOError, host.filesystem.read_text_file, unit_tests_results_path)
+ # layout_test_results shouldn't raise even if the results.json file is missing.
+ self.assertIsNone(reader.results())
def test_create_unit_test_results(self):
- tool = MockTool()
- reader = LayoutTestResultsReader(tool, "/var/logs")
+ host = MockHost()
+ reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
unit_tests_results_path = '/mock-results/webkit_unit_tests_output.xml'
no_failures_xml = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="3" failures="0" disabled="0" errors="0" time="11.35" name="AllTests">
@@ -61,45 +62,59 @@ class LayoutTestResultsReaderTest(unittest.TestCase):
<testcase name="CrashIfSettingUnsetRowIndex" status="run" time="0.123" classname="RenderTableCellDeathTest" />
</testsuite>
</testsuites>"""
- tool.filesystem = MockFileSystem({unit_tests_results_path: no_failures_xml})
+ host.filesystem = MockFileSystem({unit_tests_results_path: no_failures_xml})
self.assertEqual(reader._create_unit_test_results(), [])
def test_missing_unit_test_results_path(self):
- tool = MockTool()
- tool.port().unit_tests_results_path = lambda: None
- reader = LayoutTestResultsReader(tool, "/var/logs")
+ host = MockHost()
+ reader = LayoutTestResultsReader(host, "/mock-results", "/var/logs")
reader._create_layout_test_results = lambda: LayoutTestResults([])
+ reader._create_unit_test_results = lambda: None
# layout_test_results shouldn't raise even if the unit tests xml file is missing.
- self.assertNotEquals(reader.results(), None)
+ self.assertIsNotNone(reader.results())

self.assertEqual(reader.results().failing_tests(), [])
def test_layout_test_results(self):
- reader = LayoutTestResultsReader(MockTool(), "/var/logs")
+ reader = LayoutTestResultsReader(MockHost(), "/mock-results", "/var/logs")
reader._read_file_contents = lambda path: None
- self.assertEqual(reader.results(), None)
+ self.assertIsNone(reader.results())
reader._read_file_contents = lambda path: ""
- self.assertEqual(reader.results(), None)
+ self.assertIsNone(reader.results())
reader._create_layout_test_results = lambda: LayoutTestResults([])
results = reader.results()
- self.assertNotEquals(results, None)
+ self.assertIsNotNone(results)
self.assertEqual(results.failure_limit_count(), 30) # This value matches RunTests.NON_INTERACTIVE_FAILURE_LIMIT_COUNT
def test_archive_last_layout_test_results(self):
- tool = MockTool()
- reader = LayoutTestResultsReader(tool, "/var/logs")
- patch = tool.bugs.fetch_attachment(10001)
- tool.filesystem = MockFileSystem()
+ host = MockHost()
+ results_directory = "/mock-results"
+ reader = LayoutTestResultsReader(host, results_directory, "/var/logs")
+ patch = host.bugs.fetch_attachment(10001)
+ host.filesystem = MockFileSystem()
# Should fail because the results_directory does not exist.
expected_logs = "/mock-results does not exist, not archiving.\n"
archive = OutputCapture().assert_outputs(self, reader.archive, [patch], expected_logs=expected_logs)
- self.assertEqual(archive, None)
+ self.assertIsNone(archive)
- results_directory = "/mock-results"
- # Sanity check what we assume our mock results directory is.
- self.assertEqual(reader._results_directory(), results_directory)
- tool.filesystem.maybe_make_directory(results_directory)
- self.assertTrue(tool.filesystem.exists(results_directory))
+ host.filesystem.maybe_make_directory(results_directory)
+ self.assertTrue(host.filesystem.exists(results_directory))
+
+ self.assertIsNotNone(reader.archive(patch))
+ self.assertFalse(host.filesystem.exists(results_directory))
- self.assertNotEqual(reader.archive(patch), None)
- self.assertFalse(tool.filesystem.exists(results_directory))
+ def test_archive_last_layout_test_results_with_relative_path(self):
+ host = MockHost()
+ results_directory = "/mock-checkout/layout-test-results"
+
+ host.filesystem.maybe_make_directory(results_directory)
+ host.filesystem.maybe_make_directory('/var/logs')
+ self.assertTrue(host.filesystem.exists(results_directory))
+
+ host.filesystem.chdir('/var')
+ reader = LayoutTestResultsReader(host, results_directory, 'logs')
+ patch = host.bugs.fetch_attachment(10001)
+ # Should succeed: the results_directory exists and 'logs' resolves relative to the current directory.
+ self.assertIsNotNone(reader.archive(patch))
+ self.assertEqual(host.workspace.source_path, results_directory)
+ self.assertEqual(host.workspace.zip_path, '/var/logs/50000-layout-test-results.zip')
diff --git a/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py b/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
index cde1c842e..b01c6c7e2 100644
--- a/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
+++ b/Tools/Scripts/webkitpy/tool/bot/patchanalysistask.py
@@ -192,7 +192,7 @@ class PatchAnalysisTask(object):
return True
if self._test():
- # Only report flaky tests if we were successful at parsing results.html and archiving results.
+ # Only report flaky tests if we were successful at parsing results.json and archiving results.
if first_results and first_results_archive:
self._report_flaky_tests(first_results.failing_test_results(), first_results_archive)
return True
diff --git a/Tools/Scripts/webkitpy/tool/bot/queueengine.py b/Tools/Scripts/webkitpy/tool/bot/queueengine.py
index 6d5576e28..90e553f86 100644
--- a/Tools/Scripts/webkitpy/tool/bot/queueengine.py
+++ b/Tools/Scripts/webkitpy/tool/bot/queueengine.py
@@ -69,15 +69,14 @@ class QueueEngineDelegate:
class QueueEngine:
- def __init__(self, name, delegate, wakeup_event):
+ def __init__(self, name, delegate, wakeup_event, seconds_to_sleep=120):
self._name = name
self._delegate = delegate
self._wakeup_event = wakeup_event
self._output_tee = OutputTee()
+ self._seconds_to_sleep = seconds_to_sleep
log_date_format = "%Y-%m-%d %H:%M:%S"
- sleep_duration_text = "2 mins" # This could be generated from seconds_to_sleep
- seconds_to_sleep = 120
handled_error_code = 2
# Child processes exit with a special code so the parent queue process can detect the error was handled.
@@ -153,10 +152,14 @@ class QueueEngine:
return datetime.now()
def _sleep_message(self, message):
- wake_time = self._now() + timedelta(seconds=self.seconds_to_sleep)
- return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(self.log_date_format), self.sleep_duration_text)
+ wake_time = self._now() + timedelta(seconds=self._seconds_to_sleep)
+ if self._seconds_to_sleep < 3 * 60:
+ sleep_duration_text = str(self._seconds_to_sleep) + ' seconds'
+ else:
+ sleep_duration_text = str(round(self._seconds_to_sleep / 60)) + ' minutes'
+ return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(self.log_date_format), sleep_duration_text)
def _sleep(self, message):
_log.info(self._sleep_message(message))
- self._wakeup_event.wait(self.seconds_to_sleep)
+ self._wakeup_event.wait(self._seconds_to_sleep)
self._wakeup_event.clear()
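
The sleep interval is now a constructor argument, and the human-readable duration is derived from it instead of being hard-coded as "2 mins". A standalone restatement of that duration logic (sleep_message here is a free function for illustration; the real code is the _sleep_message method above):

    from datetime import datetime, timedelta

    def sleep_message(message, seconds_to_sleep, now, log_date_format="%Y-%m-%d %H:%M:%S"):
        # Durations under three minutes are reported in seconds, longer ones
        # in (rounded) minutes, mirroring the patched QueueEngine._sleep_message.
        wake_time = now + timedelta(seconds=seconds_to_sleep)
        if seconds_to_sleep < 3 * 60:
            sleep_duration_text = str(seconds_to_sleep) + ' seconds'
        else:
            sleep_duration_text = str(round(seconds_to_sleep / 60)) + ' minutes'
        return "%s Sleeping until %s (%s)." % (message, wake_time.strftime(log_date_format), sleep_duration_text)

    # sleep_message("MESSAGE", 120, datetime(2010, 1, 1))
    # -> "MESSAGE Sleeping until 2010-01-01 00:02:00 (120 seconds)."

This matches the updated expectation in queueengine_unittest.py below.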
diff --git a/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py b/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py
index 0ee8b5ad8..de9fa2398 100644
--- a/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/queueengine_unittest.py
@@ -31,7 +31,7 @@ import os
import shutil
import tempfile
import threading
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
@@ -168,12 +168,12 @@ class QueueEngineTest(unittest.TestCase):
def test_now(self):
"""Make sure there are no typos in the QueueEngine.now() method."""
engine = QueueEngine("test", None, None)
- self.assertTrue(isinstance(engine._now(), datetime.datetime))
+ self.assertIsInstance(engine._now(), datetime.datetime)
def test_sleep_message(self):
engine = QueueEngine("test", None, None)
engine._now = lambda: datetime.datetime(2010, 1, 1)
- expected_sleep_message = "MESSAGE Sleeping until 2010-01-01 00:02:00 (2 mins)."
+ expected_sleep_message = "MESSAGE Sleeping until 2010-01-01 00:02:00 (120 seconds)."
self.assertEqual(engine._sleep_message("MESSAGE"), expected_sleep_message)
def setUp(self):
@@ -181,7 +181,3 @@ class QueueEngineTest(unittest.TestCase):
def tearDown(self):
shutil.rmtree(self.temp_dir)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriff.py b/Tools/Scripts/webkitpy/tool/bot/sheriff.py
index b4e95aec0..9ef487537 100644
--- a/Tools/Scripts/webkitpy/tool/bot/sheriff.py
+++ b/Tools/Scripts/webkitpy/tool/bot/sheriff.py
@@ -36,6 +36,9 @@ class Sheriff(object):
self._tool = tool
self._sheriffbot = sheriffbot
+ def name(self):
+ return self._sheriffbot.name
+
def responsible_nicknames_from_commit_info(self, commit_info):
nestedList = [party.irc_nicknames for party in commit_info.responsible_parties() if party.irc_nicknames]
return reduce(lambda list, childList: list + childList, nestedList)
@@ -88,18 +91,6 @@ class Sheriff(object):
])
return urls.parse_bug_id(output)
- def post_chromium_deps_roll(self, revision, revision_name):
- args = [
- "post-chromium-deps-roll",
- "--force-clean",
- "--non-interactive",
- "--parent-command=sheriff-bot",
- ]
- # revision can be None, but revision_name is always something meaningful.
- args += [revision, revision_name]
- output = self._sheriffbot.run_webkit_patch(args)
- return urls.parse_bug_id(output)
-
def post_blame_comment_on_bug(self, commit_info, builders, tests):
if not commit_info.bug_id():
return
diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py b/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py
index 02fc03608..cf989c9ce 100644
--- a/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/sheriff_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.buildbot import Builder
from webkitpy.common.system.outputcapture import OutputCapture
diff --git a/Tools/Scripts/webkitpy/tool/commands/__init__.py b/Tools/Scripts/webkitpy/tool/commands/__init__.py
index 45711815e..a584ee037 100644
--- a/Tools/Scripts/webkitpy/tool/commands/__init__.py
+++ b/Tools/Scripts/webkitpy/tool/commands/__init__.py
@@ -5,11 +5,11 @@ from webkitpy.tool.commands.analyzechangelog import AnalyzeChangeLog
from webkitpy.tool.commands.applywatchlistlocal import ApplyWatchListLocal
from webkitpy.tool.commands.bugfortest import BugForTest
from webkitpy.tool.commands.bugsearch import BugSearch
-from webkitpy.tool.commands.chromechannels import ChromeChannels
from webkitpy.tool.commands.download import *
-from webkitpy.tool.commands.earlywarningsystem import *
+from webkitpy.tool.commands.earlywarningsystem import AbstractEarlyWarningSystem
from webkitpy.tool.commands.findusers import FindUsers
from webkitpy.tool.commands.gardenomatic import GardenOMatic
+from webkitpy.tool.commands.newcommitbot import NewCommitBot
from webkitpy.tool.commands.openbugs import OpenBugs
from webkitpy.tool.commands.perfalizer import Perfalizer
from webkitpy.tool.commands.prettydiff import PrettyDiff
@@ -17,7 +17,8 @@ from webkitpy.tool.commands.queries import *
from webkitpy.tool.commands.queues import *
from webkitpy.tool.commands.rebaseline import Rebaseline
from webkitpy.tool.commands.rebaselineserver import RebaselineServer
-from webkitpy.tool.commands.roll import *
from webkitpy.tool.commands.sheriffbot import *
from webkitpy.tool.commands.upload import *
from webkitpy.tool.commands.suggestnominations import *
+
+AbstractEarlyWarningSystem.load_ews_classes()
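
The star import of earlywarningsystem is gone; instead AbstractEarlyWarningSystem.load_ews_classes() runs once at package import, which suggests the per-port EWS command classes are generated rather than imported one by one. The body of load_ews_classes is not part of this patch; a generic, purely illustrative sketch of that kind of registration (the port names and attributes here are assumptions, not the real configuration):

    class AbstractEarlyWarningSystem(object):
        # Illustrative pattern only: build one command subclass per port name
        # so the command framework can discover them by class.
        _port_names = ["mac-ews", "gtk-wk2-ews"]   # assumed example names

        @classmethod
        def load_ews_classes(cls):
            classes = []
            for port_name in cls._port_names:
                class_name = "".join(part.capitalize() for part in port_name.split("-"))
                classes.append(type(class_name, (cls,), {"name": port_name, "port_name": port_name}))
            return classes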
diff --git a/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py b/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
index 0a7788c11..25a36ce0b 100644
--- a/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
+++ b/Tools/Scripts/webkitpy/tool/commands/abstractlocalservercommand.py
@@ -25,10 +25,10 @@
from optparse import make_option
import threading
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
-class AbstractLocalServerCommand(AbstractDeclarativeCommand):
+class AbstractLocalServerCommand(Command):
server = None
launch_path = "/"
@@ -37,7 +37,7 @@ class AbstractLocalServerCommand(AbstractDeclarativeCommand):
make_option("--httpd-port", action="store", type="int", default=8127, help="Port to use for the HTTP server"),
make_option("--no-show-results", action="store_false", default=True, dest="show_results", help="Don't launch a browser with the rebaseline server"),
]
- AbstractDeclarativeCommand.__init__(self, options=options)
+ Command.__init__(self, options=options)
def _prepare_config(self, options, args, tool):
return None
@@ -53,5 +53,5 @@ class AbstractLocalServerCommand(AbstractDeclarativeCommand):
# FIXME: This seems racy.
threading.Timer(0.1, lambda: self._tool.user.open_url(server_url)).start()
- httpd = self.server(httpd_port=options.httpd_port, config=config) # pylint: disable-msg=E1102
+ httpd = self.server(httpd_port=options.httpd_port, config=config) # pylint: disable=E1102
httpd.serve_forever()
diff --git a/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py b/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py
index 0593f2cfc..fcc76ca14 100644
--- a/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py
+++ b/Tools/Scripts/webkitpy/tool/commands/abstractsequencedcommand.py
@@ -30,16 +30,16 @@ import logging
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.commands.stepsequence import StepSequence
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
-class AbstractSequencedCommand(AbstractDeclarativeCommand):
+class AbstractSequencedCommand(Command):
steps = None
def __init__(self):
self._sequence = StepSequence(self.steps)
- AbstractDeclarativeCommand.__init__(self, self._sequence.options())
+ Command.__init__(self, self._sequence.options())
def _prepare_state(self, options, args, tool):
return None
diff --git a/Tools/Scripts/webkitpy/tool/commands/adduserstogroups.py b/Tools/Scripts/webkitpy/tool/commands/adduserstogroups.py
index 22869584d..25e719f61 100644
--- a/Tools/Scripts/webkitpy/tool/commands/adduserstogroups.py
+++ b/Tools/Scripts/webkitpy/tool/commands/adduserstogroups.py
@@ -26,10 +26,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
-class AddUsersToGroups(AbstractDeclarativeCommand):
+class AddUsersToGroups(Command):
name = "add-users-to-groups"
help_text = "Add users matching subtring to specified groups"
diff --git a/Tools/Scripts/webkitpy/tool/commands/analyzechangelog.py b/Tools/Scripts/webkitpy/tool/commands/analyzechangelog.py
index b88b61f55..1a1e810de 100644
--- a/Tools/Scripts/webkitpy/tool/commands/analyzechangelog.py
+++ b/Tools/Scripts/webkitpy/tool/commands/analyzechangelog.py
@@ -35,11 +35,11 @@ from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.common.config.contributionareas import ContributionAreas
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import Executive
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
from webkitpy.tool import steps
-class AnalyzeChangeLog(AbstractDeclarativeCommand):
+class AnalyzeChangeLog(Command):
name = "analyze-changelog"
help_text = "Experimental command for analyzing change logs."
long_help = "This command parses changelogs in a specified directory and summarizes the result as JSON files."
@@ -48,7 +48,7 @@ class AnalyzeChangeLog(AbstractDeclarativeCommand):
options = [
steps.Options.changelog_count,
]
- AbstractDeclarativeCommand.__init__(self, options=options)
+ Command.__init__(self, options=options)
@staticmethod
def _enumerate_changelogs(filesystem, dirname, changelog_count):
@@ -180,6 +180,7 @@ class ChangeLogAnalyzer(object):
def _analyze_entries(self, entries, changelog_path):
dirname = self._filesystem.dirname(changelog_path)
+ i = 0
for i, entry in enumerate(entries):
self._print_status('(%s) entries' % i)
assert(entry.authors())
@@ -201,6 +202,5 @@ class ChangeLogAnalyzer(object):
self._summary['reviewed' if reviewers_for_entry else 'unreviewed'] += 1
- i += 1
self._print_status('(%s) entries' % i)
return i
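
Seeding i before the enumerate loop (and dropping the manual increment) keeps the trailing _print_status call well-defined even when a ChangeLog yields no entries, and makes the final status report the last enumerate index. A tiny standalone illustration of the same guard (count_last_index is an illustrative name):

    def count_last_index(entries):
        # Without the "i = 0" seed, an empty iterable would leave i unbound
        # when the status line after the loop reads it.
        i = 0
        for i, entry in enumerate(entries):
            pass  # analyze the entry here
        return i

    # count_last_index([]) -> 0 ; count_last_index(["a", "b", "c"]) -> 2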
diff --git a/Tools/Scripts/webkitpy/tool/commands/analyzechangelog_unittest.py b/Tools/Scripts/webkitpy/tool/commands/analyzechangelog_unittest.py
index 661d2d85f..9c13740a2 100644
--- a/Tools/Scripts/webkitpy/tool/commands/analyzechangelog_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/analyzechangelog_unittest.py
@@ -36,7 +36,7 @@ from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.commands.analyzechangelog import AnalyzeChangeLog
from webkitpy.tool.commands.analyzechangelog import ChangeLogAnalyzer
from webkitpy.tool.commands.commandtest import CommandsTest
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
class AnalyzeChangeLogTest(CommandsTest):
diff --git a/Tools/Scripts/webkitpy/tool/commands/bugfortest.py b/Tools/Scripts/webkitpy/tool/commands/bugfortest.py
index 36aa6b5f1..f6f84115e 100644
--- a/Tools/Scripts/webkitpy/tool/commands/bugfortest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/bugfortest.py
@@ -26,14 +26,14 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
from webkitpy.tool.bot.flakytestreporter import FlakyTestReporter
# This is mostly a command for testing FlakyTestReporter, however
# it could be easily expanded to auto-create bugs, etc. if another
# command outside of webkitpy wanted to use it.
-class BugForTest(AbstractDeclarativeCommand):
+class BugForTest(Command):
name = "bug-for-test"
help_text = "Finds the bugzilla bug for a given test"
diff --git a/Tools/Scripts/webkitpy/tool/commands/bugsearch.py b/Tools/Scripts/webkitpy/tool/commands/bugsearch.py
index a1d74c548..1f3af7a70 100644
--- a/Tools/Scripts/webkitpy/tool/commands/bugsearch.py
+++ b/Tools/Scripts/webkitpy/tool/commands/bugsearch.py
@@ -26,10 +26,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
-class BugSearch(AbstractDeclarativeCommand):
+class BugSearch(Command):
name = "bug-search"
help_text = "List bugs matching a query"
argument_names = "QUERY"
diff --git a/Tools/Scripts/webkitpy/tool/commands/chromechannels.py b/Tools/Scripts/webkitpy/tool/commands/chromechannels.py
deleted file mode 100644
index da093b48c..000000000
--- a/Tools/Scripts/webkitpy/tool/commands/chromechannels.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# Copyright (c) 2012 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from optparse import make_option
-
-from webkitpy.common.net.omahaproxy import OmahaProxy
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
-
-import re
-
-
-class ChromeChannels(AbstractDeclarativeCommand):
- name = "chrome-channels"
- help_text = "List which chrome channels include the patches in bugs returned by QUERY."
- argument_names = "QUERY"
- long_help = """Retrieves the current list of Chrome releases from omahaproxy.appspot.com,
-and then runs the bugzilla quicksearch QUERY on bugs.bugzilla.org. For each bug
-returned by query, a single svn commit is deduced, and a short summary is
-printed of each bug listing which Chrome channels contain each bugs associated
-commit.
-
-The QUERY can be as simple as a bug number, or a comma delimited list of bug
-numbers. See https://bugzilla.mozilla.org/page.cgi?id=quicksearch.html for full
-documentation on the query format."""
-
- chrome_channels = OmahaProxy.chrome_channels
- commited_pattern = "Committed r([0-9]+): <http://trac.webkit.org/changeset/\\1>"
- rollout_pattern = "Rolled out in http://trac.webkit.org/changeset/[0-9]+"
-
- def __init__(self):
- AbstractDeclarativeCommand.__init__(self)
- self._re_committed = re.compile(self.commited_pattern)
- self._re_rollout = re.compile(self.rollout_pattern)
- self._omahaproxy = OmahaProxy()
-
- def _channels_for_bug(self, revisions, bug):
- comments = bug.comments()
- commit = None
-
- # Scan the comments, looking for a sane list of commits and rollbacks.
- for comment in comments:
- commit_match = self._re_committed.search(comment['text'])
- if commit_match:
- if commit:
- return "%5s %s\n... has too confusing a commit history to parse, skipping\n" % (bug.id(), bug.title())
- commit = int(commit_match.group(1))
- if self._re_rollout.search(comment['text']):
- commit = None
- if not commit:
- return "%5s %s\n... does not appear to have an associated commit.\n" % (bug.id(), bug.title())
-
- # We now know that we have a commit, so gather up the list of platforms
- # by channel, then print.
- by_channel = {}
- for revision in revisions:
- channel = revision['channel']
- if revision['commit'] < commit:
- continue
- if not channel in by_channel:
- by_channel[revision['channel']] = " %6s:" % channel
- by_channel[channel] += " %s," % revision['platform']
- if not by_channel:
- return "%5s %s (r%d)\n... not yet released in any Chrome channels.\n" % (bug.id(), bug.title(), commit)
- retval = "%5s %s (r%d)\n" % (bug.id(), bug.title(), commit)
- for channel in self.chrome_channels:
- if channel in by_channel:
- retval += by_channel[channel][:-1]
- retval += "\n"
- return retval
-
- def execute(self, options, args, tool):
- search_string = args[0]
- revisions = self._omahaproxy.get_revisions()
- bugs = tool.bugs.queries.fetch_bugs_matching_quicksearch(search_string)
- if not bugs:
- print "No bugs found matching '%s'" % search_string
- return
- for bug in bugs:
- print self._channels_for_bug(revisions, bug),
diff --git a/Tools/Scripts/webkitpy/tool/commands/chromechannels_unittest.py b/Tools/Scripts/webkitpy/tool/commands/chromechannels_unittest.py
deleted file mode 100644
index 037aebbfe..000000000
--- a/Tools/Scripts/webkitpy/tool/commands/chromechannels_unittest.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright (c) 2012 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from webkitpy.tool.commands.chromechannels import ChromeChannels
-from webkitpy.tool.commands.commandtest import CommandsTest
-from webkitpy.tool.mocktool import MockTool
-from webkitpy.common.net.omahaproxy import OmahaProxy
-
-
-class MockOmahaProxy(OmahaProxy):
- revisions = [{"commit": 20, "channel": "canary", "platform": "Mac", "date": "07/04/76"},
- {"commit": 20, "channel": "canary", "platform": "Windows", "date": "07/04/76"},
- {"commit": 25, "channel": "dev", "platform": "Mac", "date": "07/01/76"},
- {"commit": 30, "channel": "dev", "platform": "Windows", "date": "03/29/82"},
- {"commit": 30, "channel": "dev", "platform": "Linux", "date": "03/29/82"},
- {"commit": 15, "channel": "beta", "platform": "Windows", "date": "07/04/67"},
- {"commit": 15, "channel": "beta", "platform": "Linux", "date": "07/04/67"},
- {"commit": 10, "channel": "stable", "platform": "Windows", "date": "07/01/67"},
- {"commit": 20, "channel": "stable", "platform": "Linux", "date": "09/16/10"},
- ]
-
- def get_revisions(self):
- return self.revisions
-
-
-class TestableChromeChannels(ChromeChannels):
- def __init__(self):
- ChromeChannels.__init__(self)
- self._omahaproxy = MockOmahaProxy()
-
-
-class ChromeChannelsTest(CommandsTest):
-
- single_bug_expectations = {
- 50001: """50001 Bug with a patch needing review. (r35)
-... not yet released in any Chrome channels.
-""",
- 50002: """50002 The third bug
-... has too confusing a commit history to parse, skipping
-""",
- 50003: """50003 The fourth bug
-... does not appear to have an associated commit.
-""",
- 50004: """50004 The fifth bug (r15)
- canary: Mac, Windows
- dev: Mac, Windows, Linux
- beta: Windows, Linux
- stable: Linux
-"""}
-
- def test_single_bug(self):
- testable_chrome_channels = TestableChromeChannels()
- tool = MockTool()
- testable_chrome_channels.bind_to_tool(tool)
- revisions = testable_chrome_channels._omahaproxy.get_revisions()
- for bug_id, expectation in self.single_bug_expectations.items():
- self.assertEqual(testable_chrome_channels._channels_for_bug(revisions, testable_chrome_channels._tool.bugs.fetch_bug(bug_id)),
- expectation)
-
- def test_with_query(self):
- expected_stdout = \
-"""50001 Bug with a patch needing review. (r35)
-... not yet released in any Chrome channels.
-50002 The third bug
-... has too confusing a commit history to parse, skipping
-50003 The fourth bug
-... does not appear to have an associated commit.
-50004 The fifth bug (r15)
- canary: Mac, Windows
- dev: Mac, Windows, Linux
- beta: Windows, Linux
- stable: Linux
-"""
- self.assert_execute_outputs(TestableChromeChannels(), ["foo"], expected_stdout=expected_stdout)
diff --git a/Tools/Scripts/webkitpy/tool/commands/commandtest.py b/Tools/Scripts/webkitpy/tool/commands/commandtest.py
index 65f45b58f..655c33fda 100644
--- a/Tools/Scripts/webkitpy/tool/commands/commandtest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/commandtest.py
@@ -42,6 +42,7 @@ class CommandsTest(TestCase):
options.obsolete_patches = True
options.open_bug = True
options.port = 'MOCK port'
+ options.update_changelogs = False
options.quiet = True
options.reviewer = 'MOCK reviewer'
command.bind_to_tool(tool)
diff --git a/Tools/Scripts/webkitpy/tool/commands/download.py b/Tools/Scripts/webkitpy/tool/commands/download.py
index bdd780d2c..85f576928 100644
--- a/Tools/Scripts/webkitpy/tool/commands/download.py
+++ b/Tools/Scripts/webkitpy/tool/commands/download.py
@@ -38,7 +38,7 @@ from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCom
from webkitpy.tool.commands.stepsequence import StepSequence
from webkitpy.tool.comments import bug_comment_from_commit_text
from webkitpy.tool.grammar import pluralize
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
@@ -47,7 +47,7 @@ class Clean(AbstractSequencedCommand):
name = "clean"
help_text = "Clean the working copy"
steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
]
def _prepare_state(self, options, args, tool):
@@ -58,7 +58,7 @@ class Update(AbstractSequencedCommand):
name = "update"
help_text = "Update working copy (used internally)"
steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
]
@@ -67,7 +67,7 @@ class Build(AbstractSequencedCommand):
name = "build"
help_text = "Update working copy and build"
steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.Build,
]
@@ -80,7 +80,7 @@ class BuildAndTest(AbstractSequencedCommand):
name = "build-and-test"
help_text = "Update working copy, build, and run the tests"
steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.Build,
steps.RunTests,
@@ -114,8 +114,9 @@ If a bug id is provided, or one can be found in the ChangeLog land will update t
}
-class LandCowboy(AbstractSequencedCommand):
- name = "land-cowboy"
+class LandCowhand(AbstractSequencedCommand):
+ # Gender-blind term for cowboy, see: http://en.wiktionary.org/wiki/cowhand
+ name = "land-cowhand"
help_text = "Prepares a ChangeLog and lands the current working directory diff."
steps = [
steps.PrepareChangeLog,
@@ -132,9 +133,12 @@ class LandCowboy(AbstractSequencedCommand):
options.check_style_filter = "-changelog"
-class LandCowhand(LandCowboy):
- # Gender-blind term for cowboy, see: http://en.wiktionary.org/wiki/cowhand
- name = "land-cowhand"
+class LandCowboy(LandCowhand):
+ name = "land-cowboy"
+
+ def _prepare_state(self, options, args, tool):
+ _log.warning("land-cowboy is deprecated, use land-cowhand instead.")
+ LandCowhand._prepare_state(self, options, args, tool)
class CheckStyleLocal(AbstractSequencedCommand):
@@ -145,11 +149,11 @@ class CheckStyleLocal(AbstractSequencedCommand):
]
-class AbstractPatchProcessingCommand(AbstractDeclarativeCommand):
+class AbstractPatchProcessingCommand(Command):
# Subclasses must implement the methods below. We don't declare them here
# because we want to be able to implement them with mix-ins.
#
- # pylint: disable-msg=E1101
+ # pylint: disable=E1101
# def _fetch_list_of_patches_to_process(self, options, args, tool):
# def _prepare_to_process(self, options, args, tool):
# def _process_patch(self, options, args, tool):
@@ -185,12 +189,22 @@ class AbstractPatchSequencingCommand(AbstractPatchProcessingCommand):
AbstractPatchProcessingCommand.__init__(self, options)
def _prepare_to_process(self, options, args, tool):
- self._prepare_sequence.run_and_handle_errors(tool, options)
+ try:
+ self.state = self._prepare_state(options, args, tool)
+ except ScriptError, e:
+ _log.error(e.message_with_output())
+ self._exit(e.exit_code or 2)
+ self._prepare_sequence.run_and_handle_errors(tool, options, self.state)
def _process_patch(self, patch, options, args, tool):
- state = { "patch" : patch }
+ state = {}
+ state.update(self.state or {})
+ state["patch"] = patch
self._main_sequence.run_and_handle_errors(tool, options, state)
+ def _prepare_state(self, options, args, tool):
+ return None
+
class ProcessAttachmentsMixin(object):
def _fetch_list_of_patches_to_process(self, options, args, tool):
@@ -235,7 +249,7 @@ class CheckStyle(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
help_text = "Run check-webkit-style on the specified attachments"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
main_steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.CheckStyle,
@@ -247,7 +261,7 @@ class BuildAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
help_text = "Apply and build patches from bugzilla"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
main_steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.Build,
@@ -259,7 +273,7 @@ class BuildAndTestAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsM
help_text = "Apply, build, and test patches from bugzilla"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
main_steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.Build,
@@ -270,7 +284,7 @@ class BuildAndTestAttachment(AbstractPatchSequencingCommand, ProcessAttachmentsM
class AbstractPatchApplyingCommand(AbstractPatchSequencingCommand):
prepare_steps = [
steps.EnsureLocalCommitIfNeeded,
- steps.CleanWorkingDirectoryWithLocalCommits,
+ steps.CleanWorkingDirectory,
steps.Update,
]
main_steps = [
@@ -299,7 +313,7 @@ class ApplyWatchList(AbstractPatchSequencingCommand, ProcessAttachmentsMixin):
help_text = "Applies the watchlist to the specified attachments"
argument_names = "ATTACHMENT_ID [ATTACHMENT_IDS]"
main_steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.ApplyWatchList,
@@ -310,7 +324,7 @@ Downloads the attachment, applies it locally, runs the watchlist against it, and
class AbstractPatchLandingCommand(AbstractPatchSequencingCommand):
main_steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.ApplyPatch,
steps.ValidateChangeLogs,
@@ -413,7 +427,7 @@ Applies the inverse diff for the provided revision(s).
Creates an appropriate rollout ChangeLog, including a trac link and bug link.
"""
steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.RevertRevision,
steps.PrepareChangeLogForRevert,
@@ -424,7 +438,7 @@ class CreateRollout(AbstractRolloutPrepCommand):
name = "create-rollout"
help_text = "Creates a bug to track the broken SVN revision(s) and uploads a rollout patch."
steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.RevertRevision,
steps.CreateBug,
@@ -470,7 +484,7 @@ Opens the generated ChangeLogs in $EDITOR.
Shows the prepared diff for confirmation.
Commits the revert and updates the bug (including re-opening the bug if necessary)."""
steps = [
- steps.CleanWorkingDirectory,
+ steps.DiscardLocalChanges,
steps.Update,
steps.RevertRevision,
steps.PrepareChangeLogForRevert,
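
Several threads run through this file: the clean/update/land pipelines switch from CleanWorkingDirectory to DiscardLocalChanges (patch-applying commands keep the gentler CleanWorkingDirectory), land-cowboy becomes a deprecated alias of land-cowhand, and AbstractPatchSequencingCommand gains a _prepare_state hook whose result is merged into every per-patch step state. A small sketch of that state merge (process_patch_state is an illustrative free-function restatement of _process_patch):

    def process_patch_state(prepare_state, patch):
        # Whatever _prepare_state returned is copied into each patch's state
        # dict before the main step sequence runs, then the patch is added.
        state = {}
        state.update(prepare_state or {})
        state["patch"] = patch
        return state

    # process_patch_state({"bug_id": 50000}, "attachment 10001")
    # -> {"bug_id": 50000, "patch": "attachment 10001"}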
diff --git a/Tools/Scripts/webkitpy/tool/commands/download_unittest.py b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py
index 14bf2ce5e..d35706f7e 100644
--- a/Tools/Scripts/webkitpy/tool/commands/download_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/download_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
@@ -99,6 +99,7 @@ Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
+Running bindings generation tests
Running WebKit unit tests
Running run-webkit-tests
"""
@@ -140,6 +141,7 @@ Message2."
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
+Running bindings generation tests
Running WebKit unit tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
@@ -153,7 +155,7 @@ Updating bug 50000
self.assertEqual(mock_tool.scm().create_patch.call_count, 0)
self.assertEqual(mock_tool.checkout().modified_changelogs.call_count, 1)
- def test_land_cowboy(self):
+ def test_land_cowhand(self):
expected_logs = """MOCK run_and_throw_if_fail: ['mock-prepare-ChangeLog', '--email=MOCK email', '--merge-base=None', 'MockFile1'], cwd=/mock-checkout
MOCK run_and_throw_if_fail: ['mock-check-webkit-style', '--git-commit', 'MOCK git commit', '--diff-files', 'MockFile1', '--filter', '-changelog'], cwd=/mock-checkout
MOCK run_command: ['ruby', '-I', '/mock-checkout/Websites/bugs.webkit.org/PrettyPatch', '/mock-checkout/Websites/bugs.webkit.org/PrettyPatch/prettify.rb'], cwd=None, input=Patch1
@@ -167,6 +169,8 @@ Running Perl unit tests
MOCK run_and_throw_if_fail: ['mock-test-webkitperl'], cwd=/mock-checkout
Running JavaScriptCore tests
MOCK run_and_throw_if_fail: ['mock-run-javacriptcore-tests'], cwd=/mock-checkout
+Running bindings generation tests
+MOCK run_and_throw_if_fail: ['mock-run-bindings-tests'], cwd=/mock-checkout
Running WebKit unit tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests'], cwd=/mock-checkout
Running run-webkit-tests
@@ -176,6 +180,9 @@ Committed r49824: <http://trac.webkit.org/changeset/49824>
No bug id provided.
"""
mock_tool = MockTool(log_executive=True)
+ self.assert_execute_outputs(LandCowhand(), [50000], options=self._default_options(), expected_logs=expected_logs, tool=mock_tool)
+
+ expected_logs = "land-cowboy is deprecated, use land-cowhand instead.\n" + expected_logs
self.assert_execute_outputs(LandCowboy(), [50000], options=self._default_options(), expected_logs=expected_logs, tool=mock_tool)
def test_land_red_builders(self):
@@ -183,6 +190,7 @@ No bug id provided.
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
+Running bindings generation tests
Running WebKit unit tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
@@ -214,6 +222,7 @@ Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
+Running bindings generation tests
Running WebKit unit tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
@@ -231,6 +240,7 @@ Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
+Running bindings generation tests
Running WebKit unit tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
@@ -241,6 +251,7 @@ Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
+Running bindings generation tests
Running WebKit unit tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
@@ -258,6 +269,7 @@ Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
+Running bindings generation tests
Running WebKit unit tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
@@ -268,6 +280,7 @@ Building WebKit
Running Python unit tests
Running Perl unit tests
Running JavaScriptCore tests
+Running bindings generation tests
Running WebKit unit tests
Running run-webkit-tests
Committed r49824: <http://trac.webkit.org/changeset/49824>
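
The renamed test above (test_land_cowhand) also expects the old land-cowboy command to keep working, prefixing its output with "land-cowboy is deprecated, use land-cowhand instead." before running the same steps. A minimal sketch of that alias pattern, assuming a simple subclass that logs and delegates (class bodies and the execute() signature here are illustrative, not the patch's actual implementation):

    import logging

    _log = logging.getLogger(__name__)


    class LandCowhand(object):
        name = "land-cowhand"

        def execute(self, options, args, tool):
            # ... prepare ChangeLogs, build, test and land the patch ...
            pass


    class LandCowboy(LandCowhand):
        name = "land-cowboy"

        def execute(self, options, args, tool):
            # The old name still works, but tells the user about the rename first.
            _log.info("land-cowboy is deprecated, use land-cowhand instead.")
            LandCowhand.execute(self, options, args, tool)
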
diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
index 98a9a36ed..b5e285c64 100644
--- a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
+++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
@@ -26,11 +26,13 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import json
import logging
from optparse import make_option
from webkitpy.common.config.committers import CommitterList
from webkitpy.common.config.ports import DeprecatedPort
+from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.system.executive import ScriptError
from webkitpy.tool.bot.earlywarningsystemtask import EarlyWarningSystemTask, EarlyWarningSystemTaskDelegate
from webkitpy.tool.bot.expectedfailures import ExpectedFailures
@@ -45,22 +47,16 @@ _log = logging.getLogger(__name__)
class AbstractEarlyWarningSystem(AbstractReviewQueue, EarlyWarningSystemTaskDelegate):
_build_style = "release"
# FIXME: Switch _default_run_tests from opt-in to opt-out once more bots are ready to run tests.
- _default_run_tests = False
-
- # Subclasses must override.
- port_name = None
+ run_tests = False
def __init__(self):
- options = [make_option("--run-tests", action="store_true", dest="run_tests", default=self._default_run_tests, help="Run the Layout tests for each patch")]
+ options = [make_option("--run-tests", action="store_true", dest="run_tests", default=self.run_tests, help="Run the Layout tests for each patch")]
AbstractReviewQueue.__init__(self, options=options)
- self.port = DeprecatedPort.port(self.port_name)
def begin_work_queue(self):
- # FIXME: This violates abstraction
- self._tool._deprecated_port = self.port
AbstractReviewQueue.begin_work_queue(self)
self._expected_failures = ExpectedFailures()
- self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._log_directory())
+ self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._port.results_directory(), self._log_directory())
def _failing_tests_message(self, task, patch):
results = task.results_from_patch_test_run(patch)
@@ -72,11 +68,13 @@ class AbstractEarlyWarningSystem(AbstractReviewQueue, EarlyWarningSystemTaskDele
def _post_reject_message_on_bug(self, tool, patch, status_id, extra_message_text=None):
results_link = tool.status_server.results_url_for_status(status_id)
message = "Attachment %s did not pass %s (%s):\nOutput: %s" % (patch.id(), self.name, self.port_name, results_link)
+ if extra_message_text:
+ message += "\n\n%s" % extra_message_text
# FIXME: We might want to add some text about rejecting from the commit-queue in
# the case where patch.commit_queue() isn't already set to '-'.
if self.watchers:
tool.bugs.add_cc_to_bug(patch.bug_id(), self.watchers)
- tool.bugs.set_flag_on_attachment(patch.id(), "commit-queue", "-", message, extra_message_text)
+ tool.bugs.set_flag_on_attachment(patch.id(), "commit-queue", "-", message)
def review_patch(self, patch):
task = EarlyWarningSystemTask(self, patch, self._options.run_tests)
@@ -105,7 +103,7 @@ class AbstractEarlyWarningSystem(AbstractReviewQueue, EarlyWarningSystemTaskDele
return self.name
def run_command(self, command):
- self.run_webkit_patch(command + [self.port.flag()])
+ self.run_webkit_patch(command + [self._deprecated_port.flag()])
def command_passed(self, message, patch):
pass
@@ -139,78 +137,21 @@ class AbstractEarlyWarningSystem(AbstractReviewQueue, EarlyWarningSystemTaskDele
# FIXME: Why does this not exit(1) like the superclass does?
_log.error(script_error.message_with_output())
+ @classmethod
+ def load_ews_classes(cls):
+ filesystem = FileSystem()
+ json_path = filesystem.join(filesystem.dirname(filesystem.path_to_module('webkitpy.common.config')), 'ews.json')
+ try:
+ ewses = json.loads(filesystem.read_text_file(json_path))
+ except ValueError:
+ return None
-class GtkEWS(AbstractEarlyWarningSystem):
- name = "gtk-ews"
- port_name = "gtk"
- watchers = AbstractEarlyWarningSystem.watchers + [
- "xan.lopez@gmail.com",
- ]
-
-
-class EflEWS(AbstractEarlyWarningSystem):
- name = "efl-ews"
- port_name = "efl"
- watchers = AbstractEarlyWarningSystem.watchers + [
- "leandro@profusion.mobi",
- "antognolli@profusion.mobi",
- "lucas.demarchi@profusion.mobi",
- "gyuyoung.kim@samsung.com",
- ]
-
-
-class QtEWS(AbstractEarlyWarningSystem):
- name = "qt-ews"
- port_name = "qt"
- watchers = AbstractEarlyWarningSystem.watchers + [
- "webkit-ews@sed.inf.u-szeged.hu",
- ]
-
-
-class QtWK2EWS(AbstractEarlyWarningSystem):
- name = "qt-wk2-ews"
- port_name = "qt"
- watchers = AbstractEarlyWarningSystem.watchers + [
- "webkit-ews@sed.inf.u-szeged.hu",
- ]
-
-
-class WinEWS(AbstractEarlyWarningSystem):
- name = "win-ews"
- port_name = "win"
- # Use debug, the Apple Win port fails to link Release on 32-bit Windows.
- # https://bugs.webkit.org/show_bug.cgi?id=39197
- _build_style = "debug"
-
-
-class AbstractChromiumEWS(AbstractEarlyWarningSystem):
- port_name = "chromium"
- watchers = AbstractEarlyWarningSystem.watchers + [
- "dglazkov@chromium.org",
- ]
-
-
-class ChromiumLinuxEWS(AbstractChromiumEWS):
- # FIXME: We should rename this command to cr-linux-ews, but that requires
- # a database migration. :(
- name = "chromium-ews"
- port_name = "chromium-xvfb"
- _default_run_tests = True
-
-
-class ChromiumWindowsEWS(AbstractChromiumEWS):
- name = "cr-win-ews"
-
-
-class ChromiumAndroidEWS(AbstractChromiumEWS):
- name = "cr-android-ews"
- port_name = "chromium-android"
- watchers = AbstractChromiumEWS.watchers + [
- "peter+ews@chromium.org",
- ]
-
-
-class MacEWS(AbstractEarlyWarningSystem):
- name = "mac-ews"
- port_name = "mac"
- _default_run_tests = True
+ classes = []
+ for name, config in ewses.iteritems():
+ classes.append(type(str(name.replace(' ', '')), (AbstractEarlyWarningSystem,), {
+ 'name': config['port'] + '-ews',
+ 'port_name': config['port'],
+ 'watchers': config.get('watchers', []),
+ 'run_tests': config.get('runTests', cls.run_tests),
+ }))
+ return classes
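
With this change the per-port EWS classes are no longer written out by hand; load_ews_classes() reads ews.json and builds one subclass per entry with type(). A self-contained sketch of the same pattern, using an inline dict in place of the real ews.json (the field names mirror the patch; AbstractEWS is a stand-in base class):

    # Build EWS-like classes dynamically with type(), as load_ews_classes() does.
    class AbstractEWS(object):
        run_tests = False

    ewses = {
        "Mac EWS": {"port": "mac", "runTests": True},
        "GTK EWS": {"port": "gtk", "watchers": ["xan.lopez@gmail.com"]},
    }

    classes = []
    for name, config in ewses.items():  # the patch uses iteritems() (Python 2)
        classes.append(type(str(name.replace(' ', '')), (AbstractEWS,), {
            'name': config['port'] + '-ews',
            'port_name': config['port'],
            'watchers': config.get('watchers', []),
            'run_tests': config.get('runTests', AbstractEWS.run_tests),
        }))

    for cls in classes:
        print("%s %s %s" % (cls.__name__, cls.name, cls.run_tests))  # e.g. MacEWS mac-ews True

One consequence of this design is that adding or retiring an EWS bot becomes a data change to ews.json rather than a code change to this file.
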
diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
index b33129a20..78dae3ba9 100644
--- a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
@@ -47,44 +47,45 @@ class AbstractEarlyWarningSystemTest(QueuesTest):
ews._expected_failures.unexpected_failures_observed = lambda results: set(["foo.html", "bar.html"])
task = Mock()
patch = ews._tool.bugs.fetch_attachment(10000)
- self.assertEqual(ews._failing_tests_message(task, patch), "New failing tests:\nbar.html\nfoo.html")
+ self.assertMultiLineEqual(ews._failing_tests_message(task, patch), "New failing tests:\nbar.html\nfoo.html")
-class EarlyWarningSytemTest(QueuesTest):
+class EarlyWarningSystemTest(QueuesTest):
def _default_expected_logs(self, ews):
- string_replacemnts = {
+ string_replacements = {
"name": ews.name,
"port": ews.port_name,
}
+ if ews.run_tests:
+ run_tests_line = "Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --port=%(port)s\n" % string_replacements
+ else:
+ run_tests_line = ""
+ string_replacements['run_tests_line'] = run_tests_line
+
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs(ews.name),
- "process_work_item": "MOCK: update_status: %(name)s Pass\nMOCK: release_work_item: %(name)s 10000\n" % string_replacemnts,
+ "process_work_item": """Running: webkit-patch --status-host=example.com clean --port=%(port)s
+Running: webkit-patch --status-host=example.com update --port=%(port)s
+Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=%(port)s
+Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=%(port)s
+%(run_tests_line)sMOCK: update_status: %(name)s Pass
+MOCK: release_work_item: %(name)s 10000
+""" % string_replacements,
"handle_unexpected_error": "Mock error message\n",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
}
return expected_logs
- def _test_builder_ews(self, ews):
+ def _test_ews(self, ews):
ews.bind_to_tool(MockTool())
options = Mock()
options.port = None
- options.run_tests = ews._default_run_tests
+ options.run_tests = ews.run_tests
self.assert_queue_outputs(ews, expected_logs=self._default_expected_logs(ews), options=options)
- def _test_testing_ews(self, ews):
- ews.test_results = lambda: None
- ews.bind_to_tool(MockTool())
- expected_logs = self._default_expected_logs(ews)
- self.assert_queue_outputs(ews, expected_logs=expected_logs)
-
- def test_builder_ewses(self):
- self._test_builder_ews(MacEWS())
- self._test_builder_ews(ChromiumWindowsEWS())
- self._test_builder_ews(ChromiumAndroidEWS())
- self._test_builder_ews(QtEWS())
- self._test_builder_ews(QtWK2EWS())
- self._test_builder_ews(GtkEWS())
- self._test_builder_ews(EflEWS())
-
- def test_testing_ewses(self):
- self._test_testing_ews(ChromiumLinuxEWS())
+ def test_ewses(self):
+ classes = AbstractEarlyWarningSystem.load_ews_classes()
+ self.assertTrue(classes)
+ self.maxDiff = None
+ for ews_class in classes:
+ self._test_ews(ews_class())
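
The tests above also switch to assertMultiLineEqual and set maxDiff = None, so a mismatch in the long expected-log strings is reported as a full line-by-line diff instead of a truncated repr. A minimal standalone illustration of those two knobs (plain unittest here; the patch imports unittest2 as unittest to get the same API on older Pythons):

    import unittest


    class LogComparisonExample(unittest.TestCase):
        maxDiff = None  # never truncate the failure diff

        def test_logs(self):
            expected = "Running Python unit tests\nRunning Perl unit tests\n"
            actual = "Running Python unit tests\nRunning Perl unit tests\n"
            # On failure, assertMultiLineEqual prints a unified diff of the two strings.
            self.assertMultiLineEqual(actual, expected)


    if __name__ == '__main__':
        unittest.main()
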
diff --git a/Tools/Scripts/webkitpy/tool/commands/findusers.py b/Tools/Scripts/webkitpy/tool/commands/findusers.py
index 4363c8cf2..ae4702373 100644
--- a/Tools/Scripts/webkitpy/tool/commands/findusers.py
+++ b/Tools/Scripts/webkitpy/tool/commands/findusers.py
@@ -26,10 +26,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
-class FindUsers(AbstractDeclarativeCommand):
+class FindUsers(Command):
name = "find-users"
help_text = "Find users matching substring"
diff --git a/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py b/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py
index c87c1a265..e9762858d 100644
--- a/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py
+++ b/Tools/Scripts/webkitpy/tool/commands/gardenomatic.py
@@ -22,7 +22,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.layout_tests.port import builders
+from webkitpy.port import builders
from webkitpy.tool.commands.rebaseline import AbstractRebaseliningCommand
from webkitpy.tool.servers.gardeningserver import GardeningHTTPServer
diff --git a/Tools/Scripts/webkitpy/tool/commands/newcommitbot.py b/Tools/Scripts/webkitpy/tool/commands/newcommitbot.py
new file mode 100644
index 000000000..958576158
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/newcommitbot.py
@@ -0,0 +1,172 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Copyright (c) 2013 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.config.committers import CommitterList
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.tool.bot.irc_command import IRCCommand
+from webkitpy.tool.bot.irc_command import Help
+from webkitpy.tool.bot.irc_command import Hi
+from webkitpy.tool.bot.irc_command import PingPong
+from webkitpy.tool.bot.irc_command import Restart
+from webkitpy.tool.bot.irc_command import YouThere
+from webkitpy.tool.bot.ircbot import IRCBot
+from webkitpy.tool.commands.queues import AbstractQueue
+from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
+
+_log = logging.getLogger(__name__)
+
+
+class Agent(object):
+ def __init__(self, tool, newcommitbot):
+ self._tool = tool
+ self._newcommitbot = newcommitbot
+
+ def name(self):
+ return 'WKR'
+
+
+class NewCommitBot(AbstractQueue, StepSequenceErrorHandler):
+ name = "WKR"
+ watchers = AbstractQueue.watchers + ["rniwa@webkit.org"]
+
+ _commands = {
+ "hi": Hi,
+ "ping": PingPong,
+ "restart": Restart,
+ "yt?": YouThere,
+ }
+
+ _maximum_number_of_revisions_to_avoid_spamming_irc = 10
+
+ # AbstractQueue methods
+
+ def begin_work_queue(self):
+ AbstractQueue.begin_work_queue(self)
+ self._last_svn_revision = int(self._tool.scm().head_svn_revision())
+ self._irc_bot = IRCBot(self.name, self._tool, Agent(self._tool, self), self._commands)
+ self._tool.ensure_irc_connected(self._irc_bot.irc_delegate())
+
+ def work_item_log_path(self, failure_map):
+ return None
+
+ def next_work_item(self):
+ self._irc_bot.process_pending_messages()
+
+ _log.info('Last SVN revision: %d' % self._last_svn_revision)
+
+ count = 0
+ while count < self._maximum_number_of_revisions_to_avoid_spamming_irc:
+ new_revision = self._last_svn_revision + 1
+ try:
+ commit_log = self._tool.executive.run_command(['svn', 'log', 'https://svn.webkit.org/repository/webkit/trunk', '--non-interactive', '--revision',
+ self._tool.scm().strip_r_from_svn_revision(new_revision)])
+ except ScriptError:
+ break
+
+ self._last_svn_revision = new_revision
+ if self._is_empty_log(commit_log):
+ continue
+
+ count += 1
+ _log.info('Found revision %d' % new_revision)
+ self._tool.irc().post(self._summarize_commit_log(commit_log).encode('utf-8'))
+
+ def _is_empty_log(self, commit_log):
+ return re.match(r'^\-+$', commit_log)
+
+ def process_work_item(self, failure_map):
+ return True
+
+ _patch_by_regex = re.compile(r'^Patch\s+by\s+(?P<author>.+?)\s+on(\s+\d{4}-\d{2}-\d{2})?\n?', re.MULTILINE | re.IGNORECASE)
+ _rollout_regex = re.compile(r'(rolling out|reverting) (?P<revisions>r?\d+((,\s*|,?\s*and\s+)?r?\d+)*)\.?\s*', re.MULTILINE | re.IGNORECASE)
+ _requested_by_regex = re.compile(r'^\"?(?P<reason>.+?)\"? \(Requested\s+by\s+(?P<author>.+?)\s+on\s+#webkit\)\.', re.MULTILINE | re.IGNORECASE)
+ _bugzilla_url_regex = re.compile(r'http(s?)://bugs\.webkit\.org/show_bug\.cgi\?id=(?P<id>\d+)', re.MULTILINE)
+ _trac_url_regex = re.compile(r'http(s?)://trac.webkit.org/changeset/(?P<revision>\d+)', re.MULTILINE)
+
+ @classmethod
+ def _summarize_commit_log(self, commit_log, committer_list=CommitterList()):
+ patch_by = self._patch_by_regex.search(commit_log)
+ commit_log = self._patch_by_regex.sub('', commit_log, count=1)
+
+ rollout = self._rollout_regex.search(commit_log)
+ commit_log = self._rollout_regex.sub('', commit_log, count=1)
+
+ requested_by = self._requested_by_regex.search(commit_log)
+
+ commit_log = self._bugzilla_url_regex.sub(r'https://webkit.org/b/\g<id>', commit_log)
+ commit_log = self._trac_url_regex.sub(r'https://trac.webkit.org/r\g<revision>', commit_log)
+
+ for contributor in committer_list.contributors():
+ if not contributor.irc_nicknames:
+ continue
+ name_with_nick = "%s (%s)" % (contributor.full_name, contributor.irc_nicknames[0])
+ if contributor.full_name in commit_log:
+ commit_log = commit_log.replace(contributor.full_name, name_with_nick)
+ for email in contributor.emails:
+ commit_log = commit_log.replace(' <' + email + '>', '')
+ else:
+ for email in contributor.emails:
+ commit_log = commit_log.replace(email, name_with_nick)
+
+ lines = commit_log.split('\n')[1:-2] # Ignore lines with ----------.
+
+ firstline = re.match(r'^(?P<revision>r\d+) \| (?P<email>[^\|]+) \| (?P<timestamp>[^|]+) \| [^\n]+', lines[0])
+ assert firstline
+ author = firstline.group('email')
+ if patch_by:
+ author = patch_by.group('author')
+
+ linkified_revision = 'https://trac.webkit.org/%s' % firstline.group('revision')
+ lines[0] = '%s by %s' % (linkified_revision, author)
+
+ if rollout:
+ if requested_by:
+ author = requested_by.group('author')
+ contributor = committer_list.contributor_by_irc_nickname(author)
+ if contributor:
+ author = "%s (%s)" % (contributor.full_name, contributor.irc_nicknames[0])
+ return '%s rolled out %s in %s : %s' % (author, rollout.group('revisions'),
+ linkified_revision, requested_by.group('reason'))
+ lines[0] = '%s rolled out %s in %s' % (author, rollout.group('revisions'), linkified_revision)
+
+ return ' '.join(filter(lambda line: len(line), lines)[0:4])
+
+ def handle_unexpected_error(self, failure_map, message):
+ _log.error(message)
+
+ # StepSequenceErrorHandler methods
+
+ @classmethod
+ def handle_script_error(cls, tool, state, script_error):
+ # Ideally we would post some information to IRC about what went wrong
+ # here, but we don't have the IRC password in the child process.
+ pass
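
_summarize_commit_log() above relies on a few regular expressions to spot rollouts and the "(Requested by ... on #webkit)" line before condensing the log into a single IRC message. A quick standalone illustration of those two patterns on a made-up commit message (the regexes are copied from the class; the sample text is invented):

    import re

    _rollout_regex = re.compile(r'(rolling out|reverting) (?P<revisions>r?\d+((,\s*|,?\s*and\s+)?r?\d+)*)\.?\s*', re.MULTILINE | re.IGNORECASE)
    _requested_by_regex = re.compile(r'^\"?(?P<reason>.+?)\"? \(Requested\s+by\s+(?P<author>.+?)\s+on\s+#webkit\)\.', re.MULTILINE | re.IGNORECASE)

    log = 'Unreviewed, rolling out r142734.\n"Triggered crashes" (Requested by ggaren on #webkit).'
    rollout = _rollout_regex.search(log)
    requested_by = _requested_by_regex.search(log)
    print(rollout.group('revisions'))     # r142734
    print(requested_by.group('author'))   # ggaren
    print(requested_by.group('reason'))   # Triggered crashes
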
diff --git a/Tools/Scripts/webkitpy/tool/commands/newcommitbot_unittest.py b/Tools/Scripts/webkitpy/tool/commands/newcommitbot_unittest.py
new file mode 100644
index 000000000..05bf45664
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/newcommitbot_unittest.py
@@ -0,0 +1,129 @@
+# Copyright (C) 2013 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.tool.commands.newcommitbot import NewCommitBot
+
+
+class NewCommitBotTest(unittest.TestCase):
+ def test_summarize_commit_log_basic(self):
+ self.assertEqual(NewCommitBot._summarize_commit_log("""------------------------------------------------------------------------
+r143106 | jochen@chromium.org | 2013-02-16 10:27:07 -0800 (Sat, 16 Feb 2013) | 10 lines
+
+[chromium] initialize all variables of TestRunner classes
+https://bugs.webkit.org/show_bug.cgi?id=110013
+
+Reviewed by Adam Barth.
+
+* DumpRenderTree/chromium/TestRunner/src/TestInterfaces.cpp:
+(WebTestRunner::TestInterfaces::TestInterfaces):
+* DumpRenderTree/chromium/TestRunner/src/TestRunner.cpp:
+(WebTestRunner::TestRunner::TestRunner):
+
+------------------------------------------------------------------------"""),
+ "https://trac.webkit.org/r143106 by Jochen Eisinger (jochen__) [chromium] initialize all variables of TestRunner classes"
+ " https://webkit.org/b/110013 Reviewed by Adam Barth (abarth).")
+
+ self.assertEqual(NewCommitBot._summarize_commit_log("""------------------------------------------------------------------------
+r140066 | simon.fraser@apple.com | 2013-01-17 16:10:31 -0800 (Thu, 17 Jan 2013) | 10 lines
+
+Allow PaintInfo to carry all PaintBehavior flags
+https://bugs.webkit.org/show_bug.cgi?id=106980
+
+Reviewed by Beth Dakin.
+
+In r139908 I missed one instance of the PaintInfo constructor that should take PaintBehaviorNormal
+instead of "false".
+
+* rendering/RenderScrollbarPart.cpp:
+(WebCore::RenderScrollbarPart::paintIntoRect):
+------------------------------------------------------------------------"""),
+ "https://trac.webkit.org/r140066 by Simon Fraser (smfr)"
+ " Allow PaintInfo to carry all PaintBehavior flags https://webkit.org/b/106980 Reviewed by Beth Dakin (dethbakin).")
+
+ def test_summarize_commit_log_rollout(self):
+ self.assertEqual(NewCommitBot._summarize_commit_log("""------------------------------------------------------------------------
+r143104 | commit-queue@webkit.org | 2013-02-16 09:09:01 -0800 (Sat, 16 Feb 2013) | 27 lines
+
+Unreviewed, rolling out r142734.
+http://trac.webkit.org/changeset/142734
+https://bugs.webkit.org/show_bug.cgi?id=110018
+
+"Triggered crashes on lots of websites" (Requested by ggaren
+on #webkit).
+
+Patch by Sheriff Bot <webkit.review.bot@gmail.com> on 2013-02-16
+
+Source/WebCore:
+
+------------------------------------------------------------------------"""),
+ "Geoffrey Garen (ggaren) rolled out r142734 in https://trac.webkit.org/r143104 : Triggered crashes on lots of websites")
+
+ self.assertEqual(NewCommitBot._summarize_commit_log("""------------------------------------------------------------------------
+r139884 | kov@webkit.org | 2013-01-16 08:26:10 -0800 (Wed, 16 Jan 2013) | 23 lines
+
+[GStreamer][Soup] Let GStreamer provide the buffer data is downloaded to, to avoid copying
+https://bugs.webkit.org/show_bug.cgi?id=105552
+
+Reverting 139877. It made a couple of API tests fail.
+
+* platform/graphics/gstreamer/GStreamerVersioning.cpp:
+* platform/graphics/gstreamer/GStreamerVersioning.h:
+* platform/graphics/gstreamer/WebKitWebSourceGStreamer.cpp:
+(StreamingClient):
+(_WebKitWebSrcPrivate):
+
+------------------------------------------------------------------------"""),
+ "Gustavo Noronha Silva (kov) rolled out 139877 in https://trac.webkit.org/r139884"
+ " [GStreamer][Soup] Let GStreamer provide the buffer data is downloaded to, to avoid copying"
+ " https://webkit.org/b/105552 It made a couple of API tests fail.")
+
+ self.assertEqual(NewCommitBot._summarize_commit_log("""------------------------------------------------------------------------
+r135487 | commit-queue@webkit.org | 2012-11-22 00:09:25 -0800 (Thu, 22 Nov 2012) | 52 lines
+
+Unreviewed, rolling out r134927 and r134944.
+http://trac.webkit.org/changeset/134927
+http://trac.webkit.org/changeset/134944
+https://bugs.webkit.org/show_bug.cgi?id=103028
+
+Reverting the reverts after merging. (Requested by vsevik on
+#webkit).
+
+Patch by Sheriff Bot <webkit.review.bot@gmail.com> on 2012-11-22
+
+* English.lproj/localizedStrings.js:
+* WebCore.gypi:
+* WebCore.vcproj/WebCore.vcproj:
+* inspector/compile-front-end.py:
+* inspector/front-end/AdvancedSearchController.js:
+* inspector/front-end/CallStackSidebarPane.js:
+
+------------------------------------------------------------------------"""),
+ "Vsevolod Vlasov (vsevik) rolled out r134927 and r134944 in https://trac.webkit.org/r135487 :"
+ " Reverting the reverts after merging.")
diff --git a/Tools/Scripts/webkitpy/tool/commands/openbugs.py b/Tools/Scripts/webkitpy/tool/commands/openbugs.py
index 8c55aba14..b2ed532e6 100644
--- a/Tools/Scripts/webkitpy/tool/commands/openbugs.py
+++ b/Tools/Scripts/webkitpy/tool/commands/openbugs.py
@@ -30,12 +30,12 @@ import logging
import re
import sys
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
-class OpenBugs(AbstractDeclarativeCommand):
+class OpenBugs(Command):
name = "open-bugs"
help_text = "Finds all bug numbers passed in arguments (or stdin if no args provided) and opens them in a web browser"
diff --git a/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py b/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
index feb7b05b3..3efb46129 100644
--- a/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
@@ -26,12 +26,12 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.buildbot import Builder
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
-from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.port.test import TestPort
from webkitpy.tool.commands.perfalizer import PerfalizerTask
from webkitpy.tool.mocktool import MockTool
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries.py b/Tools/Scripts/webkitpy/tool/commands/queries.py
index 7cc846715..ff1b46ef2 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queries.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queries.py
@@ -1,6 +1,7 @@
# Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
# Copyright (c) 2012 Intel Corporation. All rights reserved.
+# Copyright (c) 2013 University of Szeged. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -41,33 +42,31 @@ from webkitpy.common.checkout.commitinfo import CommitInfo
from webkitpy.common.config.committers import CommitterList
import webkitpy.common.config.urls as config_urls
from webkitpy.common.net.buildbot import BuildBot
+from webkitpy.common.net.bugzilla import Bugzilla
from webkitpy.common.net.regressionwindow import RegressionWindow
from webkitpy.common.system.crashlogs import CrashLogs
from webkitpy.common.system.user import User
+from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.grammar import pluralize
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
from webkitpy.layout_tests.models.test_expectations import TestExpectations
-from webkitpy.layout_tests.port import platform_options, configuration_options
+from webkitpy.port import platform_options, configuration_options
_log = logging.getLogger(__name__)
-class SuggestReviewers(AbstractDeclarativeCommand):
+class SuggestReviewers(AbstractSequencedCommand):
name = "suggest-reviewers"
help_text = "Suggest reviewers for a patch based on recent changes to the modified files."
+ steps = [
+ steps.SuggestReviewers,
+ ]
- def __init__(self):
- options = [
- steps.Options.git_commit,
- ]
- AbstractDeclarativeCommand.__init__(self, options=options)
-
- def execute(self, options, args, tool):
- reviewers = tool.checkout().suggested_reviewers(options.git_commit)
- print "\n".join([reviewer.full_name for reviewer in reviewers])
+ def _prepare_state(self, options, args, tool):
+ options.suggest_reviewers = True
-class BugsToCommit(AbstractDeclarativeCommand):
+class BugsToCommit(Command):
name = "bugs-to-commit"
help_text = "List bugs in the commit-queue"
@@ -78,7 +77,7 @@ class BugsToCommit(AbstractDeclarativeCommand):
print "%s" % bug_id
-class PatchesInCommitQueue(AbstractDeclarativeCommand):
+class PatchesInCommitQueue(Command):
name = "patches-in-commit-queue"
help_text = "List patches in the commit-queue"
@@ -89,14 +88,14 @@ class PatchesInCommitQueue(AbstractDeclarativeCommand):
print patch.url()
-class PatchesToCommitQueue(AbstractDeclarativeCommand):
+class PatchesToCommitQueue(Command):
name = "patches-to-commit-queue"
help_text = "Patches which should be added to the commit queue"
def __init__(self):
options = [
make_option("--bugs", action="store_true", dest="bugs", help="Output bug links instead of patch links"),
]
- AbstractDeclarativeCommand.__init__(self, options=options)
+ Command.__init__(self, options=options)
@staticmethod
def _needs_commit_queue(patch):
@@ -123,7 +122,7 @@ class PatchesToCommitQueue(AbstractDeclarativeCommand):
print "%s" % tool.bugs.attachment_url_for_id(patch.id(), action="edit")
-class PatchesToReview(AbstractDeclarativeCommand):
+class PatchesToReview(Command):
name = "patches-to-review"
help_text = "List bugs which have attachments pending review"
@@ -136,7 +135,7 @@ class PatchesToReview(AbstractDeclarativeCommand):
make_option("--cc-email",
help="Specifies the email on the CC field (defaults to your bugzilla login email)"),
]
- AbstractDeclarativeCommand.__init__(self, options=options)
+ Command.__init__(self, options=options)
def _print_report(self, report, cc_email, print_all):
if print_all:
@@ -176,7 +175,8 @@ class PatchesToReview(AbstractDeclarativeCommand):
report = self._generate_report(bugs, options.include_cq_denied)
self._print_report(report, cc_email, options.all)
-class WhatBroke(AbstractDeclarativeCommand):
+
+class WhatBroke(Command):
name = "what-broke"
help_text = "Print failing buildbots (%s) and what revisions broke them" % config_urls.buildbot_url
@@ -222,7 +222,7 @@ class WhatBroke(AbstractDeclarativeCommand):
print "All builders are passing!"
-class ResultsFor(AbstractDeclarativeCommand):
+class ResultsFor(Command):
name = "results-for"
help_text = "Print a list of failures for the passed revision from bots on %s" % config_urls.buildbot_url
argument_names = "REVISION"
@@ -244,7 +244,7 @@ class ResultsFor(AbstractDeclarativeCommand):
self._print_layout_test_results(build.layout_test_results())
-class FailureReason(AbstractDeclarativeCommand):
+class FailureReason(Command):
name = "failure-reason"
help_text = "Lists revisions where individual test failures started at %s" % config_urls.buildbot_url
@@ -330,7 +330,7 @@ class FailureReason(AbstractDeclarativeCommand):
return self._explain_failures_for_builder(builder, start_revision=int(start_revision))
-class FindFlakyTests(AbstractDeclarativeCommand):
+class FindFlakyTests(Command):
name = "find-flaky-tests"
help_text = "Lists tests that often fail for a single build at %s" % config_urls.buildbot_url
@@ -399,7 +399,7 @@ class FindFlakyTests(AbstractDeclarativeCommand):
return self._walk_backwards_from(builder, latest_revision, limit=int(limit))
-class TreeStatus(AbstractDeclarativeCommand):
+class TreeStatus(Command):
name = "tree-status"
help_text = "Print the status of the %s buildbots" % config_urls.buildbot_url
long_help = """Fetches build status from http://build.webkit.org/one_box_per_builder
@@ -411,7 +411,7 @@ and displayes the status of each builder."""
print "%s : %s" % (status_string.ljust(4), builder["name"])
-class CrashLog(AbstractDeclarativeCommand):
+class CrashLog(Command):
name = "crash-log"
help_text = "Print the newest crash log for the given process"
long_help = """Finds the newest crash log matching the given process name
@@ -426,7 +426,7 @@ and PID and prints it to stdout."""
print crash_logs.find_newest_log(args[0], pid)
-class PrintExpectations(AbstractDeclarativeCommand):
+class PrintExpectations(Command):
name = 'print-expectations'
help_text = 'Print the expected result for the given test(s) on the given port(s)'
@@ -446,7 +446,7 @@ class PrintExpectations(AbstractDeclarativeCommand):
help='display the paths for all applicable expectation files'),
] + platform_options(use_globs=True)
- AbstractDeclarativeCommand.__init__(self, options=options)
+ Command.__init__(self, options=options)
self._expectation_models = {}
def execute(self, options, args, tool):
@@ -514,13 +514,10 @@ class PrintExpectations(AbstractDeclarativeCommand):
def _model(self, options, port_name, tests):
port = self._tool.port_factory.get(port_name, options)
- expectations_path = port.path_to_test_expectations_file()
- if not expectations_path in self._expectation_models:
- self._expectation_models[expectations_path] = TestExpectations(port, tests).model()
- return self._expectation_models[expectations_path]
+ return TestExpectations(port, tests).model()
-class PrintBaselines(AbstractDeclarativeCommand):
+class PrintBaselines(Command):
name = 'print-baselines'
help_text = 'Prints the baseline locations for given test(s) on the given port(s)'
@@ -533,7 +530,7 @@ class PrintBaselines(AbstractDeclarativeCommand):
make_option('--include-virtual-tests', action='store_true',
help='Include virtual tests'),
] + platform_options(use_globs=True)
- AbstractDeclarativeCommand.__init__(self, options=options)
+ Command.__init__(self, options=options)
self._platform_regexp = re.compile('platform/([^\/]+)/(.+)')
def execute(self, options, args, tool):
@@ -579,3 +576,36 @@ class PrintBaselines(AbstractDeclarativeCommand):
if platform_matchobj:
return platform_matchobj.group(1)
return None
+
+
+class FindResolvedBugs(Command):
+ name = "find-resolved-bugs"
+ help_text = "Collect the RESOLVED bugs in the given TestExpectations file"
+ argument_names = "TEST_EXPECTATIONS_FILE"
+
+ def execute(self, options, args, tool):
+ filename = args[0]
+ if not tool.filesystem.isfile(filename):
+ print "The given path is not a file, please pass a valid path."
+ return
+
+ ids = set()
+ inputfile = tool.filesystem.open_text_file_for_reading(filename)
+ for line in inputfile:
+ result = re.search("(https://bugs\.webkit\.org/show_bug\.cgi\?id=|webkit\.org/b/)([0-9]+)", line)
+ if result:
+ ids.add(result.group(2))
+ inputfile.close()
+
+ resolved_ids = set()
+ num_of_bugs = len(ids)
+ bugzilla = Bugzilla()
+ for i, bugid in enumerate(ids, start=1):
+ bug = bugzilla.fetch_bug(bugid)
+ print "Checking bug %s \t [%d/%d]" % (bugid, i, num_of_bugs)
+ if not bug.is_open():
+ resolved_ids.add(bugid)
+
+ print "Resolved bugs in %s :" % (filename)
+ for bugid in resolved_ids:
+ print "https://bugs.webkit.org/show_bug.cgi?id=%s" % (bugid)
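
The new find-resolved-bugs command first harvests bug ids from the given TestExpectations file and only then asks Bugzilla whether each one is still open. A standalone sketch of the harvesting step, without the Bugzilla round-trips (the sample expectation lines are invented; the pattern mirrors the one in the command):

    import re

    bug_url_regex = re.compile(r"(https://bugs\.webkit\.org/show_bug\.cgi\?id=|webkit\.org/b/)([0-9]+)")

    expectation_lines = [
        "webkit.org/b/106980 fast/repaint/scrollbar-part.html [ ImageOnlyFailure ]",
        "https://bugs.webkit.org/show_bug.cgi?id=110013 fast/js/example.html [ Failure ]",
        "fast/dom/no-bug-here.html [ Pass ]",
    ]

    ids = set()
    for line in expectation_lines:
        result = bug_url_regex.search(line)
        if result:
            ids.add(result.group(2))

    print(sorted(ids))  # ['106980', '110013']
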
diff --git a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
index b252c0b0e..8800cac3b 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queries_unittest.py
@@ -27,13 +27,13 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.bugzilla import Bugzilla
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
-from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.port.test import TestPort
from webkitpy.tool.commands.commandtest import CommandsTest
from webkitpy.tool.commands.queries import *
from webkitpy.tool.mocktool import MockTool, MockOptions
@@ -173,7 +173,7 @@ class PrintExpectationsTest(unittest.TestCase):
command.execute(options, tests, tool)
finally:
stdout, _, _ = oc.restore_output()
- self.assertEqual(stdout, expected_stdout)
+ self.assertMultiLineEqual(stdout, expected_stdout)
def test_basic(self):
self.run_test(['failures/expected/text.html', 'failures/expected/image.html'],
@@ -223,10 +223,19 @@ class PrintExpectationsTest(unittest.TestCase):
def test_paths(self):
self.run_test([],
- ('LayoutTests/platform/test/TestExpectations\n'
+ ('LayoutTests/TestExpectations\n'
+ 'LayoutTests/platform/test/TestExpectations\n'
'LayoutTests/platform/test-win-xp/TestExpectations\n'),
paths=True)
+ def test_platform(self):
+ self.run_test(['platform/test-mac-leopard/http/test.html'],
+ ('// For test-mac-snowleopard\n'
+ 'platform/test-mac-leopard [ Pass Skip WontFix ]\n' # Note that this is the expectation (from being skipped internally), not the test name
+ '\n'
+ '// For test-mac-leopard\n'
+ 'platform/test-mac-leopard/http/test.html [ Pass ]\n'),
+ platform='test-mac-*')
class PrintBaselinesTest(unittest.TestCase):
def setUp(self):
@@ -255,7 +264,7 @@ class PrintBaselinesTest(unittest.TestCase):
self.capture_output()
command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform=None), ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
- self.assertEqual(stdout,
+ self.assertMultiLineEqual(stdout,
('// For test-win-xp\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'))
@@ -266,7 +275,7 @@ class PrintBaselinesTest(unittest.TestCase):
self.capture_output()
command.execute(MockOptions(all=False, include_virtual_tests=False, csv=False, platform='test-win-*'), ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
- self.assertEqual(stdout,
+ self.assertMultiLineEqual(stdout,
('// For test-win-vista\n'
'passes/text-expected.png\n'
'passes/text-expected.txt\n'
@@ -285,6 +294,6 @@ class PrintBaselinesTest(unittest.TestCase):
self.capture_output()
command.execute(MockOptions(all=False, platform='*xp', csv=True, include_virtual_tests=False), ['passes/text.html'], self.tool)
stdout, _, _ = self.restore_output()
- self.assertEqual(stdout,
+ self.assertMultiLineEqual(stdout,
('test-win-xp,passes/text.html,None,png,passes/text-expected.png,None\n'
'test-win-xp,passes/text.html,None,txt,passes/text-expected.txt,None\n'))
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues.py b/Tools/Scripts/webkitpy/tool/commands/queues.py
index edfbee402..74724cffb 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queues.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queues.py
@@ -72,7 +72,8 @@ class AbstractQueue(Command, QueueEngineDelegate):
make_option("--no-confirm", action="store_false", dest="confirm", default=True, help="Do not ask the user for confirmation before running the queue. Dangerous!"),
make_option("--exit-after-iteration", action="store", type="int", dest="iterations", default=None, help="Stop running the queue after iterating this number of times."),
]
- Command.__init__(self, "Run the %s" % self.name, options=options_list)
+ self.help_text = "Run the %s" % self.name
+ Command.__init__(self, options=options_list)
self._iteration_count = 0
def _cc_watchers(self, bug_id):
@@ -94,12 +95,17 @@ class AbstractQueue(Command, QueueEngineDelegate):
if self._options.port:
webkit_patch_args += ["--port=%s" % self._options.port]
webkit_patch_args.extend(args)
- # FIXME: There is probably no reason to use run_and_throw_if_fail anymore.
- # run_and_throw_if_fail was invented to support tee'd output
- # (where we write both to a log file and to the console at once),
- # but the queues don't need live-progress, a dump-of-output at the
- # end should be sufficient.
- return self._tool.executive.run_and_throw_if_fail(webkit_patch_args, cwd=self._tool.scm().checkout_root)
+
+ try:
+ args_for_printing = list(webkit_patch_args)
+ args_for_printing[0] = 'webkit-patch' # Printing our path for each log is redundant.
+ _log.info("Running: %s" % self._tool.executive.command_for_printing(args_for_printing))
+ command_output = self._tool.executive.run_command(webkit_patch_args, cwd=self._tool.scm().checkout_root)
+ except ScriptError, e:
+ # Make sure the whole output gets printed if the command failed.
+ _log.error(e.message_with_output(output_limit=None))
+ raise
+ return command_output
def _log_directory(self):
return os.path.join("..", "%s-logs" % self.name)
@@ -143,7 +149,7 @@ class AbstractQueue(Command, QueueEngineDelegate):
def execute(self, options, args, tool, engine=QueueEngine):
self._options = options # FIXME: This code is wrong. Command.options is a list, this assumes an Options element!
self._tool = tool # FIXME: This code is wrong too! Command.bind_to_tool handles this!
- return engine(self.name, self, self._tool.wakeup_event).run()
+ return engine(self.name, self, self._tool.wakeup_event, self._options.seconds_to_sleep).run()
@classmethod
def _log_from_script_error_for_upload(cls, script_error, output_limit=None):
@@ -241,10 +247,44 @@ class AbstractPatchQueue(AbstractQueue):
self._update_status(message, patch)
self._release_work_item(patch)
- # FIXME: This probably belongs at a layer below AbstractPatchQueue, but shared by CommitQueue and the EarlyWarningSystem.
+ def work_item_log_path(self, patch):
+ return os.path.join(self._log_directory(), "%s.log" % patch.bug_id())
+
+
+# Used to share code between the EWS and commit-queue.
+class PatchProcessingQueue(AbstractPatchQueue):
+ # Subclasses must override.
+ port_name = None
+
+ def __init__(self, options=None):
+        self._port = None # We can't instantiate port here because tool isn't available.
+ AbstractPatchQueue.__init__(self, options)
+
+ # FIXME: This is a hack to map between the old port names and the new port names.
+ def _new_port_name_from_old(self, port_name, platform):
+ # ApplePort.determine_full_port_name asserts if the name doesn't include version.
+ if port_name == 'mac':
+ return 'mac-' + platform.os_version
+ if port_name == 'win':
+ return 'win-future'
+ return port_name
+
+ def begin_work_queue(self):
+ AbstractPatchQueue.begin_work_queue(self)
+ if not self.port_name:
+ return
+ # FIXME: This is only used for self._deprecated_port.flag()
+ self._deprecated_port = DeprecatedPort.port(self.port_name)
+ # FIXME: This violates abstraction
+ self._tool._deprecated_port = self._deprecated_port
+ self._port = self._tool.port_factory.get(self._new_port_name_from_old(self.port_name, self._tool.platform))
+
def _upload_results_archive_for_patch(self, patch, results_archive_zip):
+ if not self._port:
+ self._port = self._tool.port_factory.get(self._new_port_name_from_old(self.port_name, self._tool.platform))
+
bot_id = self._tool.status_server.bot_id or "bot"
- description = "Archive of layout-test-results from %s" % bot_id
+ description = "Archive of layout-test-results from %s for %s" % (bot_id, self._port.name())
# results_archive is a ZipFile object, grab the File object (.fp) to pass to Mechanize for uploading.
results_archive_file = results_archive_zip.fp
# Rewind the file object to start (since Mechanize won't do that automatically)
@@ -255,30 +295,21 @@ class AbstractPatchQueue(AbstractQueue):
comment_text = "The attached test failures were seen while running run-webkit-tests on the %s.\n" % (self.name)
# FIXME: We could easily list the test failures from the archive here,
# currently callers do that separately.
- comment_text += BotInfo(self._tool).summary_text()
+ comment_text += BotInfo(self._tool, self._port.name()).summary_text()
self._tool.bugs.add_attachment_to_bug(patch.bug_id(), results_archive_file, description, filename="layout-test-results.zip", comment_text=comment_text)
- def work_item_log_path(self, patch):
- return os.path.join(self._log_directory(), "%s.log" % patch.bug_id())
-
-class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskDelegate):
+class CommitQueue(PatchProcessingQueue, StepSequenceErrorHandler, CommitQueueTaskDelegate):
name = "commit-queue"
- port_name = "chromium-xvfb"
-
- def __init__(self):
- AbstractPatchQueue.__init__(self)
- self.port = DeprecatedPort.port(self.port_name)
+ port_name = "mac-mountainlion"
# AbstractPatchQueue methods
def begin_work_queue(self):
- # FIXME: This violates abstraction
- self._tool._deprecated_port = self.port
- AbstractPatchQueue.begin_work_queue(self)
+ PatchProcessingQueue.begin_work_queue(self)
self.committer_validator = CommitterValidator(self._tool)
self._expected_failures = ExpectedFailures()
- self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._log_directory())
+ self._layout_test_results_reader = LayoutTestResultsReader(self._tool, self._port.results_directory(), self._log_directory())
def next_work_item(self):
return self._next_patch()
@@ -319,7 +350,7 @@ class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskD
# CommitQueueTaskDelegate methods
def run_command(self, command):
- self.run_webkit_patch(command + [self.port.flag()])
+ self.run_webkit_patch(command + [self._deprecated_port.flag()])
def command_passed(self, message, patch):
self._update_status(message, patch=patch)
@@ -348,10 +379,10 @@ class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskD
reporter.report_flaky_tests(patch, flaky_test_results, results_archive)
def did_pass_testing_ews(self, patch):
- # Currently, chromium-ews is the only testing EWS. Once there are more,
- # should make sure they all pass.
- status = self._tool.status_server.patch_status("chromium-ews", patch.id())
- return status == self._pass_status
+ # Only Mac and Mac WK2 run tests
+ # FIXME: We shouldn't have to hard-code it here.
+ patch_status = self._tool.status_server.patch_status
+ return patch_status("mac-ews", patch.id()) == self._pass_status or patch_status("mac-wk2-ews", patch.id()) == self._pass_status
# StepSequenceErrorHandler methods
@@ -376,10 +407,10 @@ class CommitQueue(AbstractPatchQueue, StepSequenceErrorHandler, CommitQueueTaskD
raise TryAgain()
-class AbstractReviewQueue(AbstractPatchQueue, StepSequenceErrorHandler):
+class AbstractReviewQueue(PatchProcessingQueue, StepSequenceErrorHandler):
"""This is the base-class for the EWS queues and the style-queue."""
def __init__(self, options=None):
- AbstractPatchQueue.__init__(self, options)
+ PatchProcessingQueue.__init__(self, options)
def review_patch(self, patch):
raise NotImplementedError("subclasses must implement")
@@ -387,7 +418,7 @@ class AbstractReviewQueue(AbstractPatchQueue, StepSequenceErrorHandler):
# AbstractPatchQueue methods
def begin_work_queue(self):
- AbstractPatchQueue.begin_work_queue(self)
+ PatchProcessingQueue.begin_work_queue(self)
def next_work_item(self):
return self._next_patch()
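
run_webkit_patch() above now logs a readable "Running: ..." line, captures the child's output with run_command(), and only dumps the full output when the command fails; this is what produces the new expected-log lines in the tests below. A generic sketch of the same announce/capture/dump-on-failure pattern, using the standard library in place of webkitpy's Executive and ScriptError (function and argument names are illustrative):

    import logging
    import subprocess

    _log = logging.getLogger(__name__)


    def run_and_log(args, cwd=None):
        printable = list(args)
        printable[0] = 'webkit-patch'  # print a short alias instead of the full script path
        _log.info("Running: %s", ' '.join(printable))
        try:
            return subprocess.check_output(args, cwd=cwd)
        except subprocess.CalledProcessError as error:
            # Make sure the whole output gets logged if the command failed, then re-raise.
            _log.error(error.output)
            raise
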
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
index 0a32f29be..a09164dde 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
@@ -88,7 +88,7 @@ class AbstractQueueTest(CommandsTest):
if port:
expected_run_args.append("--port=%s" % port)
expected_run_args.extend(run_args)
- tool.executive.run_and_throw_if_fail.assert_called_with(expected_run_args, cwd='/mock-checkout')
+ tool.executive.run_command.assert_called_with(expected_run_args, cwd='/mock-checkout')
def test_run_webkit_patch(self):
self._assert_run_webkit_patch([1])
@@ -134,7 +134,7 @@ class FeederQueueTest(QueuesTest):
"begin_work_queue": self._default_begin_work_queue_logs("feeder-queue"),
"process_work_item": """Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
Warning, attachment 10001 on bug 50000 has invalid committer (non-committer@example.com)
-MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.' and additional comment 'non-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
+MOCK setting flag 'commit-queue' to '-' on attachment '10001' with comment 'Rejecting attachment 10001 from commit-queue.\n\nnon-committer@example.com does not have committer permissions according to http://trac.webkit.org/browser/trunk/Tools/Scripts/webkitpy/common/config/committers.py.
- If you do not have committer rights please read http://webkit.org/coding/contributing.html for instructions on how to use bugzilla flags.
@@ -156,7 +156,7 @@ class AbstractPatchQueueTest(CommandsTest):
queue.bind_to_tool(tool)
queue._options = Mock()
queue._options.port = None
- self.assertEqual(queue._next_patch(), None)
+ self.assertIsNone(queue._next_patch())
tool.status_server = MockStatusServer(work_items=[2, 10000, 10001])
expected_stdout = "MOCK: fetch_attachment: 2 is not a known attachment id\n" # A mock-only message to prevent us from making mistakes.
expected_logs = "MOCK: release_work_item: None 2\n"
@@ -166,18 +166,20 @@ class AbstractPatchQueueTest(CommandsTest):
self.assertEqual(queue._next_patch().id(), 10001)
self.assertEqual(queue._next_patch(), None) # When the queue is empty
+
+class PatchProcessingQueueTest(CommandsTest):
def test_upload_results_archive_for_patch(self):
- queue = AbstractPatchQueue()
+ queue = PatchProcessingQueue()
queue.name = "mock-queue"
tool = MockTool()
queue.bind_to_tool(tool)
queue._options = Mock()
queue._options.port = None
patch = queue._tool.bugs.fetch_attachment(10001)
- expected_logs = """MOCK add_attachment_to_bug: bug_id=50000, description=Archive of layout-test-results from bot filename=layout-test-results.zip mimetype=None
+ expected_logs = """MOCK add_attachment_to_bug: bug_id=50000, description=Archive of layout-test-results from bot for mac-snowleopard filename=layout-test-results.zip mimetype=None
-- Begin comment --
The attached test failures were seen while running run-webkit-tests on the mock-queue.
-Port: MockPort Platform: MockPlatform 1.0
+Port: mac-snowleopard Platform: MockPlatform 1.0
-- End comment --
"""
OutputCapture().assert_outputs(self, queue._upload_results_archive_for_patch, [patch, Mock()], expected_logs=expected_logs)
@@ -236,18 +238,25 @@ class CommitQueueTest(QueuesTest):
tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
- "process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
+ "process_work_item": """Running: webkit-patch --status-host=example.com clean --port=mac
+MOCK: update_status: commit-queue Cleaned working directory
+Running: webkit-patch --status-host=example.com update --port=mac
MOCK: update_status: commit-queue Updated working directory
+Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue Applied patch
+Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue ChangeLog validated
+Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=mac
MOCK: update_status: commit-queue Built patch
+Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --port=mac
MOCK: update_status: commit-queue Passed tests
+Running: webkit-patch --status-host=example.com land-attachment --force-clean --non-interactive --parent-command=commit-queue 10000 --port=mac
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 10000
""",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
- "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'Mock error message'\n",
+ "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, expected_logs=expected_logs)
@@ -257,13 +266,13 @@ MOCK: release_work_item: commit-queue 10000
"process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Updated working directory
MOCK: update_status: commit-queue Patch does not apply
-MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'MOCK script error
+MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMOCK script error
Full output: http://dummy_url'
MOCK: update_status: commit-queue Fail
MOCK: release_work_item: commit-queue 10000
""",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
- "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'Mock error message'\n",
+ "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
queue = CommitQueue()
@@ -283,7 +292,7 @@ MOCK: release_work_item: commit-queue 10000
"process_work_item": """MOCK: update_status: commit-queue Cleaned working directory
MOCK: update_status: commit-queue Updated working directory
MOCK: update_status: commit-queue Patch does not apply
-MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'New failing tests:
+MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nNew failing tests:
mock_test_name.html
another_test_name.html
Full output: http://dummy_url'
@@ -291,7 +300,7 @@ MOCK: update_status: commit-queue Fail
MOCK: release_work_item: commit-queue 10000
""",
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
- "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'Mock error message'\n",
+ "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
queue = CommitQueue()
@@ -307,56 +316,56 @@ MOCK: release_work_item: commit-queue 10000
self.assert_queue_outputs(queue, expected_logs=expected_logs)
def test_rollout(self):
- tool = MockTool(log_executive=True)
+ tool = MockTool()
tool.filesystem.write_text_file('/tmp/layout-test-results/full_results.json', '') # Otherwise the commit-queue will hit a KeyError trying to read the results from the MockFileSystem.
tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
tool.buildbot.light_tree_on_fire()
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
- "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean', '--port=%(port_name)s'], cwd=/mock-checkout
+ "process_work_item": """Running: webkit-patch --status-host=example.com clean --port=%(port)s
MOCK: update_status: commit-queue Cleaned working directory
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update', '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com update --port=%(port)s
MOCK: update_status: commit-queue Updated working directory
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 10000, '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=%(port)s
MOCK: update_status: commit-queue Applied patch
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'validate-changelog', '--non-interactive', 10000, '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10000 --port=%(port)s
MOCK: update_status: commit-queue ChangeLog validated
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build', '--no-clean', '--no-update', '--build-style=release', '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=%(port)s
MOCK: update_status: commit-queue Built patch
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive', '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --port=%(port)s
MOCK: update_status: commit-queue Passed tests
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000, '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com land-attachment --force-clean --non-interactive --parent-command=commit-queue 10000 --port=%(port)s
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 10000
-""" % {"port_name": CommitQueue.port_name},
+""" % {"port": "mac"},
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
- "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.' and additional comment 'Mock error message'\n",
+ "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10000' with comment 'Rejecting attachment 10000 from commit-queue.\n\nMock error message'\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, expected_logs=expected_logs)
def test_rollout_lands(self):
- tool = MockTool(log_executive=True)
+ tool = MockTool()
tool.buildbot.light_tree_on_fire()
rollout_patch = tool.bugs.fetch_attachment(10005) # _patch6, a rollout patch.
assert(rollout_patch.is_rollout())
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("commit-queue"),
- "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean', '--port=%(port_name)s'], cwd=/mock-checkout
+ "process_work_item": """Running: webkit-patch --status-host=example.com clean --port=%(port)s
MOCK: update_status: commit-queue Cleaned working directory
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update', '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com update --port=%(port)s
MOCK: update_status: commit-queue Updated working directory
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 10005, '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10005 --port=%(port)s
MOCK: update_status: commit-queue Applied patch
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'validate-changelog', '--non-interactive', 10005, '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10005 --port=%(port)s
MOCK: update_status: commit-queue ChangeLog validated
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10005, '--port=%(port_name)s'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com land-attachment --force-clean --non-interactive --parent-command=commit-queue 10005 --port=%(port)s
MOCK: update_status: commit-queue Landed patch
MOCK: update_status: commit-queue Pass
MOCK: release_work_item: commit-queue 10005
-""" % {"port_name": CommitQueue.port_name},
+""" % {"port": "mac"},
"handle_script_error": "ScriptError error message\n\nMOCK output\n",
- "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10005' with comment 'Rejecting attachment 10005 from commit-queue.' and additional comment 'Mock error message'\n",
+ "handle_unexpected_error": "MOCK setting flag 'commit-queue' to '-' on attachment '10005' with comment 'Rejecting attachment 10005 from commit-queue.\n\nMock error message'\n",
}
self.assert_queue_outputs(CommitQueue(), tool=tool, work_item=rollout_patch, expected_logs=expected_logs)
@@ -373,9 +382,9 @@ MOCK: update_status: commit-queue Tests passed, but commit failed (checkout out
state = {'patch': None}
OutputCapture().assert_outputs(self, sequence.run_and_handle_errors, [tool, options, state], expected_exception=TryAgain, expected_logs=expected_logs)
- self.assertEqual(options.update, True)
- self.assertEqual(options.build, False)
- self.assertEqual(options.test, False)
+ self.assertTrue(options.update)
+ self.assertFalse(options.build)
+ self.assertFalse(options.test)
def test_manual_reject_during_processing(self):
queue = SecondThoughtsCommitQueue(MockTool())
@@ -384,15 +393,22 @@ MOCK: update_status: commit-queue Tests passed, but commit failed (checkout out
queue._tool.filesystem.write_text_file('/tmp/layout-test-results/webkit_unit_tests_output.xml', '')
queue._options = Mock()
queue._options.port = None
- expected_logs = """MOCK: update_status: commit-queue Cleaned working directory
+ expected_logs = """Running: webkit-patch --status-host=example.com clean --port=mac
+MOCK: update_status: commit-queue Cleaned working directory
+Running: webkit-patch --status-host=example.com update --port=mac
MOCK: update_status: commit-queue Updated working directory
+Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue Applied patch
+Running: webkit-patch --status-host=example.com validate-changelog --check-oops --non-interactive 10000 --port=mac
MOCK: update_status: commit-queue ChangeLog validated
+Running: webkit-patch --status-host=example.com build --no-clean --no-update --build-style=release --port=mac
MOCK: update_status: commit-queue Built patch
+Running: webkit-patch --status-host=example.com build-and-test --no-clean --no-update --test --non-interactive --port=mac
MOCK: update_status: commit-queue Passed tests
MOCK: update_status: commit-queue Retry
MOCK: release_work_item: commit-queue 10000
"""
+ self.maxDiff = None
OutputCapture().assert_outputs(self, queue.process_work_item, [QueuesTest.mock_work_item], expected_logs=expected_logs)
def test_report_flaky_tests(self):
@@ -449,15 +465,15 @@ class StyleQueueTest(QueuesTest):
def test_style_queue_with_style_exception(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("style-queue"),
- "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'], cwd=/mock-checkout
+ "process_work_item": """Running: webkit-patch --status-host=example.com clean
MOCK: update_status: style-queue Cleaned working directory
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com update
MOCK: update_status: style-queue Updated working directory
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 10000], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000
MOCK: update_status: style-queue Applied patch
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-watchlist-local', 50000], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com apply-watchlist-local 50000
MOCK: update_status: style-queue Watchlist applied
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'check-style-local', '--non-interactive', '--quiet'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com check-style-local --non-interactive --quiet
MOCK: update_status: style-queue Style checked
MOCK: update_status: style-queue Pass
MOCK: release_work_item: style-queue 10000
@@ -465,21 +481,24 @@ MOCK: release_work_item: style-queue 10000
"handle_unexpected_error": "Mock error message\n",
"handle_script_error": "MOCK output\n",
}
- tool = MockTool(log_executive=True, executive_throws_when_run=set(['check-style']))
+ tool = MockTool(executive_throws_when_run=set(['check-style']))
self.assert_queue_outputs(StyleQueue(), expected_logs=expected_logs, tool=tool)
def test_style_queue_with_watch_list_exception(self):
expected_logs = {
"begin_work_queue": self._default_begin_work_queue_logs("style-queue"),
- "process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'], cwd=/mock-checkout
+ "process_work_item": """Running: webkit-patch --status-host=example.com clean
MOCK: update_status: style-queue Cleaned working directory
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'update'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com update
MOCK: update_status: style-queue Updated working directory
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-attachment', '--no-update', '--non-interactive', 10000], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com apply-attachment --no-update --non-interactive 10000
MOCK: update_status: style-queue Applied patch
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'apply-watchlist-local', 50000], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com apply-watchlist-local 50000
+Exception for ['echo', '--status-host=example.com', 'apply-watchlist-local', 50000]
+
+MOCK command output
MOCK: update_status: style-queue Unabled to apply watchlist
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'check-style-local', '--non-interactive', '--quiet'], cwd=/mock-checkout
+Running: webkit-patch --status-host=example.com check-style-local --non-interactive --quiet
MOCK: update_status: style-queue Style checked
MOCK: update_status: style-queue Pass
MOCK: release_work_item: style-queue 10000
@@ -487,5 +506,5 @@ MOCK: release_work_item: style-queue 10000
"handle_unexpected_error": "Mock error message\n",
"handle_script_error": "MOCK output\n",
}
- tool = MockTool(log_executive=True, executive_throws_when_run=set(['apply-watchlist-local']))
+ tool = MockTool(executive_throws_when_run=set(['apply-watchlist-local']))
self.assert_queue_outputs(StyleQueue(), expected_logs=expected_logs, tool=tool)
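
The expected-log changes in the queue tests above replace the raw MockExecutive command dumps with "Running: webkit-patch ..." lines and fold the old "additional comment" field into the rejection comment itself. A minimal sketch of the new comment shape, reusing the attachment id and error text from the mocks above; the helper name below is hypothetical and only for illustration, not part of the patch:

def format_rejection_comment(attachment_id, details):
    # Hypothetical helper: the rejection reason and the failure details are now a
    # single comment separated by a blank line, instead of a comment plus a
    # separate "additional comment".
    return "Rejecting attachment %s from commit-queue.\n\n%s" % (attachment_id, details)

print(format_rejection_comment(10000, "Mock error message"))
# Rejecting attachment 10000 from commit-queue.
#
# Mock error message
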
diff --git a/Tools/Scripts/webkitpy/tool/commands/queuestest.py b/Tools/Scripts/webkitpy/tool/commands/queuestest.py
index 314a64021..c633b8479 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queuestest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queuestest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.net.bugzilla import Attachment
from webkitpy.common.system.outputcapture import OutputCapture
@@ -37,7 +37,7 @@ from webkitpy.tool.mocktool import MockTool
class MockQueueEngine(object):
- def __init__(self, name, queue, wakeup_event):
+ def __init__(self, name, queue, wakeup_event, seconds_to_sleep):
pass
def run(self):
@@ -79,7 +79,7 @@ class QueuesTest(unittest.TestCase):
tool = MockTool()
# This is a hack to make it easy for callers to not have to setup a custom MockFileSystem just to test the commit-queue
# the cq tries to read the layout test results, and will hit a KeyError in MockFileSystem if we don't do this.
- tool.filesystem.write_text_file('/mock-results/results.html', "")
+ tool.filesystem.write_text_file('/mock-results/full_results.json', "")
if not expected_stdout:
expected_stdout = {}
if not expected_stderr:
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
index d9209b118..06d42d097 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
@@ -36,9 +36,9 @@ from webkitpy.common.system.executive import ScriptError
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_expectations import TestExpectations, BASELINE_SUFFIX_LIST
-from webkitpy.layout_tests.port import builders
-from webkitpy.layout_tests.port import factory
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.port import builders
+from webkitpy.port import factory
+from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
@@ -49,8 +49,8 @@ def _baseline_name(fs, test_name, suffix):
return fs.splitext(test_name)[0] + TestResultWriter.FILENAME_SUFFIX_EXPECTED + "." + suffix
-class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
- # not overriding execute() - pylint: disable-msg=W0223
+class AbstractRebaseliningCommand(Command):
+ # not overriding execute() - pylint: disable=W0223
move_overwritten_baselines_option = optparse.make_option("--move-overwritten-baselines", action="store_true", default=False,
help="Move overwritten baselines elsewhere in the baseline path. This is for bringing up new ports.")
@@ -88,7 +88,7 @@ class RebaselineTest(AbstractRebaseliningCommand):
self._scm_changes = {'add': []}
def _results_url(self, builder_name):
- return self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name).latest_layout_test_results_url()
+ return self._tool.buildbot.builder_with_name(builder_name).latest_layout_test_results_url()
def _baseline_directory(self, builder_name):
port = self._tool.port_factory.get_from_builder_name(builder_name)
@@ -152,7 +152,7 @@ class RebaselineTest(AbstractRebaseliningCommand):
path = port.path_to_test_expectations_file()
lock = self._tool.make_file_lock(path + '.lock')
lock.acquire_lock()
- expectations = TestExpectations(port, include_overrides=False)
+ expectations = TestExpectations(port, include_generic=False, include_overrides=False)
for test_configuration in port.all_test_configurations():
if test_configuration.version == port.test_configuration().version:
expectationsString = expectations.remove_configuration_from_test(test_name, test_configuration)
@@ -266,7 +266,7 @@ class AnalyzeBaselines(AbstractRebaseliningCommand):
class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
- # not overriding execute() - pylint: disable-msg=W0223
+ # not overriding execute() - pylint: disable=W0223
def _run_webkit_patch(self, args, verbose):
try:
@@ -454,22 +454,18 @@ class Rebaseline(AbstractParallelRebaselineCommand):
])
def _builders_to_pull_from(self):
- chromium_buildbot_builder_names = []
webkit_buildbot_builder_names = []
for name in builders.all_builder_names():
- if self._tool.port_factory.get_from_builder_name(name).is_chromium():
- chromium_buildbot_builder_names.append(name)
- else:
- webkit_buildbot_builder_names.append(name)
+ webkit_buildbot_builder_names.append(name)
- titles = ["build.webkit.org bots", "build.chromium.org bots"]
- lists = [webkit_buildbot_builder_names, chromium_buildbot_builder_names]
+ titles = ["build.webkit.org bots"]
+ lists = [webkit_buildbot_builder_names]
chosen_names = self._tool.user.prompt_with_multiple_lists("Which builder to pull results from:", titles, lists, can_choose_multiple=True)
return [self._builder_with_name(name) for name in chosen_names]
def _builder_with_name(self, name):
- return self._tool.buildbot_for_builder_name(name).builder_with_name(name)
+ return self._tool.buildbot.builder_with_name(name)
def _tests_to_update(self, builder):
failing_tests = builder.latest_layout_test_results().tests_matching_failure_types([test_failures.FailureTextMismatch])
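
For orientation, a condensed sketch of the expectations-update flow that the RebaselineTest hunk above touches. The calls shown in the hunk (path_to_test_expectations_file, make_file_lock, acquire_lock, the new include_generic=False flag, remove_configuration_from_test) are taken from the patch; the try/finally, release_lock, and the write-back step are assumptions based on the usual pattern and are not shown in this hunk:

from webkitpy.layout_tests.models.test_expectations import TestExpectations

def remove_expectation_for_current_version(tool, port, test_name):
    # Sketch only: lock the expectations file, rebuild the expectations without
    # generic or override lines, drop the entry for the port's current
    # configuration, then write the result back.
    path = port.path_to_test_expectations_file()
    lock = tool.make_file_lock(path + '.lock')
    lock.acquire_lock()
    try:
        expectations = TestExpectations(port, include_generic=False, include_overrides=False)
        expectations_string = None
        for test_configuration in port.all_test_configurations():
            if test_configuration.version == port.test_configuration().version:
                expectations_string = expectations.remove_configuration_from_test(test_name, test_configuration)
        if expectations_string is not None:
            tool.filesystem.write_text_file(path, expectations_string)
    finally:
        lock.release_lock()  # assumed counterpart to acquire_lock()
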
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
index cc25fae2b..43a8786fe 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
@@ -39,15 +39,15 @@ from webkitpy.tool.mocktool import MockTool, MockOptions
class _BaseTestCase(unittest.TestCase):
MOCK_WEB_RESULT = 'MOCK Web result, convert 404 to None=True'
- WEB_PREFIX = 'http://example.com/f/builders/WebKit Mac10.7/results/layout-test-results'
+ WEB_PREFIX = 'http://example.com/f/builders/Apple Lion Release WK1 (Tests)/results/layout-test-results'
command_constructor = None
def setUp(self):
self.tool = MockTool()
- self.command = self.command_constructor() # lint warns that command_constructor might not be set, but this is intentional; pylint: disable-msg=E1102
+ self.command = self.command_constructor() # lint warns that command_constructor might not be set, but this is intentional; pylint: disable=E1102
self.command.bind_to_tool(self.tool)
- self.lion_port = self.tool.port_factory.get_from_builder_name("WebKit Mac10.7")
+ self.lion_port = self.tool.port_factory.get_from_builder_name("Apple Lion Release WK1 (Tests)")
self.lion_expectations_path = self.lion_port.path_to_test_expectations_file()
# FIXME: we should override builders._exact_matches here to point to a set
@@ -78,20 +78,21 @@ class TestRebaselineTest(_BaseTestCase):
def setUp(self):
super(TestRebaselineTest, self).setUp()
- self.options = MockOptions(builder="WebKit Mac10.7", test="userscripts/another-test.html", suffixes="txt",
+ self.options = MockOptions(builder="Apple Lion Release WK1 (Tests)", test="userscripts/another-test.html", suffixes="txt",
move_overwritten_baselines_to=None, results_directory=None)
def test_baseline_directory(self):
command = self.command
- self.assertEqual(command._baseline_directory("Apple Win XP Debug (Tests)"), "/mock-checkout/LayoutTests/platform/win-xp")
- self.assertEqual(command._baseline_directory("Apple Win 7 Release (Tests)"), "/mock-checkout/LayoutTests/platform/win")
- self.assertEqual(command._baseline_directory("Apple Lion Release WK1 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-lion")
- self.assertEqual(command._baseline_directory("Apple Lion Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-wk2")
- self.assertEqual(command._baseline_directory("GTK Linux 32-bit Release"), "/mock-checkout/LayoutTests/platform/gtk")
- self.assertEqual(command._baseline_directory("EFL Linux 64-bit Release WK2"), "/mock-checkout/LayoutTests/platform/efl-wk2")
- self.assertEqual(command._baseline_directory("Qt Linux Release"), "/mock-checkout/LayoutTests/platform/qt")
- self.assertEqual(command._baseline_directory("WebKit Mac10.7"), "/mock-checkout/LayoutTests/platform/chromium-mac-lion")
- self.assertEqual(command._baseline_directory("WebKit Mac10.6"), "/mock-checkout/LayoutTests/platform/chromium-mac-snowleopard")
+ self.assertMultiLineEqual(command._baseline_directory("Apple Win XP Debug (Tests)"), "/mock-checkout/LayoutTests/platform/win-xp")
+ self.assertMultiLineEqual(command._baseline_directory("Apple Win 7 Release (Tests)"), "/mock-checkout/LayoutTests/platform/win")
+ self.assertMultiLineEqual(command._baseline_directory("Apple Lion Release WK1 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-lion")
+ self.assertMultiLineEqual(command._baseline_directory("Apple Lion Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/mac-wk2")
+ self.assertMultiLineEqual(command._baseline_directory("Apple MountainLion Release WK1 (Tests)"), "/mock-checkout/LayoutTests/platform/mac")
+ self.assertMultiLineEqual(command._baseline_directory("Apple MountainLion Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/mac")
+ self.assertMultiLineEqual(command._baseline_directory("GTK Linux 64-bit Debug"), "/mock-checkout/LayoutTests/platform/gtk-wk1")
+ self.assertMultiLineEqual(command._baseline_directory("GTK Linux 64-bit Release WK2 (Tests)"), "/mock-checkout/LayoutTests/platform/gtk-wk2")
+ self.assertMultiLineEqual(command._baseline_directory("EFL Linux 64-bit Release WK2"), "/mock-checkout/LayoutTests/platform/efl-wk2")
+ self.assertMultiLineEqual(command._baseline_directory("Qt Linux Release"), "/mock-checkout/LayoutTests/platform/qt")
def test_rebaseline_updates_expectations_file_noop(self):
self._zero_out_test_expectations()
@@ -105,12 +106,12 @@ Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
self.options.suffixes = "png,wav,txt"
self.command._rebaseline_test_and_update_expectations(self.options)
- self.assertEqual(self.tool.web.urls_fetched,
+ self.assertItemsEqual(self.tool.web.urls_fetched,
[self.WEB_PREFIX + '/userscripts/another-test-actual.png',
self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
new_expectations = self._read(self.lion_expectations_path)
- self.assertEqual(new_expectations, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
+ self.assertMultiLineEqual(new_expectations, """Bug(B) [ Mac Linux XP Debug ] fast/dom/Window/window-postmessage-clone-really-deep-array.html [ Pass ]
Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
""")
@@ -121,79 +122,78 @@ Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
self.options.suffixes = 'png,wav,txt'
self.command._rebaseline_test_and_update_expectations(self.options)
- self.assertEqual(self.tool.web.urls_fetched,
+ self.assertItemsEqual(self.tool.web.urls_fetched,
[self.WEB_PREFIX + '/userscripts/another-test-actual.png',
self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
new_expectations = self._read(self.lion_expectations_path)
- self.assertEqual(new_expectations, "Bug(x) [ MountainLion SnowLeopard ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
+ self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
def test_rebaseline_does_not_include_overrides(self):
self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nBug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
- self._write(self.lion_port.path_from_chromium_base('skia', 'skia_test_expectations.txt'), "Bug(y) [ Mac ] other-test.html [ Failure ]\n")
self._write("userscripts/another-test.html", "Dummy test contents")
self.options.suffixes = 'png,wav,txt'
self.command._rebaseline_test_and_update_expectations(self.options)
- self.assertEqual(self.tool.web.urls_fetched,
+ self.assertItemsEqual(self.tool.web.urls_fetched,
[self.WEB_PREFIX + '/userscripts/another-test-actual.png',
self.WEB_PREFIX + '/userscripts/another-test-actual.wav',
self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
new_expectations = self._read(self.lion_expectations_path)
- self.assertEqual(new_expectations, "Bug(x) [ MountainLion SnowLeopard ] userscripts/another-test.html [ ImageOnlyFailure ]\nBug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
+ self.assertMultiLineEqual(new_expectations, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nBug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
def test_rebaseline_test(self):
- self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", None, "txt", self.WEB_PREFIX)
- self.assertEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
+ self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", None, "txt", self.WEB_PREFIX)
+ self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
def test_rebaseline_test_with_results_directory(self):
self._write(self.lion_expectations_path, "Bug(x) [ Mac ] userscripts/another-test.html [ ImageOnlyFailure ]\nbug(z) [ Linux ] userscripts/another-test.html [ ImageOnlyFailure ]\n")
self.options.results_directory = '/tmp'
self.command._rebaseline_test_and_update_expectations(self.options)
- self.assertEqual(self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt'])
+ self.assertItemsEqual(self.tool.web.urls_fetched, ['file:///tmp/userscripts/another-test-actual.txt'])
def test_rebaseline_test_and_print_scm_changes(self):
self.command._print_scm_changes = True
self.command._scm_changes = {'add': [], 'delete': []}
self.tool._scm.exists = lambda x: False
- self.command._rebaseline_test("WebKit Linux", "userscripts/another-test.html", None, "txt", None)
+ self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", None, "txt", None)
- self.assertEqual(self.command._scm_changes, {'add': ['/mock-checkout/LayoutTests/platform/chromium-linux/userscripts/another-test-expected.txt'], 'delete': []})
+ self.assertDictEqual(self.command._scm_changes, {'add': ['/mock-checkout/LayoutTests/platform/mac-lion/userscripts/another-test-expected.txt'], 'delete': []})
def test_rebaseline_and_copy_test(self):
self._write("userscripts/another-test-expected.txt", "generic result")
- self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", None)
+ self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", None)
- self.assertEqual(self._read('platform/chromium-mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
- self.assertEqual(self._read('platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt'), 'generic result')
+ self.assertMultiLineEqual(self._read('platform/mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
+ self.assertMultiLineEqual(self._read('platform/mac-wk2/userscripts/another-test-expected.txt'), 'generic result')
def test_rebaseline_and_copy_test_no_existing_result(self):
- self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", None)
+ self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", None)
- self.assertEqual(self._read('platform/chromium-mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
- self.assertFalse(self.tool.filesystem.exists(self._expand('platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt')))
+ self.assertMultiLineEqual(self._read('platform/mac-lion/userscripts/another-test-expected.txt'), self.MOCK_WEB_RESULT)
+ self.assertFalse(self.tool.filesystem.exists(self._expand('platform/mac-lion-wk2/userscripts/another-test-expected.txt')))
def test_rebaseline_and_copy_test_with_lion_result(self):
- self._write("platform/chromium-mac-lion/userscripts/another-test-expected.txt", "original lion result")
+ self._write("platform/mac-lion/userscripts/another-test-expected.txt", "original lion result")
- self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", self.WEB_PREFIX)
+ self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", self.WEB_PREFIX)
- self.assertEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
- self.assertEqual(self._read("platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt"), "original lion result")
- self.assertEqual(self._read("platform/chromium-mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
+ self.assertItemsEqual(self.tool.web.urls_fetched, [self.WEB_PREFIX + '/userscripts/another-test-actual.txt'])
+ self.assertMultiLineEqual(self._read("platform/mac-wk2/userscripts/another-test-expected.txt"), "original lion result")
+ self.assertMultiLineEqual(self._read("platform/mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
def test_rebaseline_and_copy_no_overwrite_test(self):
- self._write("platform/chromium-mac-lion/userscripts/another-test-expected.txt", "original lion result")
- self._write("platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt", "original snowleopard result")
+ self._write("platform/mac-lion/userscripts/another-test-expected.txt", "original lion result")
+ self._write("platform/mac-lion-wk2/userscripts/another-test-expected.txt", "original lion wk2 result")
- self.command._rebaseline_test("WebKit Mac10.7", "userscripts/another-test.html", ["chromium-mac-snowleopard"], "txt", None)
+ self.command._rebaseline_test("Apple Lion Release WK1 (Tests)", "userscripts/another-test.html", ["mac-lion-wk2"], "txt", None)
- self.assertEqual(self._read("platform/chromium-mac-snowleopard/userscripts/another-test-expected.txt"), "original snowleopard result")
- self.assertEqual(self._read("platform/chromium-mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
+ self.assertMultiLineEqual(self._read("platform/mac-lion-wk2/userscripts/another-test-expected.txt"), "original lion wk2 result")
+ self.assertMultiLineEqual(self._read("platform/mac-lion/userscripts/another-test-expected.txt"), self.MOCK_WEB_RESULT)
def test_rebaseline_test_internal_with_move_overwritten_baselines_to(self):
self.tool.executive = MockExecutive2()
@@ -220,8 +220,8 @@ Bug(A) [ Debug ] : fast/css/large-list-of-rules-crash.html [ Failure ]
out, _, _ = oc.restore_output()
builders._exact_matches = old_exact_matches
- self.assertEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
- self.assertEqual(out, '{"add": []}\n')
+ self.assertMultiLineEqual(self._read(self.tool.filesystem.join(port.layout_tests_dir(), 'platform/test-mac-leopard/failures/expected/image-expected.txt')), 'original snowleopard result')
+ self.assertMultiLineEqual(out, '{"add": []}\n')
class TestRebaselineJson(_BaseTestCase):
@@ -335,8 +335,8 @@ class TestRebaselineExpectations(_BaseTestCase):
# FIXME: change this to use the test- ports.
calls = filter(lambda x: x != ['qmake', '-v'], self.tool.executive.calls)
- self.assertTrue(len(calls) == 1)
- self.assertTrue(len(calls[0]) == 26)
+ self.assertEqual(len(calls), 1)
+ self.assertEqual(len(calls[0]), 22)
def test_rebaseline_expectations_noop(self):
self._zero_out_test_expectations()
@@ -362,7 +362,7 @@ class TestRebaselineExpectations(_BaseTestCase):
'Bug(y) userscripts/test.html [ Crash ]\n')}
self._write('/userscripts/another-test.html', '')
- self.assertEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
+ self.assertDictEqual(self.command._tests_to_rebaseline(self.lion_port), {'userscripts/another-test.html': set(['png', 'txt', 'wav'])})
self.assertEqual(self._read(self.lion_expectations_path), '')
@@ -383,7 +383,7 @@ class TestAnalyzeBaselines(_BaseTestCase):
self.tool.port_factory.get = (lambda port_name=None, options=None: self.port)
self.lines = []
self.command._optimizer_class = _FakeOptimizer
- self.command._write = (lambda msg: self.lines.append(msg)) # pylint bug warning about unnecessary lambda? pylint: disable-msg=W0108
+ self.command._write = (lambda msg: self.lines.append(msg)) # pylint bug warning about unnecessary lambda? pylint: disable=W0108
def test_default(self):
self.command.execute(MockOptions(suffixes='txt', missing=False, platform=None), ['passes/text.html'], self.tool)
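
The test file above now imports unittest2 as unittest and switches to its richer assertions (assertMultiLineEqual, assertItemsEqual, assertDictEqual). A tiny self-contained illustration of what those assertions provide; unittest2 backports them for Python 2.6, and Python 2.7's unittest has them built in:

import unittest2 as unittest

class AssertionDemo(unittest.TestCase):
    def test_demo(self):
        # assertMultiLineEqual prints a unified diff of the two strings on failure.
        self.assertMultiLineEqual("line one\nline two\n", "line one\nline two\n")
        # assertItemsEqual ignores ordering, matching how urls_fetched is compared above.
        self.assertItemsEqual(["a.png", "b.txt"], ["b.txt", "a.png"])
        # assertDictEqual reports missing or extra keys on failure.
        self.assertDictEqual({"add": []}, {"add": []})

if __name__ == "__main__":
    unittest.main()
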
diff --git a/Tools/Scripts/webkitpy/tool/commands/roll.py b/Tools/Scripts/webkitpy/tool/commands/roll.py
deleted file mode 100644
index 37481b2b8..000000000
--- a/Tools/Scripts/webkitpy/tool/commands/roll.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
-
-from webkitpy.tool import steps
-
-
-class RollChromiumDEPS(AbstractSequencedCommand):
- name = "roll-chromium-deps"
- help_text = "Updates Chromium DEPS (defaults to the last-known good revision of Chromium)"
- argument_names = "[CHROMIUM_REVISION]"
- steps = [
- steps.UpdateChromiumDEPS,
- steps.PrepareChangeLogForDEPSRoll,
- steps.ConfirmDiff,
- steps.Commit,
- ]
-
- def _prepare_state(self, options, args, tool):
- return {
- "chromium_revision": (args and args[0]),
- }
-
-
-class PostChromiumDEPSRoll(AbstractSequencedCommand):
- name = "post-chromium-deps-roll"
- help_text = "Posts a patch to update Chromium DEPS (revision defaults to the last-known good revision of Chromium)"
- argument_names = "CHROMIUM_REVISION CHROMIUM_REVISION_NAME"
- steps = [
- steps.CleanWorkingDirectory,
- steps.Update,
- steps.UpdateChromiumDEPS,
- steps.PrepareChangeLogForDEPSRoll,
- steps.CreateBug,
- steps.PostDiff,
- ]
-
- def _prepare_state(self, options, args, tool):
- options.review = False
- options.request_commit = True
-
- chromium_revision = args[0]
- chromium_revision_name = args[1]
- return {
- "chromium_revision": chromium_revision,
- "bug_title": "Roll Chromium DEPS to %s" % chromium_revision_name,
- "bug_description": "A DEPS roll a day keeps the build break away.",
- }
diff --git a/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py b/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py
deleted file mode 100644
index 1dae497bc..000000000
--- a/Tools/Scripts/webkitpy/tool/commands/roll_unittest.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (C) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from webkitpy.thirdparty.mock import Mock
-from webkitpy.tool.commands.commandtest import CommandsTest
-from webkitpy.tool.commands.roll import *
-from webkitpy.tool.mocktool import MockOptions, MockTool
-
-
-class RollCommandsTest(CommandsTest):
- def test_update_chromium_deps(self):
- expected_logs = """Updating Chromium DEPS to 6764
-MOCK: MockDEPS.write_variable(chromium_rev, 6764)
-MOCK: user.open_url: file://...
-Was that diff correct?
-Committed r49824: <http://trac.webkit.org/changeset/49824>
-"""
- self.assert_execute_outputs(RollChromiumDEPS(), [6764], expected_logs=expected_logs)
-
- def test_update_chromium_deps_older_revision(self):
- options = MockOptions(non_interactive=False)
- expected_logs = """Current Chromium DEPS revision 6564 is newer than 5764.
-Unable to update Chromium DEPS
-"""
- self.assert_execute_outputs(RollChromiumDEPS(), [5764], options=options, expected_logs=expected_logs, expected_exception=SystemExit)
-
-
-class PostRollCommandsTest(CommandsTest):
- def test_prepare_state(self):
- postroll = PostChromiumDEPSRoll()
- options = MockOptions()
- tool = MockTool()
- lkgr_state = postroll._prepare_state(options, [None, "last-known good revision"], tool)
- self.assertEqual(None, lkgr_state["chromium_revision"])
- self.assertEqual("Roll Chromium DEPS to last-known good revision", lkgr_state["bug_title"])
- revision_state = postroll._prepare_state(options, ["1234", "r1234"], tool)
- self.assertEqual("1234", revision_state["chromium_revision"])
- self.assertEqual("Roll Chromium DEPS to r1234", revision_state["bug_title"])
diff --git a/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py b/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py
index 0f91be3ef..aea3b51bf 100644
--- a/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py
+++ b/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py
@@ -38,7 +38,7 @@ _log = logging.getLogger(__name__)
class SheriffBot(AbstractQueue, StepSequenceErrorHandler):
- name = "sheriff-bot"
+ name = "webkitbot"
watchers = AbstractQueue.watchers + [
"abarth@webkit.org",
"eric@webkit.org",
@@ -49,7 +49,7 @@ class SheriffBot(AbstractQueue, StepSequenceErrorHandler):
def begin_work_queue(self):
AbstractQueue.begin_work_queue(self)
self._sheriff = Sheriff(self._tool, self)
- self._irc_bot = IRCBot("sheriffbot", self._tool, self._sheriff, irc_commands)
+ self._irc_bot = IRCBot(self.name, self._tool, self._sheriff, irc_commands)
self._tool.ensure_irc_connected(self._irc_bot.irc_delegate())
def work_item_log_path(self, failure_map):
diff --git a/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py b/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py
index 9aa57b123..76caaf3c0 100644
--- a/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/sheriffbot_unittest.py
@@ -26,8 +26,22 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.tool.commands.queuestest import QueuesTest
+from webkitpy.tool.commands.queuestest import QueuesTest, MockQueueEngine
+from webkitpy.tool.commands import SheriffBot
+from webkitpy.tool.mocktool import MockTool, MockOptions
+from webkitpy.tool.bot.irc_command import Rollout
class SheriffBotTest(QueuesTest):
- pass # No unittests as the moment.
+ def test_command_aliases(self):
+ tool = MockTool()
+ options = MockOptions()
+ options.ensure_value("confirm", False)
+ options.ensure_value("seconds_to_sleep", 120)
+ sheriffbot = SheriffBot()
+ sheriffbot.execute(options, [], tool, MockQueueEngine)
+ sheriffbot.begin_work_queue()
+ irc_bot = sheriffbot._irc_bot
+ # Test Rollout command aliases
+ revert_command, args = irc_bot._parse_command_and_args("revert")
+ self.assertEqual(revert_command, Rollout)
diff --git a/Tools/Scripts/webkitpy/tool/commands/suggestnominations.py b/Tools/Scripts/webkitpy/tool/commands/suggestnominations.py
index c197a1116..6244c295f 100644
--- a/Tools/Scripts/webkitpy/tool/commands/suggestnominations.py
+++ b/Tools/Scripts/webkitpy/tool/commands/suggestnominations.py
@@ -32,12 +32,119 @@ import re
from webkitpy.common.checkout.changelog import ChangeLogEntry
from webkitpy.common.config.committers import CommitterList
-from webkitpy.tool import steps
from webkitpy.tool.grammar import join_with_separators
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
-class SuggestNominations(AbstractDeclarativeCommand):
+class CommitLogError(Exception):
+ def __init__(self):
+ Exception.__init__(self)
+
+
+class CommitLogMissingReviewer(CommitLogError):
+ def __init__(self):
+ CommitLogError.__init__(self)
+
+
+class AbstractCommitLogCommand(Command):
+ _leading_indent_regexp = re.compile(r"^[ ]{4}", re.MULTILINE)
+ _reviewed_by_regexp = re.compile(ChangeLogEntry.reviewed_by_regexp, re.MULTILINE)
+ _patch_by_regexp = re.compile(r'^Patch by (?P<name>.+?)\s+<(?P<email>[^<>]+)> on (?P<date>\d{4}-\d{2}-\d{2})$', re.MULTILINE)
+ _committer_regexp = re.compile(r'^Author: (?P<email>\S+)\s+<[^>]+>$', re.MULTILINE)
+ _date_regexp = re.compile(r'^Date: (?P<date>\d{4}-\d{2}-\d{2}) (?P<time>\d{2}:\d{2}:\d{2}) [\+\-]\d{4}$', re.MULTILINE)
+ _revision_regexp = re.compile(r'^git-svn-id: http://svn.webkit.org/repository/webkit/trunk@(?P<svnid>\d+) (?P<gitid>[0-9a-f\-]{36})$', re.MULTILINE)
+
+ def __init__(self, options=None):
+ options = options or []
+ options += [
+ make_option("--max-commit-age", action="store", dest="max_commit_age", type="int", default=9, help="Specify maximum commit age to consider (in months)."),
+ ]
+ options = sorted(options, cmp=lambda a, b: cmp(a._long_opts, b._long_opts))
+ super(AbstractCommitLogCommand, self).__init__(options=options)
+ # FIXME: This should probably be on the tool somewhere.
+ self._committer_list = CommitterList()
+
+ def _init_options(self, options):
+ self.verbose = options.verbose
+ self.max_commit_age = options.max_commit_age
+
+ # FIXME: This should move to scm.py
+ def _recent_commit_messages(self):
+ git_log = self._tool.executive.run_command(['git', 'log', '--date=iso', '--since="%s months ago"' % self.max_commit_age])
+ messages = re.compile(r"^commit \w{40}$", re.MULTILINE).split(git_log)[1:] # Ignore the first message which will be empty.
+ for message in messages:
+ # Unindent all the lines
+ (message, _) = self._leading_indent_regexp.subn("", message)
+ yield message.lstrip() # Remove any leading newlines from the log message.
+
+ def _author_name_from_email(self, email):
+ contributor = self._committer_list.contributor_by_email(email)
+ return contributor.full_name if contributor else None
+
+ def _contributor_from_email(self, email):
+ contributor = self._committer_list.contributor_by_email(email)
+ return contributor if contributor else None
+
+ def _parse_commit_message(self, commit_message):
+ committer_match = self._committer_regexp.search(commit_message)
+ if not committer_match:
+ raise CommitLogError
+
+ committer_email = committer_match.group('email')
+ if not committer_email:
+ raise CommitLogError
+
+ committer = self._contributor_from_email(committer_email)
+ if not committer:
+ raise CommitLogError
+
+ commit_date_match = self._date_regexp.search(commit_message)
+ if not commit_date_match:
+ raise CommitLogError
+ commit_date = commit_date_match.group('date')
+
+ revision_match = self._revision_regexp.search(commit_message)
+ if not revision_match:
+ raise CommitLogError
+ revision = revision_match.group('svnid')
+
+ # Look for "Patch by" line first, which is used for non-committer contributors;
+ # otherwise, use committer info determined above.
+ author_match = self._patch_by_regexp.search(commit_message)
+ if not author_match:
+ author_match = committer_match
+
+ author_email = author_match.group('email')
+ if not author_email:
+ author_email = committer_email
+
+ author_name = author_match.group('name') if 'name' in author_match.groupdict() else None
+ if not author_name:
+ author_name = self._author_name_from_email(author_email)
+ if not author_name:
+ raise CommitLogError
+
+ contributor = self._contributor_from_email(author_email)
+ if contributor and author_name != contributor.full_name and contributor.full_name:
+ author_name = contributor.full_name
+
+ reviewer_match = self._reviewed_by_regexp.search(commit_message)
+ if not reviewer_match:
+ raise CommitLogMissingReviewer
+ reviewers = reviewer_match.group('reviewer')
+
+ return {
+ 'committer': committer,
+ 'commit_date': commit_date,
+ 'revision': revision,
+ 'author_email': author_email,
+ 'author_name': author_name,
+ 'contributor': contributor,
+ 'reviewers': reviewers,
+ }
+
+
+class SuggestNominations(AbstractCommitLogCommand):
name = "suggest-nominations"
help_text = "Suggest contributors for committer/reviewer nominations"
@@ -45,118 +152,70 @@ class SuggestNominations(AbstractDeclarativeCommand):
options = [
make_option("--committer-minimum", action="store", dest="committer_minimum", type="int", default=10, help="Specify minimum patch count for Committer nominations."),
make_option("--reviewer-minimum", action="store", dest="reviewer_minimum", type="int", default=80, help="Specify minimum patch count for Reviewer nominations."),
- make_option("--max-commit-age", action="store", dest="max_commit_age", type="int", default=9, help="Specify max commit age to consider for nominations (in months)."),
make_option("--show-commits", action="store_true", dest="show_commits", default=False, help="Show commit history with nomination suggestions."),
]
-
- AbstractDeclarativeCommand.__init__(self, options=options)
- # FIXME: This should probably be on the tool somewhere.
- self._committer_list = CommitterList()
-
- _counters_by_name = {}
- _counters_by_email = {}
+ super(SuggestNominations, self).__init__(options=options)
def _init_options(self, options):
+ super(SuggestNominations, self)._init_options(options)
self.committer_minimum = options.committer_minimum
self.reviewer_minimum = options.reviewer_minimum
- self.max_commit_age = options.max_commit_age
self.show_commits = options.show_commits
- self.verbose = options.verbose
- # FIXME: This should move to scm.py
- def _recent_commit_messages(self):
- git_log = self._tool.executive.run_command(['git', 'log', '--since="%s months ago"' % self.max_commit_age])
- match_git_svn_id = re.compile(r"\n\n git-svn-id:.*\n", re.MULTILINE)
- match_get_log_lines = re.compile(r"^\S.*\n", re.MULTILINE)
- match_leading_indent = re.compile(r"^[ ]{4}", re.MULTILINE)
+ def _count_commit(self, commit, analysis):
+ author_name = commit['author_name']
+ author_email = commit['author_email']
+ revision = commit['revision']
+ commit_date = commit['commit_date']
+
+ # See if we already have a contributor with this author_name or email
+ counter_by_name = analysis['counters_by_name'].get(author_name)
+ counter_by_email = analysis['counters_by_email'].get(author_email)
+ if counter_by_name:
+ if counter_by_email:
+ if counter_by_name != counter_by_email:
+ # Merge these two counters This is for the case where we had
+ # John Smith (jsmith@gmail.com) and Jonathan Smith (jsmith@apple.com)
+ # and just found a John Smith (jsmith@apple.com). Now we know the
+ # two names are the same person
+ counter_by_name['names'] |= counter_by_email['names']
+ counter_by_name['emails'] |= counter_by_email['emails']
+ counter_by_name['count'] += counter_by_email.get('count', 0)
+ analysis['counters_by_email'][author_email] = counter_by_name
+ else:
+ # Add email to the existing counter
+ analysis['counters_by_email'][author_email] = counter_by_name
+ counter_by_name['emails'] |= set([author_email])
+ else:
+ if counter_by_email:
+ # Add name to the existing counter
+ analysis['counters_by_name'][author_name] = counter_by_email
+ counter_by_email['names'] |= set([author_name])
+ else:
+ # Create new counter
+ new_counter = {'names': set([author_name]), 'emails': set([author_email]), 'latest_name': author_name, 'latest_email': author_email, 'commits': ""}
+ analysis['counters_by_name'][author_name] = new_counter
+ analysis['counters_by_email'][author_email] = new_counter
- messages = re.split(r"commit \w{40}", git_log)[1:] # Ignore the first message which will be empty.
- for message in messages:
- # Remove any lines from git and unindent all the lines
- (message, _) = match_git_svn_id.subn("", message)
- (message, _) = match_get_log_lines.subn("", message)
- (message, _) = match_leading_indent.subn("", message)
- yield message.lstrip() # Remove any leading newlines from the log message.
+ assert(analysis['counters_by_name'][author_name] == analysis['counters_by_email'][author_email])
+ counter = analysis['counters_by_name'][author_name]
+ counter['count'] = counter.get('count', 0) + 1
- # e.g. Patch by Eric Seidel <eric@webkit.org> on 2011-09-15
- patch_by_regexp = r'^Patch by (?P<name>.+?)\s+<(?P<email>[^<>]+)> on (?P<date>\d{4}-\d{2}-\d{2})$'
+ if revision.isdigit():
+ revision = "http://trac.webkit.org/changeset/" + revision
+ counter['commits'] += " commit: %s on %s by %s (%s)\n" % (revision, commit_date, author_name, author_email)
def _count_recent_patches(self):
- # This entire block could be written as a map/reduce over the messages.
- for message in self._recent_commit_messages():
- # FIXME: This should use ChangeLogEntry to do the entire parse instead
- # of grabbing at its regexps.
- dateline_match = re.match(ChangeLogEntry.date_line_regexp, message, re.MULTILINE)
- if not dateline_match:
- # Modern commit messages don't just dump the ChangeLog entry, but rather
- # have a special Patch by line for non-committers.
- dateline_match = re.search(self.patch_by_regexp, message, re.MULTILINE)
- if not dateline_match:
- continue
-
- author_email = dateline_match.group("email")
- if not author_email:
- continue
-
- # We only care about reviewed patches, so make sure it has a valid reviewer line.
- reviewer_match = re.search(ChangeLogEntry.reviewed_by_regexp, message, re.MULTILINE)
- # We might also want to validate the reviewer name against the committer list.
- if not reviewer_match or not reviewer_match.group("reviewer"):
- continue
-
- author_name = dateline_match.group("name")
- if not author_name:
+ analysis = {
+ 'counters_by_name': {},
+ 'counters_by_email': {},
+ }
+ for commit_message in self._recent_commit_messages():
+ try:
+ self._count_commit(self._parse_commit_message(commit_message), analysis)
+ except CommitLogError, exception:
continue
-
- if re.search("([^a-zA-Z]and[^a-zA-Z])|(,)|(@)", author_name):
- # This entry seems to have multiple reviewers, or invalid characters, so reject it.
- continue
-
- svn_id_match = re.search(ChangeLogEntry.svn_id_regexp, message, re.MULTILINE)
- if svn_id_match:
- svn_id = svn_id_match.group("svnid")
- if not svn_id_match or not svn_id:
- svn_id = "unknown"
- commit_date = dateline_match.group("date")
-
- # See if we already have a contributor with this name or email
- counter_by_name = self._counters_by_name.get(author_name)
- counter_by_email = self._counters_by_email.get(author_email)
- if counter_by_name:
- if counter_by_email:
- if counter_by_name != counter_by_email:
- # Merge these two counters This is for the case where we had
- # John Smith (jsmith@gmail.com) and Jonathan Smith (jsmith@apple.com)
- # and just found a John Smith (jsmith@apple.com). Now we know the
- # two names are the same person
- counter_by_name['names'] |= counter_by_email['names']
- counter_by_name['emails'] |= counter_by_email['emails']
- counter_by_name['count'] += counter_by_email.get('count', 0)
- self._counters_by_email[author_email] = counter_by_name
- else:
- # Add email to the existing counter
- self._counters_by_email[author_email] = counter_by_name
- counter_by_name['emails'] |= set([author_email])
- else:
- if counter_by_email:
- # Add name to the existing counter
- self._counters_by_name[author_name] = counter_by_email
- counter_by_email['names'] |= set([author_name])
- else:
- # Create new counter
- new_counter = {'names': set([author_name]), 'emails': set([author_email]), 'latest_name': author_name, 'latest_email': author_email, 'commits': ""}
- self._counters_by_name[author_name] = new_counter
- self._counters_by_email[author_email] = new_counter
-
- assert(self._counters_by_name[author_name] == self._counters_by_email[author_email])
- counter = self._counters_by_name[author_name]
- counter['count'] = counter.get('count', 0) + 1
-
- if svn_id.isdigit():
- svn_id = "http://trac.webkit.org/changeset/" + svn_id
- counter['commits'] += " commit: %s on %s by %s (%s)\n" % (svn_id, commit_date, author_name, author_email)
-
- return self._counters_by_email
+ return analysis['counters_by_email']
def _collect_nominations(self, counters_by_email):
nominations = []
@@ -172,7 +231,7 @@ class SuggestNominations(AbstractDeclarativeCommand):
if patch_count >= self.committer_minimum and (not contributor or not contributor.can_commit):
roles.append("committer")
- if patch_count >= self.reviewer_minimum and (not contributor or not contributor.can_review):
+ if patch_count >= self.reviewer_minimum and contributor and contributor.can_commit and not contributor.can_review:
roles.append("reviewer")
if roles:
nominations.append({
@@ -183,7 +242,7 @@ class SuggestNominations(AbstractDeclarativeCommand):
})
return nominations
- def _print_nominations(self, nominations):
+ def _print_nominations(self, nominations, counters_by_email):
def nomination_cmp(a_nomination, b_nomination):
roles_result = cmp(a_nomination['roles'], b_nomination['roles'])
if roles_result:
@@ -197,7 +256,7 @@ class SuggestNominations(AbstractDeclarativeCommand):
# This is a little bit of a hack, but its convienent to just pass the nomination dictionary to the formating operator.
nomination['roles_string'] = join_with_separators(nomination['roles']).upper()
print "%(roles_string)s: %(author_name)s (%(author_email)s) has %(patch_count)s reviewed patches" % nomination
- counter = self._counters_by_email[nomination['author_email']]
+ counter = counters_by_email[nomination['author_email']]
if self.show_commits:
print counter['commits']
@@ -238,10 +297,9 @@ class SuggestNominations(AbstractDeclarativeCommand):
self._init_options(options)
patch_counts = self._count_recent_patches()
nominations = self._collect_nominations(patch_counts)
- self._print_nominations(nominations)
+ self._print_nominations(nominations, patch_counts)
if self.verbose:
self._print_counts(patch_counts)
-
if __name__ == "__main__":
SuggestNominations()
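
The new AbstractCommitLogCommand above parses `git log --date=iso` output directly instead of scraping ChangeLog text. A standalone sketch of that parsing against a message shaped like the mock in the unit test that follows; the regexps here are simplified approximations of the ones defined in the patch:

import re

# Sample mirrors mock_same_author_commit_message from suggestnominations_unittest.py.
SAMPLE = """Author: fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
Date: 2011-09-15 19:56:21 +0000

Value profiles collect no information for global variables
https://bugs.webkit.org/show_bug.cgi?id=68143

Reviewed by Geoffrey Garen.

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95219 268f45cc-cd09-0410-ab3c-d52691b4dbfc
"""

committer = re.search(r'^Author: (?P<email>\S+)\s+<[^>]+>$', SAMPLE, re.MULTILINE)
commit_date = re.search(r'^Date:\s+(?P<date>\d{4}-\d{2}-\d{2})', SAMPLE, re.MULTILINE)
revision = re.search(r'trunk@(?P<svnid>\d+)', SAMPLE)
reviewer = re.search(r'^Reviewed by (?P<reviewer>.+?)\.?$', SAMPLE, re.MULTILINE)

print("%s committed r%s on %s, reviewed by %s" % (
    committer.group('email'), revision.group('svnid'),
    commit_date.group('date'), reviewer.group('reviewer')))
# fpizlo@apple.com committed r95219 on 2011-09-15, reviewed by Geoffrey Garen
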
diff --git a/Tools/Scripts/webkitpy/tool/commands/suggestnominations_unittest.py b/Tools/Scripts/webkitpy/tool/commands/suggestnominations_unittest.py
index 88be25303..63054c516 100644
--- a/Tools/Scripts/webkitpy/tool/commands/suggestnominations_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/suggestnominations_unittest.py
@@ -36,7 +36,7 @@ class SuggestNominationsTest(CommandsTest):
mock_git_output = """commit 60831dde5beb22f35aef305a87fca7b5f284c698
Author: fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
-Date: Thu Sep 15 19:56:21 2011 +0000
+Date: 2011-09-15 19:56:21 +0000
Value profiles collect no information for global variables
https://bugs.webkit.org/show_bug.cgi?id=68143
@@ -45,21 +45,43 @@ Date: Thu Sep 15 19:56:21 2011 +0000
git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95219 268f45cc-cd09-0410-ab3c-d52691b4dbfc
"""
- mock_same_author_commit_message = """Value profiles collect no information for global variables
+ mock_same_author_commit_message = """Author: fpizlo@apple.com <fpizlo@apple.com@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
+Date: 2011-09-15 19:56:21 +0000
+
+Value profiles collect no information for global variables
https://bugs.webkit.org/show_bug.cgi?id=68143
-Reviewed by Geoffrey Garen."""
+Reviewed by Geoffrey Garen.
+
+git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95219 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+"""
+
+ def _make_options(self, **kwargs):
+ defaults = {
+ 'committer_minimum': 10,
+ 'max_commit_age': 9,
+ 'reviewer_minimum': 80,
+ 'show_commits': False,
+ 'verbose': False,
+ }
+ options = MockOptions(**defaults)
+ options.update(**kwargs)
+ return options
def test_recent_commit_messages(self):
tool = MockTool()
suggest_nominations = SuggestNominations()
- suggest_nominations._init_options(options=MockOptions(reviewer_minimum=80, committer_minimum=10, max_commit_age=9, show_commits=False, verbose=False))
+ suggest_nominations._init_options(options=self._make_options())
suggest_nominations.bind_to_tool(tool)
tool.executive.run_command = lambda command: self.mock_git_output
self.assertEqual(list(suggest_nominations._recent_commit_messages()), [self.mock_same_author_commit_message])
- mock_non_committer_commit_message = """Let TestWebKitAPI work for chromium
+ mock_non_committer_commit_message = """
+Author: commit-queue@webkit.org <commit-queue@webkit.org@268f45cc-cd09-0410-ab3c-d52691b4dbfc>
+Date: 2009-09-15 14:08:42 +0000
+
+Let TestWebKitAPI work for chromium
https://bugs.webkit.org/show_bug.cgi?id=67756
Patch by Xianzhu Wang <wangxianzhu@chromium.org> on 2011-09-15
@@ -67,11 +89,15 @@ Reviewed by Sam Weinig.
Source/WebKit/chromium:
-* WebKit.gyp:"""
+* WebKit.gyp:
+
+git-svn-id: http://svn.webkit.org/repository/webkit/trunk@95188 268f45cc-cd09-0410-ab3c-d52691b4dbfc
+"""
def test_basic(self):
expected_stdout = "REVIEWER: Xianzhu Wang (wangxianzhu@chromium.org) has 88 reviewed patches\n"
+ options = self._make_options()
suggest_nominations = SuggestNominations()
- suggest_nominations._init_options(options=MockOptions(reviewer_minimum=80, committer_minimum=10, max_commit_age=9, show_commits=False, verbose=False))
+ suggest_nominations._init_options(options=options)
suggest_nominations._recent_commit_messages = lambda: [self.mock_non_committer_commit_message for _ in range(88)]
- self.assert_execute_outputs(suggest_nominations, [], expected_stdout=expected_stdout, options=MockOptions(reviewer_minimum=80, committer_minimum=10, max_commit_age=9, show_commits=False, verbose=False))
+ self.assert_execute_outputs(suggest_nominations, [], expected_stdout=expected_stdout, options=options)
diff --git a/Tools/Scripts/webkitpy/tool/commands/upload.py b/Tools/Scripts/webkitpy/tool/commands/upload.py
index 5cd0de9e0..69dc4f715 100644
--- a/Tools/Scripts/webkitpy/tool/commands/upload.py
+++ b/Tools/Scripts/webkitpy/tool/commands/upload.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
# Copyright (c) 2009, 2010 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
@@ -44,12 +43,12 @@ from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.commands.abstractsequencedcommand import AbstractSequencedCommand
from webkitpy.tool.comments import bug_comment_from_svn_revision
from webkitpy.tool.grammar import pluralize, join_with_separators
-from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand
+from webkitpy.tool.multicommandtool import Command
_log = logging.getLogger(__name__)
-class CommitMessageForCurrentDiff(AbstractDeclarativeCommand):
+class CommitMessageForCurrentDiff(Command):
name = "commit-message"
help_text = "Print a commit message suitable for the uncommitted changes"
@@ -57,7 +56,7 @@ class CommitMessageForCurrentDiff(AbstractDeclarativeCommand):
options = [
steps.Options.git_commit,
]
- AbstractDeclarativeCommand.__init__(self, options=options)
+ Command.__init__(self, options=options)
def execute(self, options, args, tool):
# This command is a useful test to make sure commit_message_for_this_commit
@@ -65,7 +64,7 @@ class CommitMessageForCurrentDiff(AbstractDeclarativeCommand):
print "%s" % tool.checkout().commit_message_for_this_commit(options.git_commit).message()
-class CleanPendingCommit(AbstractDeclarativeCommand):
+class CleanPendingCommit(Command):
name = "clean-pending-commit"
help_text = "Clear r+ on obsolete patches so they do not appear in the pending-commit list."
@@ -95,7 +94,7 @@ class CleanPendingCommit(AbstractDeclarativeCommand):
# FIXME: This should share more logic with AssignToCommitter and CleanPendingCommit
-class CleanReviewQueue(AbstractDeclarativeCommand):
+class CleanReviewQueue(Command):
name = "clean-review-queue"
help_text = "Clear r? on obsolete patches so they do not appear in the pending-review list."
@@ -120,7 +119,7 @@ class CleanReviewQueue(AbstractDeclarativeCommand):
self._tool.bugs.obsolete_attachment(patch.id(), message)
-class AssignToCommitter(AbstractDeclarativeCommand):
+class AssignToCommitter(Command):
name = "assign-to-committer"
help_text = "Assign bug to whoever attached the most recent r+'d patch"
@@ -242,6 +241,15 @@ class LandSafely(AbstractPatchUploadingCommand):
]
+class HasLanded(AbstractPatchUploadingCommand):
+ name = "has-landed"
+ help_text = "Check that the current code was successfully landed and no changes remain."
+ argument_names = "[BUGID]"
+ steps = [
+ steps.HasLanded,
+ ]
+
+
class Prepare(AbstractSequencedCommand):
name = "prepare"
help_text = "Creates a bug (or prompts for an existing bug) and prepares the ChangeLogs"
@@ -298,7 +306,7 @@ class EditChangeLogs(AbstractSequencedCommand):
]
-class PostCommits(AbstractDeclarativeCommand):
+class PostCommits(Command):
name = "post-commits"
help_text = "Attach a range of local commits to bugs as patch files"
argument_names = "COMMITISH"
@@ -312,7 +320,7 @@ class PostCommits(AbstractDeclarativeCommand):
steps.Options.review,
steps.Options.request_commit,
]
- AbstractDeclarativeCommand.__init__(self, options=options, requires_local_commits=True)
+ Command.__init__(self, options=options, requires_local_commits=True)
def _comment_text_for_commit(self, options, commit_message, tool, commit_id):
comment_text = None
@@ -350,7 +358,7 @@ class PostCommits(AbstractDeclarativeCommand):
# FIXME: This command needs to be brought into the modern age with steps and CommitInfo.
-class MarkBugFixed(AbstractDeclarativeCommand):
+class MarkBugFixed(Command):
name = "mark-bug-fixed"
help_text = "Mark the specified bug as fixed"
argument_names = "[SVN_REVISION]"
@@ -361,7 +369,7 @@ class MarkBugFixed(AbstractDeclarativeCommand):
make_option("--open", action="store_true", default=False, dest="open_bug", help="Open bug in default web browser (Mac only)."),
make_option("--update-only", action="store_true", default=False, dest="update_only", help="Add comment to the bug, but do not close it."),
]
- AbstractDeclarativeCommand.__init__(self, options=options)
+ Command.__init__(self, options=options)
# FIXME: We should be using checkout().changelog_entries_for_revision(...) instead here.
def _fetch_commit_log(self, tool, svn_revision):
@@ -431,7 +439,7 @@ class MarkBugFixed(AbstractDeclarativeCommand):
# FIXME: Requires unit test. Blocking issue: too complex for now.
-class CreateBug(AbstractDeclarativeCommand):
+class CreateBug(Command):
name = "create-bug"
help_text = "Create a bug from local changes or local commits"
argument_names = "[COMMITISH]"
@@ -444,7 +452,7 @@ class CreateBug(AbstractDeclarativeCommand):
make_option("--no-review", action="store_false", dest="review", default=True, help="Do not mark the patch for review."),
make_option("--request-commit", action="store_true", dest="request_commit", default=False, help="Mark the patch as needing auto-commit after review."),
]
- AbstractDeclarativeCommand.__init__(self, options=options)
+ Command.__init__(self, options=options)
def create_bug_from_commit(self, options, args, tool):
commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
diff --git a/Tools/Scripts/webkitpy/tool/comments.py b/Tools/Scripts/webkitpy/tool/comments.py
index 771953e69..771953e69 100755..100644
--- a/Tools/Scripts/webkitpy/tool/comments.py
+++ b/Tools/Scripts/webkitpy/tool/comments.py
diff --git a/Tools/Scripts/webkitpy/tool/gcovr b/Tools/Scripts/webkitpy/tool/gcovr
new file mode 100755
index 000000000..e4a1f8850
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/gcovr
@@ -0,0 +1,1029 @@
+#! /usr/bin/env python
+#
+# A report generator for gcov 3.4
+#
+# This routine generates a format that is similar to the format generated
+# by the Python coverage.py module. This code is similar to the
+# data processing performed by lcov's geninfo command. However, we
+# don't worry about parsing the *.gcna files, and backwards compatibility for
+# older versions of gcov is not supported.
+#
+# Outstanding issues
+# - verify that gcov 3.4 or newer is being used
+# - verify support for symbolic links
+#
+# gcovr is a FAST project. For documentation, bug reporting, and
+# updates, see https://software.sandia.gov/trac/fast/wiki/gcovr
+#
+# _________________________________________________________________________
+#
+# FAST: Utilities for Agile Software Development
+# Copyright (c) 2008 Sandia Corporation.
+# This software is distributed under the BSD License.
+# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
+# the U.S. Government retains certain rights in this software.
+# For more information, see the FAST README.txt file.
+#
+# $Revision: 2839 $
+# $Date: 2013-05-27 11:13:17 -0700 (Mon, 27 May 2013) $
+# _________________________________________________________________________
+#
+
+import copy
+import glob
+import os
+import re
+import subprocess
+import sys
+import time
+import xml.dom.minidom
+
+from optparse import OptionParser
+from string import Template
+from os.path import normpath
+
+__version__ = "2.5-prerelease"
+src_revision = "$Revision: 2839 $"
+gcov_cmd = "gcov"
+
+output_re = re.compile("[Cc]reating [`'](.*)'$")
+source_re = re.compile("cannot open (source|graph) file")
+
+starting_dir = os.getcwd()
+
+
+def version_str():
+ ans = __version__
+ m = re.match('\$Revision:\s*(\S+)\s*\$', src_revision)
+ if m:
+ ans = ans + " (r%s)" % (m.group(1))
+ return ans
+
+#
+# Container object for coverage statistics
+#
+class CoverageData(object):
+
+ def __init__(self, fname, uncovered, uncovered_exceptional, covered, branches, noncode):
+ self.fname=fname
+ # Shallow copies are cheap & "safe" because the caller will
+ # throw away their copies of covered & uncovered after calling
+ # us exactly *once*
+ self.uncovered = copy.copy(uncovered)
+ self.uncovered_exceptional = copy.copy(uncovered_exceptional)
+ self.covered = copy.copy(covered)
+ self.noncode = copy.copy(noncode)
+ # But, a deep copy is required here
+ self.all_lines = copy.deepcopy(uncovered)
+ self.all_lines.update(uncovered_exceptional)
+ self.all_lines.update(covered.keys())
+ self.branches = copy.deepcopy(branches)
+
+ def update(self, uncovered, uncovered_exceptional, covered, branches, noncode):
+ self.all_lines.update(uncovered)
+ self.all_lines.update(uncovered_exceptional)
+ self.all_lines.update(covered.keys())
+ self.uncovered.update(uncovered)
+ self.uncovered_exceptional.update(uncovered_exceptional)
+ self.noncode.intersection_update(noncode)
+ for k in covered.keys():
+ self.covered[k] = self.covered.get(k,0) + covered[k]
+ for k in branches.keys():
+ for b in branches[k]:
+ d = self.branches.setdefault(k, {})
+ d[b] = d.get(b, 0) + branches[k][b]
+ self.uncovered.difference_update(self.covered.keys())
+ self.uncovered_exceptional.difference_update(self.covered.keys())
+
+ def uncovered_str(self, exceptional):
+ if options.show_branch:
+ # Don't do any aggregation on branch results
+ tmp = []
+ for line in self.branches.keys():
+ for branch in self.branches[line]:
+ if self.branches[line][branch] == 0:
+ tmp.append(line)
+ break
+
+ tmp.sort()
+ return ",".join([str(x) for x in tmp]) or ""
+
+ if exceptional:
+ tmp = list(self.uncovered_exceptional)
+ else:
+ tmp = list(self.uncovered)
+ if len(tmp) == 0:
+ return ""
+
+ tmp.sort()
+ first = None
+ last = None
+ ranges=[]
+ for item in tmp:
+ if last is None:
+ first=item
+ last=item
+ elif item == (last+1):
+ last=item
+ else:
+ if len(self.noncode.intersection(range(last+1,item))) \
+ == item - last - 1:
+ last = item
+ continue
+
+ if first==last:
+ ranges.append(str(first))
+ else:
+ ranges.append(str(first)+"-"+str(last))
+ first=item
+ last=item
+ if first==last:
+ ranges.append(str(first))
+ else:
+ ranges.append(str(first)+"-"+str(last))
+ return ",".join(ranges)
+
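
uncovered_str() above collapses sorted uncovered line numbers into ranges, and a gap counts as part of a run when every line in the gap is known non-code (blank lines, lone braces, comments). A small sketch of that merging rule, assuming show_branch is off and using made-up line sets:

    def merge_ranges(uncovered, noncode):
        # Restates the range-merging rule of uncovered_str() for illustration only.
        ranges, first, last = [], None, None
        for item in sorted(uncovered):
            if last is None:
                first = last = item
            elif item == last + 1 or set(range(last + 1, item)) <= noncode:
                last = item
            else:
                ranges.append(str(first) if first == last else "%d-%d" % (first, last))
                first = last = item
        if first is not None:
            ranges.append(str(first) if first == last else "%d-%d" % (first, last))
        return ",".join(ranges)

    print(merge_ranges({3, 4, 5, 9, 12, 13}, {10, 11}))  # prints "3-5,9-13"
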
+ def coverage(self):
+ if ( options.show_branch ):
+ total = 0
+ cover = 0
+ for line in self.branches.keys():
+ for branch in self.branches[line].keys():
+ total += 1
+ cover += self.branches[line][branch] > 0 and 1 or 0
+ else:
+ total = len(self.all_lines)
+ cover = len(self.covered)
+
+ percent = total and str(int(100.0*cover/total)) or "--"
+ return (total, cover, percent)
+
+ def summary(self):
+ tmp = options.filter.sub('',self.fname)
+ if not self.fname.endswith(tmp):
+ # Do no truncation if the filter does not start matching at
+ # the beginning of the string
+ tmp = self.fname
+ tmp = tmp.ljust(40)
+ if len(tmp) > 40:
+ tmp=tmp+"\n"+" "*40
+
+ (total, cover, percent) = self.coverage()
+ uncovered_lines = self.uncovered_str(False)
+ if not options.show_branch:
+ t = self.uncovered_str(True)
+ if len(t):
+                uncovered_lines += " [* " + t + "]"
+ return ( total, cover,
+ tmp + str(total).rjust(8) + str(cover).rjust(8) + \
+ percent.rjust(6) + "% " + uncovered_lines )
+
+
+def resolve_symlinks(orig_path):
+ """
+ Return the normalized absolute path name with all symbolic links resolved
+ """
+ drive,tmp = os.path.splitdrive(os.path.abspath(orig_path))
+ if not drive:
+ drive = os.path.sep
+ parts = tmp.split(os.path.sep)
+ actual_path = [drive]
+ while parts:
+ actual_path.append(parts.pop(0))
+ if not os.path.islink(os.path.join(*actual_path)):
+ continue
+ actual_path[-1] = os.readlink(os.path.join(*actual_path))
+ tmp_drive, tmp_path = os.path.splitdrive(
+ resolve_symlinks(os.path.join(*actual_path)) )
+ if tmp_drive:
+ drive = tmp_drive
+ actual_path = [drive] + tmp_path.split(os.path.sep)
+ return os.path.join(*actual_path)
+
+
+def path_startswith(path, base):
+ return path.startswith(base) and (
+ len(base) == len(path) or path[len(base)] == os.path.sep )
+
+
+class PathAliaser(object):
+ def __init__(self):
+ self.aliases = {}
+ self.master_targets = set()
+ self.preferred_name = {}
+
+ def master_path(self, path):
+ match_found = False
+ while True:
+ for base, alias in self.aliases.items():
+ if path_startswith(path, base):
+ path = alias + path[len(base):]
+ match_found = True
+ break
+ for master_base in self.master_targets:
+ if path_startswith(path, master_base):
+ return path, master_base, True
+ if match_found:
+ sys.stderr.write(
+ "(ERROR) violating fundamental assumption while walking "
+ "directory tree.\n\tPlease report this to the gcovr "
+ "developers.\n" )
+ return path, None, match_found
+
+ def unalias_path(self, path):
+ path = resolve_symlinks(path)
+ path, master_base, known_path = self.master_path(path)
+ if not known_path:
+ return path
+ # Try and resolve the preferred name for this location
+ if master_base in self.preferred_name:
+ return self.preferred_name[master_base] + path[len(master_base):]
+ return path
+
+ def add_master_target(self, master):
+ self.master_targets.add(master)
+
+ def add_alias(self, target, master):
+ self.aliases[target] = master
+
+ def set_preferred(self, master, preferred):
+ self.preferred_name[master] = preferred
+
+aliases = PathAliaser()
+
+# This is UGLY. Here's why: UNIX resolves symbolic links by walking the
+# entire directory structure. What that means is that relative links
+# are always relative to the actual directory inode, and not the
+# "virtual" path that the user might have traversed (over symlinks) on
+# the way to that directory. Here's the canonical example:
+#
+# a / b / c / testfile
+# a / d / e --> ../../a/b
+# m / n --> /a
+# x / y / z --> /m/n/d
+#
+# If we start in "y", we will see the following directory structure:
+# y
+# |-- z
+# |-- e
+# |-- c
+# |-- testfile
+#
+# The problem is that using a simple traversal based on the Python
+# documentation:
+#
+# (os.path.join(os.path.dirname(path), os.readlink(result)))
+#
+# will not work: we will see a link to /m/n/d from /x/y, but completely
+# miss the fact that n is itself a link. If we then naively attempt to
+# apply the "c" relative link, we get an intermediate path that looks
+# like "/m/n/d/e/../../a/b", which would get normalized to "/m/n/a/b"; a
+# nonexistent path. The solution is that we need to walk the original
+# path, along with the full path of all links 1 directory at a time and
+# check for embedded symlinks.
+#
+def link_walker(path):
+ targets = [os.path.abspath(path)]
+ while targets:
+ target_dir = targets.pop(0)
+ actual_dir = resolve_symlinks(target_dir)
+ #print "target dir: %s (%s)" % (target_dir, actual_dir)
+ master_name, master_base, visited = aliases.master_path(actual_dir)
+ if visited:
+ #print " ...root already visited as %s" % master_name
+ aliases.add_alias(target_dir, master_name)
+ continue
+ if master_name != target_dir:
+ aliases.set_preferred(master_name, target_dir)
+ aliases.add_alias(target_dir, master_name)
+ aliases.add_master_target(master_name)
+ #print " ...master name = %s" % master_name
+ #print " ...walking %s" % target_dir
+ for root, dirs, files in os.walk(target_dir, topdown=True):
+ #print " ...reading %s" % root
+ for d in dirs:
+ tmp = os.path.abspath(os.path.join(root, d))
+ #print " ...checking %s" % tmp
+ if os.path.islink(tmp):
+ #print " ...buffering link %s" % tmp
+ targets.append(tmp)
+ yield root, dirs, files
+
+
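
The comment above describes why a single readlink() step is not enough when a link target itself passes through another link. A runnable sketch of that pitfall, built in a throwaway temporary directory (the a/b/c, m/n, x/y/z names follow the comment's example and are not part of gcovr):

    import os
    import tempfile

    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, "a", "b", "c"))
    os.makedirs(os.path.join(root, "a", "d"))
    os.makedirs(os.path.join(root, "m"))
    os.makedirs(os.path.join(root, "x", "y"))
    os.symlink(os.path.join("..", "..", "a", "b"), os.path.join(root, "a", "d", "e"))
    os.symlink(os.path.join(root, "a"), os.path.join(root, "m", "n"))
    os.symlink(os.path.join(root, "m", "n", "d"), os.path.join(root, "x", "y", "z"))

    z = os.path.join(root, "x", "y", "z")
    naive = os.path.normpath(os.path.join(os.path.dirname(z), os.readlink(z)))
    print(naive)                # .../m/n/d -- the embedded m/n link is still unresolved
    print(os.path.realpath(z))  # .../a/d   -- every component resolved, as link_walker requires
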
+def search_file(expr, path):
+ """
+ Given a search path, recursively descend to find files that match a
+ regular expression.
+ """
+ ans = []
+ pattern = re.compile(expr)
+ if path is None or path == ".":
+ path = os.getcwd()
+ elif not os.path.exists(path):
+ raise IOError("Unknown directory '"+path+"'")
+ for root, dirs, files in link_walker(path):
+ for name in files:
+ if pattern.match(name):
+ name = os.path.join(root,name)
+ if os.path.islink(name):
+ ans.append( os.path.abspath(os.readlink(name)) )
+ else:
+ ans.append( os.path.abspath(name) )
+ return ans
+
+
+#
+# Get the list of datafiles in the directories specified by the user
+#
+def get_datafiles(flist, options):
+ allfiles=[]
+ for dir in flist:
+ if options.verbose:
+ sys.stdout.write( "Scanning directory %s for gcda/gcno files...\n"
+ % (dir, ) )
+ files = search_file(".*\.gc(da|no)$", dir)
+ # gcno files will *only* produce uncovered results; however,
+ # that is useful information for the case where a compilation
+ # unit is never actually exercised by the test code. So, we
+ # will process gcno files, but ONLY if there is no corresponding
+ # gcda file.
+ gcda_files = [file for file in files if file.endswith('gcda')]
+ tmp = set(gcda_files)
+ gcno_files = [ file for file in files if
+ file.endswith('gcno') and file[:-2]+'da' not in tmp ]
+ if options.verbose:
+ sys.stdout.write(
+ "Found %d files (and will process %d)\n" %
+ ( len(files), len(gcda_files) + len(gcno_files) ) )
+ allfiles.extend(gcda_files)
+ allfiles.extend(gcno_files)
+ return allfiles
+
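
A short illustration of the gcda/gcno selection rule above, with made-up file names: a *.gcno file is only kept when no sibling *.gcda exists, so never-exercised compilation units still show up as fully uncovered.

    files = ["obj/foo.gcda", "obj/foo.gcno", "obj/bar.gcno"]
    gcda_files = [f for f in files if f.endswith("gcda")]
    have_data = set(gcda_files)
    gcno_files = [f for f in files if f.endswith("gcno") and f[:-2] + "da" not in have_data]
    print(gcda_files)  # ['obj/foo.gcda'] -- exercised unit, real execution counts
    print(gcno_files)  # ['obj/bar.gcno'] -- never exercised, reported as 0% covered
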
+
+def process_gcov_data(file, covdata, options):
+ INPUT = open(file,"r")
+ #
+ # Get the filename
+ #
+ line = INPUT.readline()
+ segments=line.split(':',3)
+ if len(segments) != 4 or not segments[2].lower().strip().endswith('source'):
+ raise RuntimeError('Fatal error parsing gcov file, line 1: \n\t"%s"' % line.rstrip())
+ currdir = os.getcwd()
+ os.chdir(starting_dir)
+ fname = aliases.unalias_path(os.path.abspath((segments[-1]).strip()))
+ os.chdir(currdir)
+ if options.verbose:
+ sys.stdout.write("Parsing coverage data for file %s\n" % fname)
+ #
+ # Return if the filename does not match the filter
+ #
+ if not options.filter.match(fname):
+ if options.verbose:
+ sys.stdout.write(" Filtering coverage data for file %s\n" % fname)
+ return
+ #
+ # Return if the filename matches the exclude pattern
+ #
+ for i in range(0,len(options.exclude)):
+ if options.exclude[i].match(options.filter.sub('',fname)) or \
+ options.exclude[i].match(fname) or \
+ options.exclude[i].match(os.path.abspath(fname)):
+ if options.verbose:
+ sys.stdout.write(" Excluding coverage data for file %s\n" % fname)
+ return
+ #
+ # Parse each line, and record the lines
+ # that are uncovered
+ #
+ noncode = set()
+ uncovered = set()
+ uncovered_exceptional = set()
+ covered = {}
+ branches = {}
+ #first_record=True
+ lineno = 0
+ for line in INPUT:
+ segments=line.split(":",2)
+ #print "HERE", segments
+ tmp = segments[0].strip()
+ if len(segments) > 1:
+ try:
+ lineno = int(segments[1].strip())
+ except:
+ pass # keep previous line number!
+
+ if tmp[0] == '#':
+ uncovered.add( lineno )
+ elif tmp[0] == '=':
+ uncovered_exceptional.add( lineno )
+ elif tmp[0] in "0123456789":
+ covered[lineno] = int(segments[0].strip())
+ elif tmp[0] == '-':
+ # remember certain non-executed lines
+ code = segments[2].strip()
+ if len(code) == 0 or code == "{" or code == "}" or \
+ code.startswith("//") or code == 'else':
+ noncode.add( lineno )
+ elif tmp.startswith('branch'):
+ fields = line.split()
+ try:
+ count = int(fields[3])
+ branches.setdefault(lineno, {})[int(fields[1])] = count
+ except:
+ # We ignore branches that were "never executed"
+ pass
+ elif tmp.startswith('call'):
+ pass
+ elif tmp.startswith('function'):
+ pass
+ elif tmp[0] == 'f':
+ pass
+ #if first_record:
+ #first_record=False
+ #uncovered.add(prev)
+ #if prev in uncovered:
+ #tokens=re.split('[ \t]+',tmp)
+ #if tokens[3] != "0":
+ #uncovered.remove(prev)
+ #prev = int(segments[1].strip())
+ #first_record=True
+ else:
+ sys.stderr.write(
+ "(WARNING) Unrecognized GCOV output: '%s'\n"
+                "\tThis is indicative of a gcov output parse error.\n"
+ "\tPlease report this to the gcovr developers." % tmp )
+ ##print 'uncovered',uncovered
+ ##print 'covered',covered
+ ##print 'branches',branches
+ ##print 'noncode',noncode
+ #
+ # If the file is already in covdata, then we
+ # remove lines that are covered here. Otherwise,
+ # initialize covdata
+ #
+ if not fname in covdata:
+ covdata[fname] = CoverageData(fname,uncovered,uncovered_exceptional,covered,branches,noncode)
+ else:
+ covdata[fname].update(uncovered,uncovered_exceptional,covered,branches,noncode)
+ INPUT.close()
+
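
For reference, a simplified sample of the colon-separated *.gcov text that process_gcov_data() consumes (content made up; the real parser also handles branch, call and function records). The first field classifies each line:

    sample = """\
        -:    0:Source:foo.cpp
        -:    3:int add(int a, int b)
        5:    4:{ return a + b; }
    #####:    8:    return -1;
    =====:   12:    throw Error();
    """
    for line in sample.splitlines()[1:]:
        count, lineno, code = [s.strip() for s in line.split(":", 2)]
        if count == "-":
            kind = "non-executable"
        elif count.startswith("#"):
            kind = "uncovered"
        elif count.startswith("="):
            kind = "uncovered on exceptional paths only"
        else:
            kind = "covered, %s executions" % count
        print("%s: %s" % (lineno, kind))
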
+#
+# Process a datafile (generated by running the instrumented application)
+# and run gcov with the corresponding arguments
+#
+# This is trickier than it sounds: The gcda/gcno files are stored in the
+# same directory as the object files; however, gcov must be run from the
+# same directory where gcc/g++ was run. Normally, the user would know
+# where gcc/g++ was invoked from and could tell gcov the path to the
+# object (and gcda) files with the --object-directory command.
+# Unfortunately, we do everything backwards: gcovr looks for the gcda
+# files and then has to infer the original gcc working directory.
+#
+# In general, (but not always) we can assume that the gcda file is in a
+# subdirectory of the original gcc working directory, so we will first
+# try ".", and on error, move up the directory tree looking for the
+# correct working directory (letting gcov's own error codes dictate when
+# we hit the right directory). This covers 90+% of the "normal" cases.
+# The exception to this is if gcc was invoked with "-o ../[...]" (i.e.,
+# the object directory was a peer (not a parent/child) of the cwd). In
+# this case, things are really tough. We accept an argument
+# (--object-directory) that SHOULD BE THE SAME as the one provided to
+# gcc. We will then walk that path (backwards) in the hopes of
+# identifying the original gcc working directory (there is a bit of
+# trial-and-error here)
+#
+def process_datafile(filename, covdata, options):
+ #
+ # Launch gcov
+ #
+ abs_filename = os.path.abspath(filename)
+ (dirname,fname) = os.path.split(abs_filename)
+ #(name,ext) = os.path.splitext(base)
+
+ potential_wd = []
+ errors=[]
+ Done = False
+
+ if options.objdir:
+ src_components = abs_filename.split(os.sep)
+ components = normpath(options.objdir).split(os.sep)
+ idx = 1
+ while idx <= len(components):
+ if idx > len(src_components):
+ break
+ if components[-1*idx] != src_components[-1*idx]:
+ break
+ idx += 1
+ if idx > len(components):
+ pass # a parent dir; the normal process will find it
+ elif components[-1*idx] == '..':
+ dirs = [ os.path.join(src_components[:len(src_components)-idx+1]) ]
+ while idx <= len(components) and components[-1*idx] == '..':
+ tmp = []
+ for d in dirs:
+ for f in os.listdir(d):
+ x = os.path.join(d,f)
+ if os.path.isdir(x):
+ tmp.append(x)
+ dirs = tmp
+ idx += 1
+ potential_wd = dirs
+ else:
+ if components[0] == '':
+ # absolute path
+ tmp = [ options.objdir ]
+ else:
+ # relative path: check relative to both the cwd and the
+ # gcda file
+ tmp = [ os.path.join(x, options.objdir) for x in
+ [os.path.dirname(abs_filename), os.getcwd()] ]
+ potential_wd = [ testdir for testdir in tmp
+ if os.path.isdir(testdir) ]
+ if len(potential_wd) == 0:
+ errors.append("ERROR: cannot identify the location where GCC "
+ "was run using --object-directory=%s\n" %
+ options.objdir)
+ # Revert to the normal
+ #sys.exit(1)
+
+ # no objdir was specified (or it was a parent dir); walk up the dir tree
+ if len(potential_wd) == 0:
+ wd = os.path.split(abs_filename)[0]
+ while True:
+ potential_wd.append(wd)
+ wd = os.path.split(wd)[0]
+ if wd == potential_wd[-1]:
+ break
+
+ cmd = [ gcov_cmd, abs_filename,
+ "--branch-counts", "--branch-probabilities", "--preserve-paths",
+ '--object-directory', dirname ]
+
+ # NB: We are lazy English speakers, so we will only parse English output
+ env = dict(os.environ)
+ env['LC_ALL'] = 'en_US'
+
+
+ while len(potential_wd) > 0 and not Done:
+        # NB: either len(potential_wd) == 1, or all entries are absolute
+ # paths, so we don't have to chdir(starting_dir) at every
+ # iteration.
+ os.chdir(potential_wd.pop(0))
+
+
+ #if options.objdir:
+ # cmd.extend(["--object-directory", Template(options.objdir).substitute(filename=filename, head=dirname, tail=base, root=name, ext=ext)])
+
+ if options.verbose:
+ sys.stdout.write("Running gcov: '%s' in '%s'\n" % ( ' '.join(cmd), os.getcwd() ))
+ (out, err) = subprocess.Popen( cmd, env=env,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE ).communicate()
+ out=out.decode('utf-8')
+ err=err.decode('utf-8')
+
+ # find the files that gcov created
+ gcov_files = {'active':[], 'filter':[], 'exclude':[]}
+ for line in out.splitlines():
+ found = output_re.search(line.strip())
+ if found is not None:
+ fname = found.group(1)
+ if not options.gcov_filter.match(fname):
+ if options.verbose:
+ sys.stdout.write("Filtering gcov file %s\n" % fname)
+ gcov_files['filter'].append(fname)
+ continue
+ exclude=False
+ for i in range(0,len(options.gcov_exclude)):
+ if options.gcov_exclude[i].match(options.gcov_filter.sub('',fname)) or \
+ options.gcov_exclude[i].match(fname) or \
+ options.gcov_exclude[i].match(os.path.abspath(fname)):
+ exclude=True
+ break
+ if not exclude:
+ gcov_files['active'].append(fname)
+ elif options.verbose:
+ sys.stdout.write("Excluding gcov file %s\n" % fname)
+ gcov_files['exclude'].append(fname)
+
+ if source_re.search(err):
+ # gcov tossed errors: try the next potential_wd
+ errors.append(err)
+ else:
+ # Process *.gcov files
+ for fname in gcov_files['active']:
+ process_gcov_data(fname, covdata, options)
+ Done = True
+
+ if not options.keep:
+ for group in gcov_files.values():
+ for fname in group:
+ if os.path.exists(fname):
+ # Only remove files that actually exist.
+ os.remove(fname)
+
+ os.chdir(starting_dir)
+ if options.delete:
+ if not abs_filename.endswith('gcno'):
+ os.remove(abs_filename)
+
+ if not Done:
+ sys.stderr.write(
+ "(WARNING) GCOV produced the following errors processing %s:\n"
+ "\t %s"
+ "\t(gcovr could not infer a working directory that resolved it.)\n"
+ % ( filename, "\t ".join(errors) ) )
+
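
A condensed sketch of the directory search described in the comment before process_datafile(): walk upward from the *.gcda file and keep re-running gcov until its stderr no longer complains about unopenable source/graph files. This is an illustration only; it assumes gcov is on PATH and skips the --object-directory heuristics above.

    import os
    import subprocess

    def candidate_dirs(gcda_path):
        wd = os.path.dirname(os.path.abspath(gcda_path))
        while True:
            yield wd
            parent = os.path.dirname(wd)
            if parent == wd:
                return
            wd = parent

    def find_gcov_working_dir(gcda_path):
        cmd = ["gcov", os.path.abspath(gcda_path),
               "--branch-counts", "--branch-probabilities", "--preserve-paths",
               "--object-directory", os.path.dirname(os.path.abspath(gcda_path))]
        for wd in candidate_dirs(gcda_path):
            proc = subprocess.Popen(cmd, cwd=wd,
                                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            _, err = proc.communicate()
            if b"cannot open" not in err:  # same signal source_re looks for above
                return wd                  # gcov ran from a usable working directory
        return None
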
+#
+# Produce the classic gcovr text report
+#
+def print_text_report(covdata):
+ def _num_uncovered(key):
+ (total, covered, percent) = covdata[key].coverage()
+ return total - covered
+ def _percent_uncovered(key):
+ (total, covered, percent) = covdata[key].coverage()
+ if covered:
+ return -1.0*covered/total
+ else:
+ return total or 1e6
+ def _alpha(key):
+ return key
+
+ if options.output:
+ OUTPUT = open(options.output,'w')
+ else:
+ OUTPUT = sys.stdout
+ total_lines=0
+ total_covered=0
+ # Header
+ OUTPUT.write("-"*78 + '\n')
+ a = options.show_branch and "Branches" or "Lines"
+ b = options.show_branch and "Taken" or "Exec"
+ c = "Missing"
+ OUTPUT.write("File".ljust(40) + a.rjust(8) + b.rjust(8)+ " Cover " + c + "\n")
+ OUTPUT.write("-"*78 + '\n')
+
+ # Data
+ keys = list(covdata.keys())
+ keys.sort(key=options.sort_uncovered and _num_uncovered or \
+ options.sort_percent and _percent_uncovered or _alpha)
+ for key in keys:
+ (t, n, txt) = covdata[key].summary()
+ total_lines += t
+ total_covered += n
+ OUTPUT.write(txt + '\n')
+
+ # Footer & summary
+ OUTPUT.write("-"*78 + '\n')
+ percent = total_lines and str(int(100.0*total_covered/total_lines)) or "--"
+ OUTPUT.write("TOTAL".ljust(40) + str(total_lines).rjust(8) + \
+ str(total_covered).rjust(8) + str(percent).rjust(6)+"%" + '\n')
+ OUTPUT.write("-"*78 + '\n')
+
+ # Close logfile
+ if options.output:
+ OUTPUT.close()
+
+#
+# Produce an XML report in the Cobertura format
+#
+def print_xml_report(covdata):
+ branchTotal = 0
+ branchCovered = 0
+ lineTotal = 0
+ lineCovered = 0
+
+ options.show_branch = True
+ for key in covdata.keys():
+ (total, covered, percent) = covdata[key].coverage()
+ branchTotal += total
+ branchCovered += covered
+
+ options.show_branch = False
+ for key in covdata.keys():
+ (total, covered, percent) = covdata[key].coverage()
+ lineTotal += total
+ lineCovered += covered
+
+ impl = xml.dom.minidom.getDOMImplementation()
+ docType = impl.createDocumentType(
+ "coverage", None,
+ "http://cobertura.sourceforge.net/xml/coverage-03.dtd" )
+ doc = impl.createDocument(None, "coverage", docType)
+ root = doc.documentElement
+ root.setAttribute( "line-rate", lineTotal == 0 and '0.0' or
+ str(float(lineCovered) / lineTotal) )
+ root.setAttribute( "branch-rate", branchTotal == 0 and '0.0' or
+ str(float(branchCovered) / branchTotal) )
+ root.setAttribute( "timestamp", str(int(time.time())) )
+ root.setAttribute( "version", "gcovr %s" % (version_str(),) )
+
+ # Generate the <sources> element: this is either the root directory
+ # (specified by --root), or the CWD.
+ sources = doc.createElement("sources")
+ root.appendChild(sources)
+
+ # Generate the coverage output (on a per-package basis)
+ packageXml = doc.createElement("packages")
+ root.appendChild(packageXml)
+ packages = {}
+ source_dirs = set()
+
+ keys = list(covdata.keys())
+ keys.sort()
+ for f in keys:
+ data = covdata[f]
+ dir = options.filter.sub('',f)
+ if f.endswith(dir):
+ src_path = f[:-1*len(dir)]
+ if len(src_path) > 0:
+ while dir.startswith(os.path.sep):
+ src_path += os.path.sep
+ dir = dir[len(os.path.sep):]
+ source_dirs.add(src_path)
+ else:
+ # Do no truncation if the filter does not start matching at
+ # the beginning of the string
+ dir = f
+ (dir, fname) = os.path.split(dir)
+
+ package = packages.setdefault(
+ dir, [ doc.createElement("package"), {},
+ 0, 0, 0, 0 ] )
+ c = doc.createElement("class")
+ lines = doc.createElement("lines")
+ c.appendChild(lines)
+
+ class_lines = 0
+ class_hits = 0
+ class_branches = 0
+ class_branch_hits = 0
+ for line in data.all_lines:
+ hits = data.covered.get(line, 0)
+ class_lines += 1
+ if hits > 0:
+ class_hits += 1
+ l = doc.createElement("line")
+ l.setAttribute("number", str(line))
+ l.setAttribute("hits", str(hits))
+ branches = data.branches.get(line)
+ if branches is None:
+ l.setAttribute("branch", "false")
+ else:
+ b_hits = 0
+ for v in branches.values():
+ if v > 0:
+ b_hits += 1
+ coverage = 100*b_hits/len(branches)
+ l.setAttribute("branch", "true")
+ l.setAttribute( "condition-coverage",
+ "%i%% (%i/%i)" %
+ (coverage, b_hits, len(branches)) )
+ cond = doc.createElement('condition')
+ cond.setAttribute("number", "0")
+ cond.setAttribute("type", "jump")
+ cond.setAttribute("coverage", "%i%%" % ( coverage ) )
+ class_branch_hits += b_hits
+ class_branches += float(len(branches))
+ conditions = doc.createElement("conditions")
+ conditions.appendChild(cond)
+ l.appendChild(conditions)
+
+ lines.appendChild(l)
+
+ className = fname.replace('.', '_')
+ c.setAttribute("name", className)
+ c.setAttribute("filename", os.path.join(dir, fname))
+ c.setAttribute("line-rate", str(class_hits / (1.0*class_lines or 1.0)))
+ c.setAttribute( "branch-rate",
+ str(class_branch_hits / (1.0*class_branches or 1.0)) )
+ c.setAttribute("complexity", "0.0")
+
+ package[1][className] = c
+ package[2] += class_hits
+ package[3] += class_lines
+ package[4] += class_branch_hits
+ package[5] += class_branches
+
+ for packageName, packageData in packages.items():
+        package = packageData[0]
+ packageXml.appendChild(package)
+ classes = doc.createElement("classes")
+ package.appendChild(classes)
+ classNames = list(packageData[1].keys())
+ classNames.sort()
+ for className in classNames:
+ classes.appendChild(packageData[1][className])
+ package.setAttribute("name", packageName.replace(os.sep, '.'))
+ package.setAttribute("line-rate", str(packageData[2]/(1.0*packageData[3] or 1.0)))
+ package.setAttribute( "branch-rate", str(packageData[4] / (1.0*packageData[5] or 1.0) ))
+ package.setAttribute("complexity", "0.0")
+
+
+ # Populate the <sources> element: this is either the root directory
+ # (specified by --root), or relative directories based
+ # on the filter, or the CWD
+ if options.root is not None:
+ source = doc.createElement("source")
+ source.appendChild(doc.createTextNode(options.root.strip()))
+ sources.appendChild(source)
+ elif len(source_dirs) > 0:
+ cwd = os.getcwd()
+ for d in source_dirs:
+ source = doc.createElement("source")
+ if d.startswith(cwd):
+ reldir = d[len(cwd):].lstrip(os.path.sep)
+ elif cwd.startswith(d):
+ i = 1
+ while normpath(d) != normpath(os.path.join(*tuple([cwd]+['..']*i))):
+ i += 1
+ reldir = os.path.join(*tuple(['..']*i))
+ else:
+ reldir = d
+ source.appendChild(doc.createTextNode(reldir.strip()))
+ sources.appendChild(source)
+ else:
+ source = doc.createElement("source")
+ source.appendChild(doc.createTextNode('.'))
+ sources.appendChild(source)
+
+ if options.prettyxml:
+ import textwrap
+ lines = doc.toprettyxml(" ").split('\n')
+ for i in xrange(len(lines)):
+ n=0
+ while n < len(lines[i]) and lines[i][n] == " ":
+ n += 1
+ lines[i] = "\n".join(textwrap.wrap(lines[i], 78, break_long_words=False, break_on_hyphens=False, subsequent_indent=" "+ n*" "))
+ xmlString = "\n".join(lines)
+ #print textwrap.wrap(doc.toprettyxml(" "), 80)
+ else:
+ xmlString = doc.toprettyxml(indent="")
+ if options.output is None:
+ sys.stdout.write(xmlString+'\n')
+ else:
+ OUTPUT = open(options.output, 'w')
+ OUTPUT.write(xmlString +'\n')
+ OUTPUT.close()
+
+
+##
+## MAIN
+##
+
+#
+# Create option parser
+#
+parser = OptionParser()
+parser.add_option("--version",
+ help="Print the version number, then exit",
+ action="store_true",
+ dest="version",
+ default=False)
+parser.add_option("-v","--verbose",
+ help="Print progress messages",
+ action="store_true",
+ dest="verbose",
+ default=False)
+parser.add_option('--object-directory',
+ help="Specify the directory that contains the gcov data files. gcovr must be able to identify the path between the *.gcda files and the directory where gcc was originally run. Normally, gcovr can guess correctly. This option overrides gcovr's normal path detection and can specify either the path from gcc to the gcda file (i.e. what was passed to gcc's '-o' option), or the path from the gcda file to gcc's original working directory.",
+ action="store",
+ dest="objdir",
+ default=None)
+parser.add_option("-o","--output",
+ help="Print output to this filename",
+ action="store",
+ dest="output",
+ default=None)
+parser.add_option("-k","--keep",
+ help="Keep the temporary *.gcov files generated by gcov. By default, these are deleted.",
+ action="store_true",
+ dest="keep",
+ default=False)
+parser.add_option("-d","--delete",
+                  help="Delete the coverage files after they are processed. These are generated by the user's program, and by default gcovr does not remove these files.",
+ action="store_true",
+ dest="delete",
+ default=False)
+parser.add_option("-f","--filter",
+ help="Keep only the data files that match this regular expression",
+ action="store",
+ dest="filter",
+ default=None)
+parser.add_option("-e","--exclude",
+ help="Exclude data files that match this regular expression",
+ action="append",
+ dest="exclude",
+ default=[])
+parser.add_option("--gcov-filter",
+ help="Keep only gcov data files that match this regular expression",
+ action="store",
+ dest="gcov_filter",
+ default=None)
+parser.add_option("--gcov-exclude",
+ help="Exclude gcov data files that match this regular expression",
+ action="append",
+ dest="gcov_exclude",
+ default=[])
+parser.add_option("-r","--root",
+ help="Defines the root directory. This is used to filter the files, and to standardize the output.",
+ action="store",
+ dest="root",
+ default=None)
+parser.add_option("-x","--xml",
+ help="Generate XML instead of the normal tabular output.",
+ action="store_true",
+ dest="xml",
+ default=False)
+parser.add_option("--xml-pretty",
+ help="Generate pretty XML instead of the normal dense format.",
+ action="store_true",
+ dest="prettyxml",
+ default=False)
+parser.add_option("-b","--branches",
+ help="Tabulate the branch coverage instead of the line coverage.",
+ action="store_true",
+ dest="show_branch",
+ default=None)
+parser.add_option("-u","--sort-uncovered",
+ help="Sort entries by increasing number of uncovered lines.",
+ action="store_true",
+ dest="sort_uncovered",
+ default=None)
+parser.add_option("-p","--sort-percentage",
+ help="Sort entries by decreasing percentage of covered lines.",
+ action="store_true",
+ dest="sort_percent",
+ default=None)
+parser.usage="gcovr [options]"
+parser.description="A utility to run gcov and generate a simple report that summarizes the coverage"
+#
+# Process options
+#
+(options, args) = parser.parse_args(args=sys.argv)
+if options.version:
+ sys.stdout.write(
+ "gcovr %s\n"
+ "\n"
+ "Copyright (2008) Sandia Corporation. Under the terms of Contract\n"
+ "DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government\n"
+ "retains certain rights in this software.\n"
+ % (version_str(),) )
+ sys.exit(0)
+if options.objdir:
+ tmp = options.objdir.replace('/',os.sep).replace('\\',os.sep)
+ while os.sep+os.sep in tmp:
+ tmp = tmp.replace(os.sep+os.sep, os.sep)
+ if normpath(options.objdir) != tmp:
+ sys.stderr.write(
+ "(WARNING) relative referencing in --object-directory.\n"
+ "\tthis could cause strange errors when gcovr attempts to\n"
+ "\tidentify the original gcc working directory.\n")
+#
+# Setup filters
+#
+for i in range(0,len(options.exclude)):
+ options.exclude[i] = re.compile(options.exclude[i])
+if options.filter is not None:
+ options.filter = re.compile(options.filter)
+elif options.root is not None:
+ if not options.root:
+ sys.stderr.write(
+ "(ERROR) empty --root option.\n"
+ "\tRoot specifies the path to the root directory of your project.\n"
+ "\tThis option cannot be an empty string.\n")
+ sys.exit(1)
+ options.filter = re.compile(re.escape(os.path.abspath(options.root)+os.sep))
+if options.filter is None:
+ options.filter = re.compile('')
+#
+for i in range(0,len(options.gcov_exclude)):
+ options.gcov_exclude[i] = re.compile(options.gcov_exclude[i])
+if options.gcov_filter is not None:
+ options.gcov_filter = re.compile(options.gcov_filter)
+else:
+ options.gcov_filter = re.compile('')
+#
+# Get data files
+#
+if len(args) == 1:
+ datafiles = get_datafiles(["."], options)
+else:
+ datafiles = get_datafiles(args[1:], options)
+#
+# Get coverage data
+#
+covdata = {}
+for file in datafiles:
+ process_datafile(file,covdata,options)
+if options.verbose:
+    sys.stdout.write("Gathered coverage data for "+str(len(covdata))+" files\n")
+#
+# Print report
+#
+if options.xml or options.prettyxml:
+ print_xml_report(covdata)
+else:
+ print_text_report(covdata)
diff --git a/Tools/Scripts/webkitpy/tool/grammar_unittest.py b/Tools/Scripts/webkitpy/tool/grammar_unittest.py
index cab71db01..dd8081f32 100644
--- a/Tools/Scripts/webkitpy/tool/grammar_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/grammar_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.tool.grammar import join_with_separators
@@ -36,6 +36,3 @@ class GrammarTest(unittest.TestCase):
self.assertEqual(join_with_separators(["one"]), "one")
self.assertEqual(join_with_separators(["one", "two"]), "one and two")
self.assertEqual(join_with_separators(["one", "two", "three"]), "one, two, and three")
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/main.py b/Tools/Scripts/webkitpy/tool/main.py
index 68348a05a..3fa6e6b5b 100755..100644
--- a/Tools/Scripts/webkitpy/tool/main.py
+++ b/Tools/Scripts/webkitpy/tool/main.py
@@ -48,6 +48,7 @@ class WebKitPatch(MultiCommandTool, Host):
make_option("--status-host", action="store", dest="status_host", type="string", help="Hostname (e.g. localhost or commit.webkit.org) where status updates should be posted."),
make_option("--bot-id", action="store", dest="bot_id", type="string", help="Identifier for this bot (if multiple bots are running for a queue)"),
make_option("--irc-password", action="store", dest="irc_password", type="string", help="Password to use when communicating via IRC."),
+ make_option("--seconds-to-sleep", action="store", default=120, type="int", help="Number of seconds to sleep in the task queue."),
make_option("--port", action="store", dest="port", default=None, help="Specify a port (e.g., mac, qt, gtk, ...)."),
]
@@ -61,8 +62,7 @@ class WebKitPatch(MultiCommandTool, Host):
self._irc = None
self._deprecated_port = None
- # FIXME: Rename this deprecated_port()
- def port(self):
+ def deprecated_port(self):
return self._deprecated_port
def path(self):
diff --git a/Tools/Scripts/webkitpy/tool/mocktool.py b/Tools/Scripts/webkitpy/tool/mocktool.py
index b8f0976bc..175d1b848 100644
--- a/Tools/Scripts/webkitpy/tool/mocktool.py
+++ b/Tools/Scripts/webkitpy/tool/mocktool.py
@@ -71,7 +71,7 @@ class MockTool(MockHost):
self.irc_password = "MOCK irc password"
self.wakeup_event = threading.Event()
- def port(self):
+ def deprecated_port(self):
return self._deprecated_port
def path(self):
@@ -83,6 +83,3 @@ class MockTool(MockHost):
def irc(self):
return self._irc
-
- def buildbot_for_builder_name(self, name):
- return MockBuildBot()
diff --git a/Tools/Scripts/webkitpy/tool/mocktool_unittest.py b/Tools/Scripts/webkitpy/tool/mocktool_unittest.py
index cceaa2e0a..35fdd3aac 100644
--- a/Tools/Scripts/webkitpy/tool/mocktool_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/mocktool_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from mocktool import MockOptions
@@ -53,7 +53,3 @@ class MockOptionsTest(unittest.TestCase):
# Test that keyword arguments work in the constructor.
options = MockOptions(foo='bar')
self.assertEqual(options.foo, 'bar')
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/multicommandtool.py b/Tools/Scripts/webkitpy/tool/multicommandtool.py
index e2f91a7da..01b022f32 100644
--- a/Tools/Scripts/webkitpy/tool/multicommandtool.py
+++ b/Tools/Scripts/webkitpy/tool/multicommandtool.py
@@ -48,11 +48,12 @@ class TryAgain(Exception):
class Command(object):
name = None
show_in_main_help = False
- def __init__(self, help_text, argument_names=None, options=None, long_help=None, requires_local_commits=False):
- self.help_text = help_text
- self.long_help = long_help
- self.argument_names = argument_names
- self.required_arguments = self._parse_required_arguments(argument_names)
+ help_text = None
+ long_help = None
+ argument_names = None
+
+ def __init__(self, options=None, requires_local_commits=False):
+ self.required_arguments = self._parse_required_arguments(self.argument_names)
self.options = options
self.requires_local_commits = requires_local_commits
self._tool = None
@@ -139,15 +140,6 @@ class Command(object):
return self.check_arguments_and_execute(options, args)
-# FIXME: This should just be rolled into Command. help_text and argument_names do not need to be instance variables.
-class AbstractDeclarativeCommand(Command):
- help_text = None
- argument_names = None
- long_help = None
- def __init__(self, options=None, **kwargs):
- Command.__init__(self, self.help_text, self.argument_names, options=options, long_help=self.long_help, **kwargs)
-
-
class HelpPrintingOptionParser(OptionParser):
def __init__(self, epilog_method=None, *args, **kwargs):
self.epilog_method = epilog_method
@@ -168,7 +160,7 @@ class HelpPrintingOptionParser(OptionParser):
return ""
-class HelpCommand(AbstractDeclarativeCommand):
+class HelpCommand(Command):
name = "help"
help_text = "Display information about this program or its subcommands"
argument_names = "[COMMAND]"
@@ -177,7 +169,7 @@ class HelpCommand(AbstractDeclarativeCommand):
options = [
make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"),
]
- AbstractDeclarativeCommand.__init__(self, options)
+ Command.__init__(self, options)
self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser.
def _help_epilog(self):
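
With AbstractDeclarativeCommand folded into Command, subcommands now declare their metadata as class attributes rather than constructor arguments. A minimal sketch of a command written against the new base class (the command itself is hypothetical):

    from webkitpy.tool.multicommandtool import Command

    class ShowQueue(Command):
        name = "show-queue"
        help_text = "Print the patches waiting in a queue"
        argument_names = "[QUEUE_NAME]"

        def execute(self, options, args, tool):
            queue_name = args[0] if args else "commit-queue"
            print("Listing patches for %s" % queue_name)
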
diff --git a/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py b/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py
index ecb1df007..a498e6929 100644
--- a/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/multicommandtool_unittest.py
@@ -27,7 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
-import unittest
+import unittest2 as unittest
from optparse import make_option
@@ -38,8 +38,9 @@ from webkitpy.tool.multicommandtool import MultiCommandTool, Command, TryAgain
class TrivialCommand(Command):
name = "trivial"
show_in_main_help = True
+ help_text = "help text"
def __init__(self, **kwargs):
- Command.__init__(self, "help text", **kwargs)
+ Command.__init__(self, **kwargs)
def execute(self, options, args, tool):
pass
@@ -53,9 +54,10 @@ class UncommonCommand(TrivialCommand):
class LikesToRetry(Command):
name = "likes-to-retry"
show_in_main_help = True
+ help_text = "help text"
def __init__(self, **kwargs):
- Command.__init__(self, "help text", **kwargs)
+ Command.__init__(self, **kwargs)
self.execute_count = 0
def execute(self, options, args, tool):
@@ -66,9 +68,11 @@ class LikesToRetry(Command):
class CommandTest(unittest.TestCase):
def test_name_with_arguments(self):
- command_with_args = TrivialCommand(argument_names="ARG1 ARG2")
+ TrivialCommand.argument_names = "ARG1 ARG2"
+ command_with_args = TrivialCommand()
self.assertEqual(command_with_args.name_with_arguments(), "trivial ARG1 ARG2")
+ TrivialCommand.argument_names = None
command_with_args = TrivialCommand(options=[make_option("--my_option")])
self.assertEqual(command_with_args.name_with_arguments(), "trivial [options]")
@@ -80,10 +84,12 @@ class CommandTest(unittest.TestCase):
self.assertRaises(Exception, Command._parse_required_arguments, "[ARG1 ARG2]")
def test_required_arguments(self):
- two_required_arguments = TrivialCommand(argument_names="ARG1 ARG2 [ARG3]")
+ TrivialCommand.argument_names = "ARG1 ARG2 [ARG3]"
+ two_required_arguments = TrivialCommand()
expected_logs = "2 arguments required, 1 argument provided. Provided: 'foo' Required: ARG1 ARG2\nSee 'trivial-tool help trivial' for usage.\n"
exit_code = OutputCapture().assert_outputs(self, two_required_arguments.check_arguments_and_execute, [None, ["foo"], TrivialTool()], expected_logs=expected_logs)
self.assertEqual(exit_code, 1)
+ TrivialCommand.argument_names = None
class TrivialTool(MultiCommandTool):
@@ -167,11 +173,8 @@ See 'trivial-tool help COMMAND' for more information on a specific command.
def test_command_help(self):
- command_with_options = TrivialCommand(options=[make_option("--my_option")], long_help="LONG HELP")
+ TrivialCommand.long_help = "LONG HELP"
+ command_with_options = TrivialCommand(options=[make_option("--my_option")])
tool = TrivialTool(commands=[command_with_options])
expected_subcommand_help = "trivial [options] help text\n\nLONG HELP\n\nOptions:\n --my_option=MY_OPTION\n\n"
self._assert_tool_main_outputs(tool, ["tool", "help", "trivial"], expected_subcommand_help)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
index 77068acf4..b06984660 100644
--- a/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
+++ b/Tools/Scripts/webkitpy/tool/servers/gardeningserver.py
@@ -32,7 +32,7 @@ import urllib
from webkitpy.common.memoized import memoized
from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
-from webkitpy.layout_tests.port import builders
+from webkitpy.port import builders
_log = logging.getLogger(__name__)
@@ -54,7 +54,7 @@ class GardeningHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer
class GardeningHTTPRequestHandler(ReflectionHandler):
STATIC_FILE_NAMES = frozenset()
- STATIC_FILE_EXTENSIONS = ('.js', '.css', '.html', '.gif', '.png')
+ STATIC_FILE_EXTENSIONS = ('.js', '.css', '.html', '.gif', '.png', '.ico')
STATIC_FILE_DIRECTORY = os.path.join(
os.path.dirname(__file__),
diff --git a/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py b/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py
index 438cc0583..9f9efe807 100644
--- a/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/servers/gardeningserver_unittest.py
@@ -28,11 +28,11 @@
import json
import sys
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models.test_configuration import *
-from webkitpy.layout_tests.port import builders
+from webkitpy.port import builders
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockTool
from webkitpy.common.system.executive_mock import MockExecutive
diff --git a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
index 9e9c379d6..41a32ba54 100644
--- a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
+++ b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver.py
@@ -32,7 +32,8 @@ import os.path
import BaseHTTPServer
from webkitpy.common.host import Host # FIXME: This should not be needed!
-from webkitpy.layout_tests.port.base import Port
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.port.base import Port
from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
@@ -114,13 +115,13 @@ def _rebaseline_test(test_file, baseline_target, baseline_move_to, test_config,
destination_path = filesystem.join(
target_expectations_directory, destination_file)
filesystem.copyfile(source_path, destination_path)
- exit_code = scm.add(destination_path, return_exit_code=True)
- if exit_code:
+ try:
+ scm.add(destination_path)
+ log(' Updated %s' % destination_file)
+ except ScriptError, error:
log(' Could not update %s in SCM, exit code %d' %
- (destination_file, exit_code))
+ (destination_file, error.exit_code))
return False
- else:
- log(' Updated %s' % destination_file)
return True
@@ -150,13 +151,13 @@ def _move_test_baselines(test_file, extensions_to_move, source_platform, destina
source_path = filesystem.join(source_directory, file_name)
destination_path = filesystem.join(destination_directory, file_name)
filesystem.copyfile(source_path, destination_path)
- exit_code = test_config.scm.add(destination_path, return_exit_code=True)
- if exit_code:
+ try:
+ test_config.scm.add(destination_path)
+ log(' Moved %s' % file_name)
+ except ScriptError, error:
log(' Could not update %s in SCM, exit code %d' %
- (file_name, exit_code))
+ (file_name, error.exit_code))
return False
- else:
- log(' Moved %s' % file_name)
return True
diff --git a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
index f5c1cbf5e..721154cb6 100644
--- a/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/servers/rebaselineserver_unittest.py
@@ -27,12 +27,12 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
-import unittest
+import unittest2 as unittest
from webkitpy.common.net import resultsjsonparser_unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests.layout_package.json_results_generator import strip_json_wrapper
-from webkitpy.layout_tests.port.base import Port
+from webkitpy.port.base import Port
from webkitpy.tool.commands.rebaselineserver import TestConfig, RebaselineServer
from webkitpy.tool.servers import rebaselineserver
@@ -211,7 +211,7 @@ class RebaselineTestTest(unittest.TestCase):
server._test_config = get_test_config()
server._gather_baselines(results_json)
self.assertEqual(results_json['tests']['svg/dynamic-updates/SVGFEDropShadowElement-dom-stdDeviation-attr.html']['state'], 'needs_rebaseline')
- self.assertFalse('prototype-chocolate.html' in results_json['tests'])
+ self.assertNotIn('prototype-chocolate.html', results_json['tests'])
def _assertRebaseline(self, test_files, results_files, test_name, baseline_target, baseline_move_to, expected_success, expected_log):
log = []
@@ -234,7 +234,7 @@ class GetActualResultFilesTest(unittest.TestCase):
'fast/text2-actual.txt',
'fast/text-notactual.txt',
))
- self.assertEqual(
+ self.assertItemsEqual(
('text-actual.txt',),
rebaselineserver._get_actual_result_files(
'fast/text.html', test_config))
diff --git a/Tools/Scripts/webkitpy/tool/servers/reflectionhandler_unittest.py b/Tools/Scripts/webkitpy/tool/servers/reflectionhandler_unittest.py
index d269dfcf5..e1d562364 100644
--- a/Tools/Scripts/webkitpy/tool/servers/reflectionhandler_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/servers/reflectionhandler_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.tool.servers.reflectionhandler import ReflectionHandler
diff --git a/Tools/Scripts/webkitpy/tool/steps/__init__.py b/Tools/Scripts/webkitpy/tool/steps/__init__.py
index 56429e8fe..655f7d50a 100644
--- a/Tools/Scripts/webkitpy/tool/steps/__init__.py
+++ b/Tools/Scripts/webkitpy/tool/steps/__init__.py
@@ -35,23 +35,23 @@ from webkitpy.tool.steps.attachtobug import AttachToBug
from webkitpy.tool.steps.build import Build
from webkitpy.tool.steps.checkstyle import CheckStyle
from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory
-from webkitpy.tool.steps.cleanworkingdirectorywithlocalcommits import CleanWorkingDirectoryWithLocalCommits
from webkitpy.tool.steps.closebug import CloseBug
from webkitpy.tool.steps.closebugforlanddiff import CloseBugForLandDiff
from webkitpy.tool.steps.closepatch import ClosePatch
from webkitpy.tool.steps.commit import Commit
from webkitpy.tool.steps.confirmdiff import ConfirmDiff
from webkitpy.tool.steps.createbug import CreateBug
+from webkitpy.tool.steps.discardlocalchanges import DiscardLocalChanges
from webkitpy.tool.steps.editchangelog import EditChangeLog
from webkitpy.tool.steps.ensurebugisopenandassigned import EnsureBugIsOpenAndAssigned
from webkitpy.tool.steps.ensurelocalcommitifneeded import EnsureLocalCommitIfNeeded
+from webkitpy.tool.steps.haslanded import HasLanded
from webkitpy.tool.steps.obsoletepatches import ObsoletePatches
from webkitpy.tool.steps.options import Options
from webkitpy.tool.steps.postdiff import PostDiff
from webkitpy.tool.steps.postdiffforcommit import PostDiffForCommit
from webkitpy.tool.steps.postdiffforrevert import PostDiffForRevert
from webkitpy.tool.steps.preparechangelog import PrepareChangeLog
-from webkitpy.tool.steps.preparechangelogfordepsroll import PrepareChangeLogForDEPSRoll
from webkitpy.tool.steps.preparechangelogforrevert import PrepareChangeLogForRevert
from webkitpy.tool.steps.promptforbugortitle import PromptForBugOrTitle
from webkitpy.tool.steps.reopenbugafterrollout import ReopenBugAfterRollout
@@ -60,6 +60,5 @@ from webkitpy.tool.steps.runtests import RunTests
from webkitpy.tool.steps.suggestreviewers import SuggestReviewers
from webkitpy.tool.steps.update import Update
from webkitpy.tool.steps.updatechangelogswithreviewer import UpdateChangeLogsWithReviewer
-from webkitpy.tool.steps.updatechromiumdeps import UpdateChromiumDEPS
from webkitpy.tool.steps.validatechangelogs import ValidateChangeLogs
from webkitpy.tool.steps.validatereviewer import ValidateReviewer
diff --git a/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng_unittest.py b/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng_unittest.py
index 9fab6f438..12be0bee2 100644
--- a/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/addsvnmimetypeforpng_unittest.py
@@ -21,7 +21,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.tool.steps.addsvnmimetypeforpng import AddSvnMimetypeForPng
from webkitpy.common.system.filesystem_mock import MockFileSystem
diff --git a/Tools/Scripts/webkitpy/tool/steps/applywatchlist_unittest.py b/Tools/Scripts/webkitpy/tool/steps/applywatchlist_unittest.py
index a978f4164..a740c3d3c 100644
--- a/Tools/Scripts/webkitpy/tool/steps/applywatchlist_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/applywatchlist_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
diff --git a/Tools/Scripts/webkitpy/tool/steps/build.py b/Tools/Scripts/webkitpy/tool/steps/build.py
index a2a627229..b02830ca2 100644
--- a/Tools/Scripts/webkitpy/tool/steps/build.py
+++ b/Tools/Scripts/webkitpy/tool/steps/build.py
@@ -48,7 +48,7 @@ class Build(AbstractStep):
environment.disable_gcc_smartquotes()
env = environment.to_dictionary()
- build_webkit_command = self._tool.port().build_webkit_command(build_style=build_style)
+ build_webkit_command = self._tool.deprecated_port().build_webkit_command(build_style=build_style)
self._tool.executive.run_and_throw_if_fail(build_webkit_command, self._options.quiet,
cwd=self._tool.scm().checkout_root, env=env)
diff --git a/Tools/Scripts/webkitpy/tool/steps/checkstyle.py b/Tools/Scripts/webkitpy/tool/steps/checkstyle.py
index 0cb15f4c1..cec8a8132 100644
--- a/Tools/Scripts/webkitpy/tool/steps/checkstyle.py
+++ b/Tools/Scripts/webkitpy/tool/steps/checkstyle.py
@@ -57,7 +57,7 @@ class CheckStyle(AbstractStep):
args.append(self._options.check_style_filter)
try:
- self._tool.executive.run_and_throw_if_fail(self._tool.port().check_webkit_style_command() + args, cwd=self._tool.scm().checkout_root)
+ self._tool.executive.run_and_throw_if_fail(self._tool.deprecated_port().check_webkit_style_command() + args, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
if self._options.non_interactive:
# We need to re-raise the exception here to have the
diff --git a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py
index 191352440..a4cbe82c5 100644
--- a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py
+++ b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory.py
@@ -28,12 +28,10 @@
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.executive import ScriptError
class CleanWorkingDirectory(AbstractStep):
- def __init__(self, tool, options, allow_local_commits=False):
- AbstractStep.__init__(self, tool, options)
- self._allow_local_commits = allow_local_commits
@classmethod
def options(cls):
@@ -45,6 +43,8 @@ class CleanWorkingDirectory(AbstractStep):
def run(self, state):
if not self._options.clean:
return
- if not self._allow_local_commits:
- self._tool.scm().ensure_no_local_commits(self._options.force_clean)
- self._tool.scm().ensure_clean_working_directory(force_clean=self._options.force_clean)
+
+ if self._tool.scm().has_working_directory_changes() and not self._options.force_clean:
+ raise ScriptError("Working directory has changes, pass --force-clean to continue.")
+
+ self._tool.scm().discard_working_directory_changes()
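
For reference, a minimal standalone sketch of the clean-up flow introduced above; the scm methods and the error message mirror the patch, while the free function wrapper itself is illustrative:

from webkitpy.common.system.executive import ScriptError

def clean_working_directory(scm, clean=True, force_clean=False):
    # Mirrors CleanWorkingDirectory.run() after this change: refuse to touch a
    # dirty tree unless --force-clean was passed, then discard any changes.
    if not clean:
        return
    if scm.has_working_directory_changes() and not force_clean:
        raise ScriptError("Working directory has changes, pass --force-clean to continue.")
    scm.discard_working_directory_changes()
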
diff --git a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py
index 15a8850a5..7e31a9bd8 100644
--- a/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/cleanworkingdirectory_unittest.py
@@ -26,27 +26,43 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.cleanworkingdirectory import CleanWorkingDirectory
+from webkitpy.common.system.executive import ScriptError
class CleanWorkingDirectoryTest(unittest.TestCase):
- def test_run(self):
+ def test_run_working_directory_changes_no_force(self):
tool = MockTool()
tool._scm = Mock()
- tool._scm.checkout_root = '/mock-checkout'
step = CleanWorkingDirectory(tool, MockOptions(clean=True, force_clean=False))
+ tool._scm.has_working_directory_changes = lambda: True
+ self.assertRaises(ScriptError, step.run, {})
+ self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 0)
+
+ def test_run_working_directory_changes_force(self):
+ tool = MockTool()
+ tool._scm = Mock()
+ step = CleanWorkingDirectory(tool, MockOptions(clean=True, force_clean=True))
+ tool._scm.has_working_directory_changes = lambda: True
+ step.run({})
+ self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 1)
+
+ def test_run_no_local_changes(self):
+ tool = MockTool()
+ tool._scm = Mock()
+ step = CleanWorkingDirectory(tool, MockOptions(clean=True, force_clean=False))
+ tool._scm.has_working_directory_changes = lambda: False
+ tool._scm.has_local_commits = lambda: False
step.run({})
- self.assertEqual(tool._scm.ensure_no_local_commits.call_count, 1)
- self.assertEqual(tool._scm.ensure_clean_working_directory.call_count, 1)
+ self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 1)
def test_no_clean(self):
tool = MockTool()
tool._scm = Mock()
step = CleanWorkingDirectory(tool, MockOptions(clean=False))
step.run({})
- self.assertEqual(tool._scm.ensure_no_local_commits.call_count, 0)
- self.assertEqual(tool._scm.ensure_clean_working_directory.call_count, 0)
+ self.assertEqual(tool._scm.discard_working_directory_changes.call_count, 0)
diff --git a/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py
index 6969c4e9a..b042d4258 100644
--- a/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/closebugforlanddiff_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
diff --git a/Tools/Scripts/webkitpy/tool/steps/commit.py b/Tools/Scripts/webkitpy/tool/steps/commit.py
index 2bffa4c2a..1d5109a00 100644
--- a/Tools/Scripts/webkitpy/tool/steps/commit.py
+++ b/Tools/Scripts/webkitpy/tool/steps/commit.py
@@ -40,20 +40,17 @@ _log = logging.getLogger(__name__)
class Commit(AbstractStep):
- # FIXME: This option exists only to make sure we don't break scripts which include --ignore-builders
- # You can safely delete this option any time after 11/01/11.
@classmethod
def options(cls):
return AbstractStep.options() + [
- Options.check_builders,
Options.non_interactive,
]
def _commit_warning(self, error):
- working_directory_message = "" if error.working_directory_is_clean else " and working copy changes"
- return ('There are %s local commits%s. Everything will be committed as a single commit. '
+        return ('There are %s local commits (and possibly changes in the working directory). '
+ 'Everything will be committed as a single commit. '
'To avoid this prompt, set "git config webkit-patch.commit-should-always-squash true".' % (
- error.num_local_commits, working_directory_message))
+ error.num_local_commits))
def _check_test_expectations(self, changed_files):
test_expectations_files = [filename for filename in changed_files if filename.endswith('TestExpectations')]
@@ -63,7 +60,7 @@ class Commit(AbstractStep):
args = ["--diff-files"]
args.extend(test_expectations_files)
try:
- self._tool.executive.run_and_throw_if_fail(self._tool.port().check_webkit_style_command() + args, cwd=self._tool.scm().checkout_root)
+ self._tool.executive.run_and_throw_if_fail(self._tool.deprecated_port().check_webkit_style_command() + args, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
if self._options.non_interactive:
raise
@@ -76,12 +73,11 @@ class Commit(AbstractStep):
raise Exception("Attempted to commit with a commit message shorter than 10 characters. Either your patch is missing a ChangeLog or webkit-patch may have a bug.")
self._check_test_expectations(self._changed_files(state))
-
self._state = state
username = None
password = None
- force_squash = False
+ force_squash = self._options.non_interactive
num_tries = 0
while num_tries < 3:
@@ -95,7 +91,7 @@ class Commit(AbstractStep):
self._state["commit_text"] = commit_text
break;
except AmbiguousCommitError, e:
- if self._options.non_interactive or self._tool.user.confirm(self._commit_warning(e)):
+ if self._tool.user.confirm(self._commit_warning(e)):
force_squash = True
else:
# This will correctly interrupt the rest of the commit process.
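
A minimal sketch of the retry/squash logic as it now stands; all names below are illustrative stand-ins rather than webkitpy APIs, the point being that force_squash starts out True for non-interactive runs, so the commit-queue never reaches the confirmation prompt:

class AmbiguousCommitError(Exception):
    pass

def commit(do_commit, confirm, non_interactive):
    # After this change, non-interactive runs squash up front instead of
    # prompting when local commits make the commit ambiguous.
    force_squash = non_interactive
    num_tries = 0
    while num_tries < 3:
        num_tries += 1
        try:
            return do_commit(squash=force_squash)
        except AmbiguousCommitError:
            if confirm("Squash local commits into a single commit?"):
                force_squash = True
            else:
                raise
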
diff --git a/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py b/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py
index 936e3ebab..c6b76b428 100644
--- a/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/commit_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.executive import ScriptError
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py b/Tools/Scripts/webkitpy/tool/steps/discardlocalchanges.py
index 4bbd383ae..8a84cc702 100644
--- a/Tools/Scripts/webkitpy/tool/steps/preparechangelogfordepsroll.py
+++ b/Tools/Scripts/webkitpy/tool/steps/discardlocalchanges.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2011 Google Inc. All rights reserved.
+# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
@@ -26,13 +26,27 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.common.checkout.changelog import ChangeLog
from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.tool.steps.options import Options
+from webkitpy.common.system.executive import ScriptError
-class PrepareChangeLogForDEPSRoll(AbstractStep):
+class DiscardLocalChanges(AbstractStep):
+
+ @classmethod
+ def options(cls):
+ return AbstractStep.options() + [
+ Options.clean,
+ Options.force_clean,
+ ]
+
def run(self, state):
- self._tool.executive.run_and_throw_if_fail(self._tool.port().prepare_changelog_command())
- changelog_paths = self._tool.checkout().modified_changelogs(git_commit=None)
- for changelog_path in changelog_paths:
- ChangeLog(changelog_path).update_with_unreviewed_message("Unreviewed. Rolled DEPS.\n\n")
+ if not self._options.clean:
+ return
+
+ if not self._options.force_clean:
+ if self._tool.scm().has_working_directory_changes():
+ raise ScriptError("Working directory has changes, pass --force-clean to continue.")
+ if self._tool.scm().has_local_commits():
+ raise ScriptError("Repository has local commits, pass --force-clean to continue.")
+ self._tool.scm().discard_local_changes()
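
The new step can be exercised directly with the same test doubles the unit tests below use; a minimal usage sketch (MockTool, MockOptions, and the Mock scm come from this patch):

from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.discardlocalchanges import DiscardLocalChanges

tool = MockTool()
tool._scm = Mock()
step = DiscardLocalChanges(tool, MockOptions(clean=True, force_clean=True))
step.run({})  # calls tool.scm().discard_local_changes() exactly once
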
diff --git a/Tools/Scripts/webkitpy/tool/steps/discardlocalchanges_unittest.py b/Tools/Scripts/webkitpy/tool/steps/discardlocalchanges_unittest.py
new file mode 100644
index 000000000..d38fc926c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/discardlocalchanges_unittest.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+
+from webkitpy.thirdparty.mock import Mock
+from webkitpy.tool.mocktool import MockOptions, MockTool
+from webkitpy.tool.steps.discardlocalchanges import DiscardLocalChanges
+from webkitpy.common.system.executive import ScriptError
+
+
+class DiscardLocalChangesTest(unittest.TestCase):
+ def test_skip_on_clean(self):
+ tool = MockTool()
+ tool._scm = Mock()
+ step = DiscardLocalChanges(tool, MockOptions(clean=False))
+ step.run({})
+ self.assertEqual(tool._scm.discard_local_changes.call_count, 0)
+
+ def test_working_changes_exist_with_force(self):
+ tool = MockTool()
+ tool._scm = Mock()
+ tool._scm.has_working_directory_changes = lambda: True
+ tool._scm.has_local_commits = lambda: False
+ step = DiscardLocalChanges(tool, MockOptions(clean=True, force_clean=True))
+ step.run({})
+ self.assertEqual(tool._scm.discard_local_changes.call_count, 1)
+
+ def test_local_commits_exist_with_force(self):
+ tool = MockTool()
+ tool._scm = Mock()
+ tool._scm.has_working_directory_changes = lambda: False
+ tool._scm.has_local_commits = lambda: True
+ step = DiscardLocalChanges(tool, MockOptions(clean=True, force_clean=True))
+ step.run({})
+ self.assertEqual(tool._scm.discard_local_changes.call_count, 1)
+
+ def test_local_commits_and_working_changes_exist_with_force(self):
+ tool = MockTool()
+ tool._scm = Mock()
+ tool._scm.has_working_directory_changes = lambda: True
+ tool._scm.has_local_commits = lambda: True
+ step = DiscardLocalChanges(tool, MockOptions(clean=True, force_clean=True))
+ step.run({})
+ self.assertEqual(tool._scm.discard_local_changes.call_count, 1)
+
+ def test_no_changes_exist_with_force(self):
+ tool = MockTool()
+ tool._scm = Mock()
+ tool._scm.has_working_directory_changes = lambda: False
+ tool._scm.has_local_commits = lambda: False
+ step = DiscardLocalChanges(tool, MockOptions(clean=True, force_clean=True))
+ step.run({})
+ self.assertEqual(tool._scm.discard_local_changes.call_count, 1)
+
+ def test_error_working_changes_exist_without_force(self):
+ tool = MockTool()
+ tool._scm = Mock()
+ tool._scm.has_working_directory_changes = lambda: True
+ tool._scm.has_local_commits = lambda: False
+ step = DiscardLocalChanges(tool, MockOptions(clean=True, force_clean=False))
+ self.assertRaises(ScriptError, step.run, {})
+ self.assertEqual(tool._scm.discard_local_changes.call_count, 0)
+
+ def test_error_local_commits_exist_without_force(self):
+ tool = MockTool()
+ tool._scm = Mock()
+ tool._scm.has_working_directory_changes = lambda: False
+ tool._scm.has_local_commits = lambda: True
+ step = DiscardLocalChanges(tool, MockOptions(clean=True, force_clean=False))
+ self.assertRaises(ScriptError, step.run, {})
+ self.assertEqual(tool._scm.discard_local_changes.call_count, 0)
diff --git a/Tools/Scripts/webkitpy/tool/steps/haslanded.py b/Tools/Scripts/webkitpy/tool/steps/haslanded.py
new file mode 100644
index 000000000..b0692b32b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/haslanded.py
@@ -0,0 +1,120 @@
+# Copyright (C) 2010 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import cStringIO as StringIO
+import logging
+import sys
+import re
+import tempfile
+
+from webkitpy.tool.steps.abstractstep import AbstractStep
+from webkitpy.common.system.executive import Executive, ScriptError
+from webkitpy.common.checkout import diff_parser
+
+from webkitpy.tool.steps import confirmdiff
+
+_log = logging.getLogger(__name__)
+
+
+class HasLanded(confirmdiff.ConfirmDiff):
+
+ @classmethod
+ def convert_to_svn(cls, diff):
+ lines = StringIO.StringIO(diff).readlines()
+ convert = diff_parser.get_diff_converter(lines)
+ return "".join(convert(x) for x in lines)
+
+ @classmethod
+ def strip_change_log(cls, diff):
+ output = []
+ skipping = False
+ for line in StringIO.StringIO(diff).readlines():
+ indexline = re.match("^Index: ([^\\n]*/)?([^/\\n]*)$", line)
+ if skipping and indexline:
+ skipping = False
+ if indexline and indexline.group(2) == "ChangeLog":
+ skipping = True
+ if not skipping:
+ output.append(line)
+ return "".join(output)
+
+ @classmethod
+ def diff_diff(cls, diff1, diff2, diff1_suffix, diff2_suffix, executive=None):
+ # Now this is where it gets complicated, we need to compare our diff to the diff at landed_revision.
+ diff1_patch = tempfile.NamedTemporaryFile(suffix=diff1_suffix + '.patch')
+ diff1_patch.write(diff1)
+ diff1_patch.flush()
+
+ # Check if there are any differences in the patch that don't happen
+ diff2_patch = tempfile.NamedTemporaryFile(suffix=diff2_suffix + '.patch')
+ diff2_patch.write(diff2)
+ diff2_patch.flush()
+
+ # Diff the two diff's together...
+ if not executive:
+ executive = Executive()
+
+ try:
+ return executive.run_command(
+ ["interdiff", diff1_patch.name, diff2_patch.name], decode_output=False)
+ except ScriptError, e:
+ _log.warning("Unable to find interdiff util (part of GNU difftools package) which is required.")
+ raise
+
+ def run(self, state):
+ # Check if there are changes first
+ if not self._tool.scm().local_changes_exist():
+ _log.warn("No local changes found, exiting.")
+ return True
+
+ # Check if there is a SVN revision in the bug from the commit queue
+ landed_revision = self.cached_lookup(state, "bug").commit_revision()
+ if not landed_revision:
+ raise ScriptError("Unable to find landed message in associated bug.")
+
+        # Now this is where it gets complicated, we need to compare our diff to the diff at landed_revision.
+ landed_diff_bin = self._tool.scm().diff_for_revision(landed_revision)
+ landed_diff_trimmed = self.strip_change_log(self.convert_to_svn(landed_diff_bin))
+
+ # Check if there are any differences in the patch that don't happen
+ local_diff_bin = self._tool.scm().create_patch()
+ local_diff_trimmed = self.strip_change_log(self.convert_to_svn(local_diff_bin))
+
+ # Diff the two diff's together...
+ diff_diff = self.diff_diff(landed_diff_trimmed, local_diff_trimmed,
+ '-landed', '-local',
+ executive=self._tool.executive)
+
+ with self._show_pretty_diff(diff_diff) as pretty_diff_file:
+ if not pretty_diff_file:
+ self._tool.user.page(diff_diff)
+
+ if self._tool.user.confirm("May I discard local changes?"):
+ # Discard changes if the user confirmed we should
+ _log.warn("Discarding changes as requested.")
+ self._tool.scm().discard_local_changes()
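
HasLanded.diff_diff above shells out to interdiff (part of GNU patchutils) to compute the difference between two patches; a minimal standalone equivalent using only the standard library, with illustrative file suffixes and names:

import subprocess
import tempfile

def diff_of_diffs(landed_diff, local_diff):
    # Write both patches to temp files and let interdiff compare them; an
    # empty result means the local patch matches what already landed.
    with tempfile.NamedTemporaryFile(suffix='-landed.patch') as first:
        with tempfile.NamedTemporaryFile(suffix='-local.patch') as second:
            first.write(landed_diff)
            first.flush()
            second.write(local_diff)
            second.flush()
            return subprocess.check_output(['interdiff', first.name, second.name])
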
diff --git a/Tools/Scripts/webkitpy/tool/steps/haslanded_unittest.py b/Tools/Scripts/webkitpy/tool/steps/haslanded_unittest.py
new file mode 100644
index 000000000..3a67029a8
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/steps/haslanded_unittest.py
@@ -0,0 +1,299 @@
+# Copyright (C) 2009 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest2 as unittest
+import subprocess
+
+from webkitpy.tool.steps.haslanded import HasLanded
+
+
+class HasLandedTest(unittest.TestCase):
+ maxDiff = None
+
+ @unittest.skipUnless(subprocess.call('which interdiff', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0, "requires interdiff")
+ def test_run(self):
+ # These patches require trailing whitespace to remain valid patches.
+ diff1 = """\
+Index: a.py
+===================================================================
+--- a.py
++++ a.py
+@@ -1,3 +1,5 @@
+ A
+ B
+ C
++D
++E
+Index: b.py
+===================================================================
+--- b.py 2013-01-21 15:20:59.693887185 +1100
++++ b.py 2013-01-21 15:22:24.382555711 +1100
+@@ -1,3 +1,5 @@
+ 1
+ 2
+ 3
++4
++5
+"""
+
+ diff1_add_line = """\
+Index: a.py
+===================================================================
+--- a.py
++++ a.py
+@@ -1,3 +1,6 @@
+ A
+ B
+ C
++D
++E
++F
+Index: b.py
+===================================================================
+--- b.py
++++ b.py
+@@ -1,3 +1,5 @@
+ 1
+ 2
+ 3
++4
++5
+"""
+
+ diff1_remove_line = """\
+Index: a.py
+===================================================================
+--- a.py
++++ a.py
+@@ -1,3 +1,4 @@
+ A
+ B
+ C
++D
+Index: b.py
+===================================================================
+--- b.py
++++ b.py
+@@ -1,3 +1,5 @@
+ 1
+ 2
+ 3
++4
++5
+"""
+
+ diff1_add_file = diff1 + """\
+Index: c.py
+===================================================================
+--- c.py
++++ c.py
+@@ -1,3 +1,5 @@
+ 1
+ 2
+ 3
++4
++5
+"""
+
+ diff1_remove_file = """\
+Index: a.py
+===================================================================
+--- a.py
++++ a.py
+@@ -1,3 +1,5 @@
+ A
+ B
+ C
++D
++E
+"""
+ self.assertMultiLineEqual(
+ HasLanded.diff_diff(diff1, diff1_add_line, '', 'add-line'),
+ """\
+diff -u a.py a.py
+--- a.py
++++ a.py
+@@ -5,0 +6 @@
++F
+""")
+
+ self.assertMultiLineEqual(
+ HasLanded.diff_diff(diff1, diff1_remove_line, '', 'remove-line'),
+ """\
+diff -u a.py a.py
+--- a.py
++++ a.py
+@@ -5 +4,0 @@
+-E
+""")
+ self.assertMultiLineEqual(
+ HasLanded.diff_diff(diff1, diff1_add_file, '', 'add-file'),
+ """\
+only in patch2:
+unchanged:
+--- c.py
++++ c.py
+@@ -1,3 +1,5 @@
+ 1
+ 2
+ 3
++4
++5
+""")
+ self.assertMultiLineEqual(
+ HasLanded.diff_diff(diff1, diff1_remove_file, '', 'remove-file'),
+ """\
+reverted:
+--- b.py 2013-01-21 15:22:24.382555711 +1100
++++ b.py 2013-01-21 15:20:59.693887185 +1100
+@@ -1,5 +1,3 @@
+ 1
+ 2
+ 3
+-4
+-5
+""")
+
+ def test_convert_to_svn_and_strip_change_log(self):
+ # These patches require trailing whitespace to remain valid patches.
+ testbefore1 = HasLanded.convert_to_svn("""\
+diff --git a/Tools/ChangeLog b/Tools/ChangeLog
+index 219ba72..0390b73 100644
+--- a/Tools/ChangeLog
++++ b/Tools/ChangeLog
+@@ -1,3 +1,32 @@
++2013-01-17 Tim 'mithro' Ansell <mithro@mithis.com>
++
++ Adding "has-landed" command to webkit-patch which allows a person to
++ Reviewed by NOBODY (OOPS!).
++
+ 2013-01-20 Tim 'mithro' Ansell <mithro@mithis.com>
+
+ Extend diff_parser to support the --full-index output.
+diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+index 4bf8ec6..3a128cb 100644
+--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
++++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+@@ -28,6 +28,8 @@
++import re
++
+ from .attachment import Attachment
+
+""")
+ testafter1 = HasLanded.convert_to_svn("""\
+diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+index 4bf8ec6..3a128cb 100644
+--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
++++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+@@ -28,6 +28,8 @@
++import re
++
+ from .attachment import Attachment
+
+diff --git a/Tools/ChangeLog b/Tools/ChangeLog
+index 219ba72..0390b73 100644
+--- a/Tools/ChangeLog
++++ b/Tools/ChangeLog
+@@ -1,3 +1,32 @@
++2013-01-17 Tim 'mithro' Ansell <mithro@mithis.com>
++
++ Adding "has-landed" command to webkit-patch which allows a person to
++ Reviewed by NOBODY (OOPS!).
++
+ 2013-01-20 Tim 'mithro' Ansell <mithro@mithis.com>
+
+ Extend diff_parser to support the --full-index output.
+""")
+ testexpected1 = """\
+Index: Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+===================================================================
+--- Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
++++ Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+@@ -28,6 +28,8 @@
++import re
++
+ from .attachment import Attachment
+
+"""
+ testmiddle1 = HasLanded.convert_to_svn("""\
+diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+index 4bf8ec6..3a128cb 100644
+--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
++++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+@@ -28,6 +28,8 @@
++import re
++
+ from .attachment import Attachment
+
+diff --git a/ChangeLog b/ChangeLog
+index 219ba72..0390b73 100644
+--- a/ChangeLog
++++ b/ChangeLog
+@@ -1,3 +1,32 @@
++2013-01-17 Tim 'mithro' Ansell <mithro@mithis.com>
++
++ Adding "has-landed" command to webkit-patch which allows a person to
++ Reviewed by NOBODY (OOPS!).
++
+ 2013-01-20 Tim 'mithro' Ansell <mithro@mithis.com>
+
+ Extend diff_parser to support the --full-index output.
+diff --git a/Tools/Scripts/webkitpy/common/other.py b/Tools/Scripts/webkitpy/common/other.py
+index 4bf8ec6..3a128cb 100644
+--- a/Tools/Scripts/webkitpy/common/other.py
++++ b/Tools/Scripts/webkitpy/common/other.py
+@@ -28,6 +28,8 @@
++import re
++
+ from .attachment import Attachment
+
+""")
+ testexpected2 = """\
+Index: Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+===================================================================
+--- Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
++++ Tools/Scripts/webkitpy/common/net/bugzilla/bug.py
+@@ -28,6 +28,8 @@
++import re
++
+ from .attachment import Attachment
+
+Index: Tools/Scripts/webkitpy/common/other.py
+===================================================================
+--- Tools/Scripts/webkitpy/common/other.py
++++ Tools/Scripts/webkitpy/common/other.py
+@@ -28,6 +28,8 @@
++import re
++
+ from .attachment import Attachment
+
+"""
+
+ self.assertMultiLineEqual(testexpected1, HasLanded.strip_change_log(testbefore1))
+ self.assertMultiLineEqual(testexpected1, HasLanded.strip_change_log(testafter1))
+ self.assertMultiLineEqual(testexpected2, HasLanded.strip_change_log(testmiddle1))
diff --git a/Tools/Scripts/webkitpy/tool/steps/options.py b/Tools/Scripts/webkitpy/tool/steps/options.py
index c29e59d9c..7eda61459 100644
--- a/Tools/Scripts/webkitpy/tool/steps/options.py
+++ b/Tools/Scripts/webkitpy/tool/steps/options.py
@@ -33,7 +33,6 @@ class Options(object):
build = make_option("--build", action="store_true", dest="build", default=False, help="Build and run run-webkit-tests before committing.")
build_style = make_option("--build-style", action="store", dest="build_style", default=None, help="Whether to build debug, release, or both.")
cc = make_option("--cc", action="store", type="string", dest="cc", help="Comma-separated list of email addresses to carbon-copy.")
- check_builders = make_option("--ignore-builders", action="store_false", dest="check_builders", default=True, help="DEPRECATED: Will be removed any time after 11/01/11.")
check_style = make_option("--ignore-style", action="store_false", dest="check_style", default=True, help="Don't check to see if the patch has proper style before uploading.")
check_style_filter = make_option("--check-style-filter", action="store", type="string", dest="check_style_filter", default=None, help="Filter style-checker rules (see check-webkit-style --help).")
clean = make_option("--no-clean", action="store_false", dest="clean", default=True, help="Don't check if the working directory is clean before applying patches")
@@ -57,4 +56,5 @@ class Options(object):
suggest_reviewers = make_option("--suggest-reviewers", action="store_true", default=False, help="Offer to CC appropriate reviewers.")
test = make_option("--test", action="store_true", dest="test", default=False, help="Run run-webkit-tests before committing.")
update = make_option("--no-update", action="store_false", dest="update", default=True, help="Don't update the working directory.")
+ update_changelogs = make_option("--update-changelogs", action="store_true", dest="update_changelogs", default=False, help="Update existing ChangeLog entries with new date, bug description, and touched files/functions.")
changelog_count = make_option("--changelog-count", action="store", type="int", dest="changelog_count", help="Number of changelogs to parse.")
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py
index 4d80ab61f..716ab826d 100644
--- a/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py
+++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelog.py
@@ -27,6 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
+import re
import sys
from webkitpy.common.checkout.changelog import ChangeLog
@@ -44,6 +45,7 @@ class PrepareChangeLog(AbstractStep):
Options.quiet,
Options.email,
Options.git_commit,
+ Options.update_changelogs,
]
def _ensure_bug_url(self, state):
@@ -52,17 +54,60 @@ class PrepareChangeLog(AbstractStep):
bug_id = state.get("bug_id")
changelogs = self.cached_lookup(state, "changelogs")
for changelog_path in changelogs:
- changelog = ChangeLog(changelog_path)
+ changelog = ChangeLog(changelog_path, self._tool.filesystem)
if not changelog.latest_entry().bug_id():
changelog.set_short_description_and_bug_url(
self.cached_lookup(state, "bug_title"),
self._tool.bugs.bug_url_for_bug_id(bug_id))
+ def _resolve_existing_entry(self, changelog_path):
+ # When this is called, the top entry in the ChangeLog was just created
+        # by prepare-ChangeLog, as a clean updated version of the one below it.
+ with self._tool.filesystem.open_text_file_for_reading(changelog_path) as changelog_file:
+ entries_gen = ChangeLog.parse_entries_from_file(changelog_file)
+ entries = zip(entries_gen, range(2))
+
+ if not len(entries):
+ raise Exception("Expected to find at least two ChangeLog entries in %s but found none." % changelog_path)
+ if len(entries) == 1:
+ # If we get here, it probably means we've just rolled over to a
+ # new CL file, so we don't have anything to resolve.
+ return
+
+ (new_entry, _), (old_entry, _) = entries
+ final_entry = self._merge_entries(old_entry, new_entry)
+
+ changelog = ChangeLog(changelog_path, self._tool.filesystem)
+ changelog.delete_entries(2)
+ changelog.prepend_text(final_entry)
+
+ def _merge_entries(self, old_entry, new_entry):
+ final_entry = old_entry.contents()
+
+ final_entry = final_entry.replace(old_entry.date(), new_entry.date(), 1)
+
+ new_bug_desc = new_entry.bug_description()
+ old_bug_desc = old_entry.bug_description()
+ if new_bug_desc and old_bug_desc and new_bug_desc != old_bug_desc:
+ final_entry = final_entry.replace(old_bug_desc, new_bug_desc)
+
+ new_touched = new_entry.touched_functions()
+ old_touched = old_entry.touched_functions()
+ if new_touched != old_touched:
+ if old_entry.is_touched_files_text_clean():
+ final_entry = final_entry.replace(old_entry.touched_files_text(), new_entry.touched_files_text())
+ else:
+ final_entry += "\n" + new_entry.touched_files_text()
+
+ return final_entry + "\n"
+
def run(self, state):
if self.cached_lookup(state, "changelogs"):
self._ensure_bug_url(state)
- return
- args = self._tool.port().prepare_changelog_command()
+ if not self._options.update_changelogs:
+ return
+
+ args = self._tool.deprecated_port().prepare_changelog_command()
if state.get("bug_id"):
args.append("--bug=%s" % state["bug_id"])
args.append("--description=%s" % self.cached_lookup(state, 'bug_title'))
@@ -75,8 +120,15 @@ class PrepareChangeLog(AbstractStep):
args.extend(self._changed_files(state))
try:
- self._tool.executive.run_and_throw_if_fail(args, self._options.quiet, cwd=self._tool.scm().checkout_root)
+ output = self._tool.executive.run_and_throw_if_fail(args, self._options.quiet, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
_log.error("Unable to prepare ChangeLogs.")
sys.exit(1)
+
+ # These are the ChangeLog entries added by prepare-Changelog
+ changelogs = re.findall(r'Editing the (\S*/ChangeLog) file.', output)
+ changelogs = set(self._tool.filesystem.join(self._tool.scm().checkout_root, f) for f in changelogs)
+ for changelog in changelogs & set(self.cached_lookup(state, "changelogs")):
+ self._resolve_existing_entry(changelog)
+
self.did_modify_checkout(state)
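
The regular expression above recovers the ChangeLog files that prepare-ChangeLog reports touching; a small self-contained example of the same extraction, with an illustrative sample of the tool's output:

import os
import re

output = ("Editing the Tools/ChangeLog file.\n"
          "Editing the Source/WebCore/ChangeLog file.\n")
checkout_root = "/mock-checkout"
changelogs = set(os.path.join(checkout_root, path)
                 for path in re.findall(r'Editing the (\S*/ChangeLog) file.', output))
# changelogs == set(['/mock-checkout/Tools/ChangeLog',
#                    '/mock-checkout/Source/WebCore/ChangeLog'])
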
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py
index fc31d1fa9..803f072a3 100644
--- a/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelog_unittest.py
@@ -26,32 +26,108 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import os
-import unittest
+import unittest2 as unittest
# Do not import changelog_unittest.ChangeLogTest directly as that will cause it to be run again.
from webkitpy.common.checkout import changelog_unittest
+from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.preparechangelog import PrepareChangeLog
-
class PrepareChangeLogTest(changelog_unittest.ChangeLogTest):
+ def test_resolve_existing_entry(self):
+ step = PrepareChangeLog(MockTool(), MockOptions())
+
+ headers = ["2013-01-18 Timothy Loh <timloh@chromium.com>\n\n",
+ "2013-01-20 Timothy Loh <timloh@chromium.com>\n\n",
+ u"2009-08-17 Tor Arne Vestb\xf8 <vestbo@webkit.org>\n\n",
+ u"2009-08-18 Tor Arne Vestb\xf8 <vestbo@webkit.org>\n\n",
+ "2013-01-18 Eric Seidel <eric@webkit.org>\n\n",
+ "2013-01-20 Eric Seidel <eric@webkit.org>\n\n",
+ ]
+
+ bug_descs = [" prepare-Changelog should support updating the list of changed files\n",
+ " webkit-patch upload should support updating the list of changed files\n"]
+
+ bug_url = " https://bugs.webkit.org/show_bug.cgi?id=74358\n\n"
+
+ descriptions = ["", " A description of the changes.\n\n",
+ " A description.\n\n With some\n line breaks\n\n"]
+
+ changes = [
+""" * Scripts/webkitpy/tool/steps/preparechangelog.py:
+ (PrepareChangeLog):
+ (PrepareChangeLog.run):\n\n""",
+""" * Scripts/webkitpy/tool/steps/preparechangelog.py:
+ (PrepareChangeLog._resolve_existing_entry):
+ (PrepareChangeLog):
+ (PrepareChangeLog.run):\n\n""",
+""" * Scripts/webkitpy/tool/steps/preparechangelog.py:
+ (PrepareChangeLog): Some annotations
+ (PrepareChangeLog.run):
+ More annotations\n\n""",
+""" * Scripts/webkitpy/tool/steps/preparechangelog.py:
+ (PrepareChangeLog): Some annotations
+ (PrepareChangeLog.run):
+ More annotations
+
+ * Scripts/webkitpy/tool/steps/preparechangelog.py:
+ (PrepareChangeLog._resolve_existing_entry):
+ (PrepareChangeLog):
+ (PrepareChangeLog.run):\n\n""",
+ ]
+
+ def make_entry(indices):
+ a, b, c, d = indices
+ return headers[a] + bug_descs[b] + bug_url + descriptions[c] + changes[d]
+
+ test_cases = [((0, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0)),
+ ((0, 0, 0, 0), (0, 0, 1, 0), (0, 0, 1, 0)),
+ ((1, 0, 0, 0), (0, 0, 2, 0), (1, 0, 2, 0)),
+ ((0, 1, 0, 0), (0, 0, 1, 0), (0, 1, 1, 0)),
+ ((0, 0, 0, 1), (0, 0, 0, 0), (0, 0, 0, 1)),
+ ((0, 0, 0, 0), (0, 0, 1, 1), (0, 0, 1, 0)),
+ ((0, 0, 0, 0), (0, 0, 2, 2), (0, 0, 2, 2)),
+ ((0, 0, 0, 1), (0, 0, 1, 2), (0, 0, 1, 3)),
+ ((1, 1, 0, 1), (0, 0, 0, 2), (1, 1, 0, 3)),
+ ((3, 0, 0, 0), (2, 0, 1, 0), (3, 0, 1, 0)),
+ ((4, 0, 0, 0), (0, 0, 0, 0), (0, 0, 0, 0)),
+ ((5, 0, 0, 0), (0, 0, 0, 0), (1, 0, 0, 0)),
+ ((0, 0, 0, 0), (4, 0, 0, 0), (4, 0, 0, 0)),
+ ((1, 0, 0, 0), (4, 0, 0, 0), (5, 0, 0, 0)),
+ ]
+
+ for new, old, final in test_cases:
+ new_entry = make_entry(new)
+ old_entry = make_entry(old)
+ start_file = new_entry + old_entry + self._rolled_over_footer
+
+ final_entry = make_entry(final)
+ end_file = final_entry + self._rolled_over_footer
+
+ path = "ChangeLog"
+ step._tool.filesystem = MockFileSystem()
+ step._tool.filesystem.write_text_file(path, start_file)
+ step._resolve_existing_entry(path)
+ actual_output = step._tool.filesystem.read_text_file(path)
+ self.assertEquals(actual_output, end_file)
+
def test_ensure_bug_url(self):
- # FIXME: This should use a MockFileSystem instead of a real FileSystem.
capture = OutputCapture()
step = PrepareChangeLog(MockTool(), MockOptions())
changelog_contents = u"%s\n%s" % (self._new_entry_boilerplate, self._example_changelog)
- changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
+ changelog_path = "ChangeLog"
state = {
"bug_title": "Example title",
"bug_id": 1234,
"changelogs": [changelog_path],
}
- capture.assert_outputs(self, step.run, [state])
- actual_contents = self._read_file_contents(changelog_path, "utf-8")
+ step._tool.filesystem = MockFileSystem()
+ step._tool.filesystem.write_text_file(changelog_path, changelog_contents)
+ capture.assert_outputs(self, step._ensure_bug_url, [state])
+ actual_contents = step._tool.filesystem.read_text_file(changelog_path)
expected_message = "Example title\n http://example.com/1234"
expected_contents = changelog_contents.replace("Need a short description (OOPS!).\n Need the bug URL (OOPS!).", expected_message)
- os.remove(changelog_path)
- self.assertEqual(actual_contents.splitlines(), expected_contents.splitlines())
+ self.assertEqual(actual_contents, expected_contents)
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py
index 95a99c320..82e7b0252 100644
--- a/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py
+++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert.py
@@ -48,7 +48,7 @@ class PrepareChangeLogForRevert(AbstractStep):
def run(self, state):
# This could move to prepare-ChangeLog by adding a --revert= option.
- self._tool.executive.run_and_throw_if_fail(self._tool.port().prepare_changelog_command(), cwd=self._tool.scm().checkout_root)
+ self._tool.executive.run_and_throw_if_fail(self._tool.deprecated_port().prepare_changelog_command(), cwd=self._tool.scm().checkout_root)
changelog_paths = self._tool.checkout().modified_changelogs(git_commit=None)
bug_url = self._tool.bugs.bug_url_for_bug_id(state["bug_id"]) if state["bug_id"] else None
message = self._message_for_revert(state["revision_list"], state["reason"], bug_url)
diff --git a/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert_unittest.py b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert_unittest.py
index b82cb4aa2..3ec6e9a60 100644
--- a/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/preparechangelogforrevert_unittest.py
@@ -26,27 +26,17 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import codecs
-import os
-import tempfile
-import unittest
+import unittest2 as unittest
# Do not import changelog_unittest.ChangeLogTest directly as that will cause it to be run again.
from webkitpy.common.checkout import changelog_unittest
from webkitpy.common.checkout.changelog import ChangeLog
+from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.tool.steps.preparechangelogforrevert import *
class UpdateChangeLogsForRevertTest(unittest.TestCase):
- @staticmethod
- def _write_tmp_file_with_contents(byte_array):
- assert(isinstance(byte_array, str))
- (file_descriptor, file_path) = tempfile.mkstemp() # NamedTemporaryFile always deletes the file on close in python < 2.6
- with os.fdopen(file_descriptor, "w") as file:
- file.write(byte_array)
- return file_path
-
_revert_entry_with_bug_url = '''2009-08-19 Eric Seidel <eric@webkit.org>
Unreviewed, rolling out r12345.
@@ -110,13 +100,13 @@ class UpdateChangeLogsForRevertTest(unittest.TestCase):
def _assert_message_for_revert_output(self, args, expected_entry):
changelog_contents = u"%s\n%s" % (changelog_unittest.ChangeLogTest._new_entry_boilerplate, changelog_unittest.ChangeLogTest._example_changelog)
- changelog_path = self._write_tmp_file_with_contents(changelog_contents.encode("utf-8"))
- changelog = ChangeLog(changelog_path)
+ changelog_path = "ChangeLog"
+ fs = MockFileSystem({changelog_path: changelog_contents.encode("utf-8")})
+ changelog = ChangeLog(changelog_path, fs)
changelog.update_with_unreviewed_message(PrepareChangeLogForRevert._message_for_revert(*args))
actual_entry = changelog.latest_entry()
- os.remove(changelog_path)
- self.assertEqual(actual_entry.contents(), expected_entry)
- self.assertEqual(actual_entry.reviewer_text(), None)
+ self.assertMultiLineEqual(actual_entry.contents(), expected_entry)
+ self.assertIsNone(actual_entry.reviewer_text())
# These checks could be removed to allow this to work on other entries:
self.assertEqual(actual_entry.author_name(), "Eric Seidel")
self.assertEqual(actual_entry.author_email(), "eric@webkit.org")
diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests.py b/Tools/Scripts/webkitpy/tool/steps/runtests.py
index 6dc90f92c..a45628b2d 100644
--- a/Tools/Scripts/webkitpy/tool/steps/runtests.py
+++ b/Tools/Scripts/webkitpy/tool/steps/runtests.py
@@ -1,9 +1,9 @@
# Copyright (C) 2010 Google Inc. All rights reserved.
-#
+#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
-#
+#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
@@ -13,7 +13,7 @@
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -27,7 +27,9 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
-
+import os
+import platform
+import sys
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
from webkitpy.common.system.executive import ScriptError
@@ -41,6 +43,7 @@ class RunTests(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
+ Options.build_style,
Options.test,
Options.non_interactive,
Options.quiet,
@@ -53,44 +56,59 @@ class RunTests(AbstractStep):
if not self._options.non_interactive:
# FIXME: We should teach the commit-queue and the EWS how to run these tests.
- python_unittests_command = self._tool.port().run_python_unittests_command()
+ python_unittests_command = self._tool.deprecated_port().run_python_unittests_command()
if python_unittests_command:
_log.info("Running Python unit tests")
self._tool.executive.run_and_throw_if_fail(python_unittests_command, cwd=self._tool.scm().checkout_root)
- perl_unittests_command = self._tool.port().run_perl_unittests_command()
+ perl_unittests_command = self._tool.deprecated_port().run_perl_unittests_command()
if perl_unittests_command:
_log.info("Running Perl unit tests")
self._tool.executive.run_and_throw_if_fail(perl_unittests_command, cwd=self._tool.scm().checkout_root)
- javascriptcore_tests_command = self._tool.port().run_javascriptcore_tests_command()
+ javascriptcore_tests_command = self._tool.deprecated_port().run_javascriptcore_tests_command()
if javascriptcore_tests_command:
_log.info("Running JavaScriptCore tests")
self._tool.executive.run_and_throw_if_fail(javascriptcore_tests_command, quiet=True, cwd=self._tool.scm().checkout_root)
- webkit_unit_tests_command = self._tool.port().run_webkit_unit_tests_command()
+ bindings_tests_command = self._tool.deprecated_port().run_bindings_tests_command()
+ if bindings_tests_command:
+ _log.info("Running bindings generation tests")
+ args = bindings_tests_command
+ try:
+ self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
+ except ScriptError, e:
+ _log.info("Error running run-bindings-tests: %s" % e.message_with_output())
+
+ webkit_unit_tests_command = self._tool.deprecated_port().run_webkit_unit_tests_command()
if webkit_unit_tests_command:
_log.info("Running WebKit unit tests")
args = webkit_unit_tests_command
- if self._options.non_interactive:
- args.append("--gtest_output=xml:%s/webkit_unit_tests_output.xml" % self._tool.port().results_directory)
try:
self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
except ScriptError, e:
_log.info("Error running webkit_unit_tests: %s" % e.message_with_output())
+
_log.info("Running run-webkit-tests")
- args = self._tool.port().run_webkit_tests_command()
+ args = self._tool.deprecated_port().run_webkit_tests_command()
if self._options.non_interactive:
args.extend([
"--no-new-test-results",
- "--no-launch-safari",
- "--skip-failing-tests",
+ "--no-show-results",
"--exit-after-n-failures=%s" % self.NON_INTERACTIVE_FAILURE_LIMIT_COUNT,
- "--results-directory=%s" % self._tool.port().results_directory,
- "--quiet",
])
+ # old-run-webkit-tests does not support --skip-failing-tests
+ # Using --quiet one Windows fails when we try to use /dev/null, disabling for now until we find a fix
+ if sys.platform != "cygwin":
+ args.append("--quiet")
+ args.append("--skip-failing-tests")
+ else:
+ args.append("--no-build");
+
if self._options.quiet:
args.append("--quiet")
+
self._tool.executive.run_and_throw_if_fail(args, cwd=self._tool.scm().checkout_root)
+
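
A compact sketch of the non-interactive argument selection added above; the flag strings are taken from the patch, and the cygwin branch mirrors the comment about --quiet and /dev/null on Windows (the failure limit of 30 matches NON_INTERACTIVE_FAILURE_LIMIT_COUNT):

import sys

def non_interactive_run_webkit_tests_args(failure_limit=30):
    args = ["--no-new-test-results",
            "--no-show-results",
            "--exit-after-n-failures=%s" % failure_limit]
    if sys.platform != "cygwin":
        # Off cygwin we can pass --quiet and --skip-failing-tests; on Windows
        # old-run-webkit-tests chokes on /dev/null, so --no-build is used instead.
        args.extend(["--quiet", "--skip-failing-tests"])
    else:
        args.append("--no-build")
    return args
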
diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py b/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
index 78a867b36..ef8920e9b 100644
--- a/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
@@ -26,7 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import platform
+import sys
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
@@ -38,9 +40,22 @@ class RunTestsTest(unittest.TestCase):
tool._deprecated_port.run_python_unittests_command = lambda: None
tool._deprecated_port.run_perl_unittests_command = lambda: None
step = RunTests(tool, MockOptions(test=True, non_interactive=True, quiet=False))
- expected_logs = """Running WebKit unit tests
-MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests', '--gtest_output=xml:/mock-results/webkit_unit_tests_output.xml'], cwd=/mock-checkout
+
+ if sys.platform != "cygwin":
+ expected_logs = """Running bindings generation tests
+MOCK run_and_throw_if_fail: ['mock-run-bindings-tests'], cwd=/mock-checkout
+Running WebKit unit tests
+MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests'], cwd=/mock-checkout
+Running run-webkit-tests
+MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-show-results', '--exit-after-n-failures=30', '--quiet', '--skip-failing-tests'], cwd=/mock-checkout
+"""
+ else:
+ expected_logs = """Running bindings generation tests
+MOCK run_and_throw_if_fail: ['mock-run-bindings-tests'], cwd=/mock-checkout
+Running WebKit unit tests
+MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests'], cwd=/mock-checkout
Running run-webkit-tests
-MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--skip-failing-tests', '--exit-after-n-failures=30', '--results-directory=/mock-results', '--quiet'], cwd=/mock-checkout
+MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-show-results', '--exit-after-n-failures=30', '--no-build'], cwd=/mock-checkout
"""
+
OutputCapture().assert_outputs(self, step.run, [{}], expected_logs=expected_logs)
diff --git a/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py b/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py
index c4ea47b4d..7172ba7f5 100644
--- a/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/steps_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.config.ports import DeprecatedPort
@@ -99,10 +99,9 @@ class StepsTest(unittest.TestCase):
mock_options = self._step_options()
mock_options.non_interactive = False
step = steps.RunTests(MockTool(log_executive=True), mock_options)
- # FIXME: We shouldn't use a real port-object here, but there is too much to mock at the moment.
- mock_port = DeprecatedPort()
tool = MockTool(log_executive=True)
- tool.port = lambda: mock_port
+ # FIXME: We shouldn't use a real port-object here, but there is too much to mock at the moment.
+ tool._deprecated_port = DeprecatedPort()
step = steps.RunTests(tool, mock_options)
expected_logs = """Running Python unit tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitpy'], cwd=/mock-checkout
@@ -110,6 +109,8 @@ Running Perl unit tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/test-webkitperl'], cwd=/mock-checkout
Running JavaScriptCore tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/run-javascriptcore-tests'], cwd=/mock-checkout
+Running bindings generation tests
+MOCK run_and_throw_if_fail: ['Tools/Scripts/run-bindings-tests'], cwd=/mock-checkout
Running run-webkit-tests
MOCK run_and_throw_if_fail: ['Tools/Scripts/run-webkit-tests', '--quiet'], cwd=/mock-checkout
"""
diff --git a/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py
index 76bef35ac..40a24829b 100644
--- a/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py
+++ b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers.py
@@ -42,9 +42,12 @@ class SuggestReviewers(AbstractStep):
if not self._options.suggest_reviewers:
return
- reviewers = self._tool.checkout().suggested_reviewers(self._options.git_commit, self._changed_files(state))
+ reviewers = self._tool.checkout().suggested_reviewers(self._options.git_commit, self._changed_files(state))[:5]
print "The following reviewers have recently modified files in your patch:"
- print "\n".join([reviewer.full_name for reviewer in reviewers])
+ print ", ".join([reviewer.full_name for reviewer in reviewers])
+
+ if not state.get('bug_id'):
+ return
if not self._tool.user.confirm("Would you like to CC them?"):
return
reviewer_emails = [reviewer.bugzilla_email() for reviewer in reviewers]
diff --git a/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py
index 42254c86b..fc096f118 100644
--- a/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/suggestreviewers_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
diff --git a/Tools/Scripts/webkitpy/tool/steps/update.py b/Tools/Scripts/webkitpy/tool/steps/update.py
index 0737ebcd0..f70354078 100644
--- a/Tools/Scripts/webkitpy/tool/steps/update.py
+++ b/Tools/Scripts/webkitpy/tool/steps/update.py
@@ -50,5 +50,5 @@ class Update(AbstractStep):
self._tool.executive.run_and_throw_if_fail(self._update_command(), quiet=self._options.quiet, cwd=self._tool.scm().checkout_root)
def _update_command(self):
- update_command = self._tool.port().update_webkit_command(self._options.non_interactive)
+ update_command = self._tool.deprecated_port().update_webkit_command(self._options.non_interactive)
return update_command
diff --git a/Tools/Scripts/webkitpy/tool/steps/update_unittest.py b/Tools/Scripts/webkitpy/tool/steps/update_unittest.py
index c1a934db5..49d6b4098 100644
--- a/Tools/Scripts/webkitpy/tool/steps/update_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/update_unittest.py
@@ -26,9 +26,9 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
-from webkitpy.common.config.ports import ChromiumPort, ChromiumAndroidPort, ChromiumXVFBPort
+from webkitpy.common.config.ports import MacPort, MacWK2Port
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.update import Update
@@ -41,14 +41,11 @@ class UpdateTest(unittest.TestCase):
step = Update(tool, options)
self.assertEqual(["mock-update-webkit"], step._update_command())
- tool._deprecated_port = ChromiumPort()
- self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--force-update"], step._update_command())
+ tool._deprecated_port = MacPort()
+ self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
- tool._deprecated_port = ChromiumXVFBPort()
- self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--force-update"], step._update_command())
-
- tool._deprecated_port = ChromiumAndroidPort()
- self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--force-update", "--chromium-android"], step._update_command())
+ tool._deprecated_port = MacWK2Port()
+ self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
def test_update_command_interactive(self):
tool = MockTool()
@@ -56,11 +53,8 @@ class UpdateTest(unittest.TestCase):
step = Update(tool, options)
self.assertEqual(["mock-update-webkit"], step._update_command())
- tool._deprecated_port = ChromiumPort()
- self.assertEqual(["Tools/Scripts/update-webkit", "--chromium"], step._update_command())
-
- tool._deprecated_port = ChromiumXVFBPort()
- self.assertEqual(["Tools/Scripts/update-webkit", "--chromium"], step._update_command())
+ tool._deprecated_port = MacPort()
+ self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
- tool._deprecated_port = ChromiumAndroidPort()
- self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--chromium-android"], step._update_command())
+ tool._deprecated_port = MacWK2Port()
+ self.assertEqual(["Tools/Scripts/update-webkit"], step._update_command())
diff --git a/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py
index 3182cf3ab..d433e3f21 100644
--- a/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/updatechangelogswithreview_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.mocktool import MockOptions, MockTool
diff --git a/Tools/Scripts/webkitpy/tool/steps/updatechromiumdeps.py b/Tools/Scripts/webkitpy/tool/steps/updatechromiumdeps.py
deleted file mode 100644
index 23d861bfc..000000000
--- a/Tools/Scripts/webkitpy/tool/steps/updatechromiumdeps.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (C) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-import logging
-import sys
-import urllib2
-
-from webkitpy.tool.steps.abstractstep import AbstractStep
-from webkitpy.tool.steps.options import Options
-from webkitpy.common.config import urls
-
-_log = logging.getLogger(__name__)
-
-
-class UpdateChromiumDEPS(AbstractStep):
- @classmethod
- def options(cls):
- return AbstractStep.options() + [
- Options.non_interactive,
- ]
-
- # Notice that this method throws lots of exciting exceptions!
- def _fetch_last_known_good_revision(self):
- return int(urllib2.urlopen(urls.chromium_lkgr_url).read())
-
- def _validate_revisions(self, current_chromium_revision, new_chromium_revision):
- if new_chromium_revision < current_chromium_revision:
- message = "Current Chromium DEPS revision %s is newer than %s." % (current_chromium_revision, new_chromium_revision)
- if self._options.non_interactive:
- _log.error(message)
- sys.exit(1)
- _log.info(message)
- new_chromium_revision = self._tool.user.prompt("Enter new chromium revision (enter nothing to cancel):\n")
- try:
- new_chromium_revision = int(new_chromium_revision)
- except ValueError, TypeError:
- new_chromium_revision = None
- if not new_chromium_revision:
- _log.error("Unable to update Chromium DEPS")
- sys.exit(1)
-
- def run(self, state):
- # Note that state["chromium_revision"] must be defined, but can be None.
- new_chromium_revision = state["chromium_revision"]
- if not new_chromium_revision:
- new_chromium_revision = self._fetch_last_known_good_revision()
-
- deps = self._tool.checkout().chromium_deps()
- current_chromium_revision = deps.read_variable("chromium_rev")
- self._validate_revisions(current_chromium_revision, new_chromium_revision)
- _log.info("Updating Chromium DEPS to %s" % new_chromium_revision)
- deps.write_variable("chromium_rev", new_chromium_revision)
diff --git a/Tools/Scripts/webkitpy/tool/steps/validatechangelogs.py b/Tools/Scripts/webkitpy/tool/steps/validatechangelogs.py
index 061baa5ec..e77e5c01e 100644
--- a/Tools/Scripts/webkitpy/tool/steps/validatechangelogs.py
+++ b/Tools/Scripts/webkitpy/tool/steps/validatechangelogs.py
@@ -29,6 +29,7 @@
import logging
import sys
+from optparse import make_option
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
from webkitpy.common.checkout.diff_parser import DiffParser
@@ -42,12 +43,11 @@ class ValidateChangeLogs(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
+ make_option("--check-oops", action="store_true", default=False, help="Check there are no OOPS left in change log"),
Options.non_interactive,
]
def _check_changelog_diff(self, diff_file):
- if not self._tool.checkout().is_path_to_changelog(diff_file.filename):
- return True
# Each line is a tuple, the first value is the deleted line number
# Date, reviewer, bug title, bug url, and empty lines could all be
# identical in the most recent entries. If the diff starts any
@@ -64,6 +64,12 @@ class ValidateChangeLogs(AbstractStep):
return True
return False
+ def _changelog_contains_oops(self, diff_file):
+ for diff_line in diff_file.lines:
+ if 'OOPS!' in diff_line[2]:
+ return True
+ return False
+
def run(self, state):
changed_files = self.cached_lookup(state, "changed_files")
for filename in changed_files:
@@ -76,6 +82,11 @@ class ValidateChangeLogs(AbstractStep):
diff = self._tool.scm().diff_for_file(filename)
parsed_diff = DiffParser(diff.splitlines())
for filename, diff_file in parsed_diff.files.items():
+ if not self._tool.checkout().is_path_to_changelog(diff_file.filename):
+ continue
if not self._check_changelog_diff(diff_file):
_log.error("ChangeLog entry in %s is not at the top of the file." % diff_file.filename)
sys.exit(1)
+ if self._options.check_oops and self._changelog_contains_oops(diff_file):
+ _log.error("ChangeLog entry in %s contains OOPS!." % diff_file.filename)
+ sys.exit(1)
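A note on the new --check-oops handling above: DiffParser (used in run()) hands each changed line to _changelog_contains_oops as a tuple whose first value is the deleted line number and whose third value is the line text, as the unit test below also shows, so the scan only needs to look at the text field. A minimal standalone sketch of that scan, with made-up tuple values (the middle field name is assumed here):

    # Sketch of the OOPS! scan; each tuple mirrors DiffParser's
    # (deleted line number, new line number, line text) layout used in the unit test below.
    def changelog_contains_oops(diff_lines):
        for _, _, text in diff_lines:
            if 'OOPS!' in text:
                return True
        return False

    sample = [(1, 1, 'foo'), (2, 2, 'bar OOPS! bar'), (3, 3, 'foo')]
    assert changelog_contains_oops(sample)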
diff --git a/Tools/Scripts/webkitpy/tool/steps/validatechangelogs_unittest.py b/Tools/Scripts/webkitpy/tool/steps/validatechangelogs_unittest.py
index c3b723ed1..50ecc4646 100644
--- a/Tools/Scripts/webkitpy/tool/steps/validatechangelogs_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/validatechangelogs_unittest.py
@@ -26,7 +26,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-import unittest
+import unittest2 as unittest
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.thirdparty.mock import Mock
@@ -38,7 +38,6 @@ class ValidateChangeLogsTest(unittest.TestCase):
def _assert_start_line_produces_output(self, start_line, should_fail=False, non_interactive=False):
tool = MockTool()
- tool._checkout.is_path_to_changelog = lambda path: True
step = ValidateChangeLogs(tool, MockOptions(git_commit=None, non_interactive=non_interactive))
diff_file = Mock()
diff_file.filename = "mock/ChangeLog"
@@ -56,3 +55,15 @@ class ValidateChangeLogsTest(unittest.TestCase):
self._assert_start_line_produces_output(1, non_interactive=False)
self._assert_start_line_produces_output(8, non_interactive=True, should_fail=True)
+
+ def test_changelog_contains_oops(self):
+ tool = MockTool()
+ tool._checkout.is_path_to_changelog = lambda path: True
+ step = ValidateChangeLogs(tool, MockOptions(git_commit=None, non_interactive=True, check_oops=True))
+ diff_file = Mock()
+ diff_file.filename = "mock/ChangeLog"
+ diff_file.lines = [(1, 1, "foo"), (2, 2, "bar OOPS! bar"), (3, 3, "foo")]
+ self.assertTrue(OutputCapture().assert_outputs(self, step._changelog_contains_oops, [diff_file], expected_logs=''))
+
+ diff_file.lines = [(1, 1, "foo"), (2, 2, "bar OOPS bar"), (3, 3, "foo")]
+ self.assertFalse(OutputCapture().assert_outputs(self, step._changelog_contains_oops, [diff_file], expected_logs=''))
diff --git a/Tools/Scripts/webkitpy/w3c/__init__.py b/Tools/Scripts/webkitpy/w3c/__init__.py
new file mode 100644
index 000000000..ef65bee5b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/__init__.py
@@ -0,0 +1 @@
+# Required for Python to search this directory for module files
diff --git a/Tools/Scripts/webkitpy/w3c/test_converter.py b/Tools/Scripts/webkitpy/w3c/test_converter.py
new file mode 100644
index 000000000..2e9bfcb9a
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_converter.py
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.host import Host
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup, Tag
+
+
+_log = logging.getLogger(__name__)
+
+
+class W3CTestConverter(object):
+
+ def __init__(self):
+ self._host = Host()
+ self._filesystem = self._host.filesystem
+ self._webkit_root = WebKitFinder(self._filesystem).webkit_base()
+
+ # These settings might vary between WebKit and Blink
+ self._css_property_file = self.path_from_webkit_root('Source', 'WebCore', 'css', 'CSSPropertyNames.in')
+ self._css_property_split_string = '='
+
+ self.prefixed_properties = self.read_webkit_prefixed_css_property_list()
+
+ def path_from_webkit_root(self, *comps):
+ return self._filesystem.abspath(self._filesystem.join(self._webkit_root, *comps))
+
+ def read_webkit_prefixed_css_property_list(self):
+ prefixed_properties = []
+
+ contents = self._filesystem.read_text_file(self._css_property_file)
+ for line in contents.splitlines():
+ # Find lines starting with the -webkit- prefix.
+ match = re.match('-webkit-[\w|-]*', line)
+ if match:
+ # Ignore lines where both the prefixed and non-prefixed property
+ # are supported - denoted by -webkit-some-property = some-property.
+ fields = line.split(self._css_property_split_string)
+ if len(fields) == 2 and fields[1].strip() in fields[0].strip():
+ continue
+ prefixed_properties.append(match.group(0))
+
+ return prefixed_properties
+
+ def convert_for_webkit(self, new_path, filename):
+ """ Converts a file's |contents| so it will function correctly in its |new_path| in Webkit.
+
+ Returns the list of modified properties and the modified text if the file was modifed, None otherwise."""
+ contents = self._filesystem.read_binary_file(filename)
+ if filename.endswith('.css'):
+ return self.convert_css(contents, filename)
+ return self.convert_html(new_path, contents, filename)
+
+ def convert_css(self, contents, filename):
+ return self.add_webkit_prefix_to_unprefixed_properties(contents, filename)
+
+ def convert_html(self, new_path, contents, filename):
+ doc = BeautifulSoup(contents)
+ did_modify_paths = self.convert_testharness_paths(doc, new_path, filename)
+ converted_properties_and_content = self.convert_prefixed_properties(doc, filename)
+ return converted_properties_and_content if (did_modify_paths or converted_properties_and_content[0]) else None
+
+ def convert_testharness_paths(self, doc, new_path, filename):
+ """ Update links to testharness.js in the BeautifulSoup |doc| to point to the copy in |new_path|.
+
+ Returns whether the document was modified."""
+
+ # Look for the W3C-style path to any testharness files - scripts (.js) or links (.css)
+ pattern = re.compile('/resources/testharness')
+ script_tags = doc.findAll(src=pattern)
+ link_tags = doc.findAll(href=pattern)
+ testharness_tags = script_tags + link_tags
+
+ if not testharness_tags:
+ return False
+
+ resources_path = self.path_from_webkit_root('LayoutTests', 'resources')
+ resources_relpath = self._filesystem.relpath(resources_path, new_path)
+
+ for tag in testharness_tags:
+ # FIXME: We need to handle img, audio, video tags also.
+ attr = 'src'
+ if tag.name != 'script':
+ attr = 'href'
+
+ if not attr in tag.attrMap:
+ # FIXME: Figure out what to do w/ invalid tags. For now, we return False
+ # and leave the document unmodified, which means that it'll probably fail to run.
+ _log.error("Missing an attr in %s" % filename)
+ return False
+
+ old_path = tag[attr]
+ new_tag = Tag(doc, tag.name, tag.attrs)
+ new_tag[attr] = re.sub(pattern, resources_relpath + '/testharness', old_path)
+
+ self.replace_tag(tag, new_tag)
+
+ return True
+
+ def convert_prefixed_properties(self, doc, filename):
+ """ Searches a BeautifulSoup |doc| for any CSS properties requiring the -webkit- prefix and converts them.
+
+ Returns the list of converted properties and the modified document as a string """
+
+ converted_properties = []
+
+ # Look for inline and document styles.
+ inline_styles = doc.findAll(style=re.compile('.*'))
+ style_tags = doc.findAll('style')
+ all_styles = inline_styles + style_tags
+
+ for tag in all_styles:
+
+ # Get the text whether in a style tag or style attribute.
+ style_text = ''
+ if tag.name == 'style':
+ if not tag.contents:
+ continue
+ style_text = tag.contents[0]
+ else:
+ style_text = tag['style']
+
+ updated_style_text = self.add_webkit_prefix_to_unprefixed_properties(style_text, filename)
+
+ # Rewrite tag only if changes were made.
+ if updated_style_text[0]:
+ converted_properties.extend(updated_style_text[0])
+
+ new_tag = Tag(doc, tag.name, tag.attrs)
+ new_tag.insert(0, updated_style_text[1])
+
+ self.replace_tag(tag, new_tag)
+
+ return (converted_properties, doc.prettify())
+
+ def add_webkit_prefix_to_unprefixed_properties(self, text, filename):
+ """ Searches |text| for instances of properties requiring the -webkit- prefix and adds the prefix to them.
+
+ Returns the list of converted properties and the modified text."""
+
+ converted_properties = []
+
+ for prefixed_property in self.prefixed_properties:
+ # FIXME: add in both the prefixed and unprefixed versions, rather than just replacing them?
+ # That might allow the imported test to work in other browsers more easily.
+
+ unprefixed_property = prefixed_property.replace('-webkit-', '')
+
+ # Look for the various ways it might be in the CSS
+ # Match the property preceded by either whitespace or left curly brace
+ # or at the beginning of the string (for inline style attribute)
+ pattern = '([\s{]|^)' + unprefixed_property + '(\s+:|:)'
+ if re.search(pattern, text):
+ _log.info('converting %s -> %s' % (unprefixed_property, prefixed_property))
+ converted_properties.append(prefixed_property)
+ text = re.sub(pattern, r'\1' + prefixed_property + ':', text)  # Keep the character captured before the property name.
+
+ # FIXME: Handle the JS versions of these properties and GetComputedStyle, too.
+ return (converted_properties, text)
+
+ def replace_tag(self, old_tag, new_tag):
+ index = old_tag.parent.contents.index(old_tag)
+ old_tag.parent.insert(index, new_tag)
+ old_tag.extract()
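Taken together, the converter above does two things: read CSSPropertyNames.in for property names that exist only with the -webkit- prefix, then rewrite any unprefixed uses of those names in the imported CSS. A rough standalone sketch of the substitution step, using the same regular-expression shape as add_webkit_prefix_to_unprefixed_properties (the two property names are hypothetical stand-ins for the generated list):

    import re

    # Hypothetical stand-ins for entries read from CSSPropertyNames.in.
    PREFIXED_PROPERTIES = ['-webkit-flow-into', '-webkit-shape-inside']

    def add_prefixes(text):
        for prefixed in PREFIXED_PROPERTIES:
            unprefixed = prefixed.replace('-webkit-', '')
            # Property name preceded by whitespace, '{', or start of string, followed by ':'.
            pattern = r'([\s{]|^)' + unprefixed + r'(\s+:|:)'
            text = re.sub(pattern, r'\1' + prefixed + ':', text)
        return text

    print add_prefixes('#box { flow-into: thread; }')  # '#box { -webkit-flow-into: thread; }'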
diff --git a/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py b/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py
new file mode 100644
index 000000000..ff104abd5
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_converter_unittest.py
@@ -0,0 +1,319 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import os
+import re
+import unittest2 as unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
+from webkitpy.w3c.test_converter import W3CTestConverter
+
+
+DUMMY_FILENAME = 'dummy.html'
+
+class W3CTestConverterTest(unittest.TestCase):
+
+ def fake_dir_path(self, converter, dirname):
+ return converter.path_from_webkit_root("LayoutTests", "css", dirname)
+
+ def test_read_prefixed_property_list(self):
+ """ Tests that the current list of properties requiring the -webkit- prefix load correctly """
+
+ # FIXME: We should be passing in a MockHost here ...
+ converter = W3CTestConverter()
+ prop_list = converter.prefixed_properties
+ self.assertTrue(prop_list, 'No prefixed properties found')
+ for prop in prop_list:
+ self.assertTrue(prop.startswith('-webkit-'))
+
+ def test_convert_for_webkit_nothing_to_convert(self):
+ """ Tests convert_for_webkit() using a basic test that has nothing to convert """
+
+ test_html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR"
+href="mailto:EMAIL OR http://CONTACT_PAGE"/>
+<link rel="help" href="RELEVANT_SPEC_SECTION"/>
+<meta name="assert" content="TEST ASSERTION"/>
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+ converter = W3CTestConverter()
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converted = converter.convert_html('/nothing/to/convert', test_html, DUMMY_FILENAME)
+ finally:
+ oc.restore_output()
+
+ self.verify_no_conversion_happened(converted)
+
+ def test_convert_for_webkit_harness_only(self):
+ """ Tests convert_for_webkit() using a basic JS test that uses testharness.js only and has no prefixed properties """
+
+ test_html = """<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+</head>
+"""
+ converter = W3CTestConverter()
+ fake_dir_path = self.fake_dir_path(converter, "harnessonly")
+
+ converted = converter.convert_html(fake_dir_path, test_html, DUMMY_FILENAME)
+
+ self.verify_conversion_happened(converted)
+ self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
+ self.verify_prefixed_properties(converted, [])
+
+ def test_convert_for_webkit_properties_only(self):
+ """ Tests convert_for_webkit() using a test that has 2 prefixed properties: 1 in a style block + 1 inline style """
+
+ test_html = """<html>
+<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+<style type="text/css">
+
+#block1 { @test0@: propvalue; }
+
+</style>
+</head>
+<body>
+<div id="elem1" style="@test1@: propvalue;"></div>
+</body>
+</html>
+"""
+ converter = W3CTestConverter()
+ fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')
+ test_content = self.generate_test_content(converter.prefixed_properties, 1, test_html)
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
+ finally:
+ oc.restore_output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
+ self.verify_prefixed_properties(converted, test_content[0])
+
+ def test_convert_for_webkit_harness_and_properties(self):
+ """ Tests convert_for_webkit() using a basic JS test that uses testharness.js and testharness.css and has 4 prefixed properties: 3 in a style block + 1 inline style """
+
+ test_html = """<html>
+<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+<style type="text/css">
+
+#block1 { @test0@: propvalue; }
+#block2 { @test1@: propvalue; }
+#block3 { @test2@: propvalue; }
+
+</style>
+</head>
+<body>
+<div id="elem1" style="@test3@: propvalue;"></div>
+</body>
+</html>
+"""
+ converter = W3CTestConverter()
+ fake_dir_path = self.fake_dir_path(converter, 'harnessandprops')
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ test_content = self.generate_test_content(converter.prefixed_properties, 2, test_html)
+ converted = converter.convert_html(fake_dir_path, test_content[1], DUMMY_FILENAME)
+ finally:
+ oc.restore_output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_test_harness_paths(converter, converted[1], fake_dir_path, 1, 1)
+ self.verify_prefixed_properties(converted, test_content[0])
+
+ def test_convert_test_harness_paths(self):
+ """ Tests convert_testharness_paths() with a test that uses all three testharness files """
+
+ test_html = """<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+</head>
+"""
+ converter = W3CTestConverter()
+
+ fake_dir_path = self.fake_dir_path(converter, 'testharnesspaths')
+
+ doc = BeautifulSoup(test_html)
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converted = converter.convert_testharness_paths(doc, fake_dir_path, DUMMY_FILENAME)
+ finally:
+ oc.restore_output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_test_harness_paths(converter, doc, fake_dir_path, 2, 1)
+
+ def test_convert_prefixed_properties(self):
+ """ Tests convert_prefixed_properties() file that has 20 properties requiring the -webkit- prefix:
+ 10 in one style block + 5 in another style
+ block + 5 inline styles, including one with multiple prefixed properties.
+ The properties in the test content are in all sorts of wack formatting.
+ """
+
+ test_html = """<html>
+<style type="text/css"><![CDATA[
+
+.block1 {
+ width: 300px;
+ height: 300px
+}
+
+.block2 {
+ @test0@: propvalue;
+}
+
+.block3{@test1@: propvalue;}
+
+.block4 { @test2@:propvalue; }
+
+.block5{ @test3@ :propvalue; }
+
+#block6 { @test4@ : propvalue; }
+
+#block7
+{
+ @test5@: propvalue;
+}
+
+#block8 { @test6@: propvalue; }
+
+#block9:pseudo
+{
+
+ @test7@: propvalue;
+ @test8@: propvalue propvalue propvalue;;
+}
+
+]]></style>
+</head>
+<body>
+ <div id="elem1" style="@test9@: propvalue;"></div>
+ <div id="elem2" style="propname: propvalue; @test10@ : propvalue; propname:propvalue;"></div>
+ <div id="elem2" style="@test11@: propvalue; @test12@ : propvalue; @test13@ :propvalue;"></div>
+ <div id="elem3" style="@test14@:propvalue"></div>
+</body>
+<style type="text/css"><![CDATA[
+
+.block10{ @test15@: propvalue; }
+.block11{ @test16@: propvalue; }
+.block12{ @test17@: propvalue; }
+#block13:pseudo
+{
+ @test18@: propvalue;
+ @test19@: propvalue;
+}
+
+]]></style>
+</html>
+"""
+ converter = W3CTestConverter()
+
+ test_content = self.generate_test_content(converter.prefixed_properties, 20, test_html)
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ converted = converter.convert_prefixed_properties(BeautifulSoup(test_content[1]), DUMMY_FILENAME)
+ finally:
+ oc.restore_output()
+
+ self.verify_conversion_happened(converted)
+ self.verify_prefixed_properties(converted, test_content[0])
+
+ def verify_conversion_happened(self, converted):
+ self.assertTrue(converted, "conversion didn't happen")
+
+ def verify_no_conversion_happened(self, converted):
+ self.assertEqual(converted, None, 'test should not have been converted')
+
+ def verify_test_harness_paths(self, converter, converted, test_path, num_src_paths, num_href_paths):
+ if isinstance(converted, basestring):
+ converted = BeautifulSoup(converted)
+
+ resources_dir = converter.path_from_webkit_root("LayoutTests", "resources")
+
+ # Verify the original paths are gone, and the new paths are present.
+ orig_path_pattern = re.compile('\"/resources/testharness')
+ self.assertEquals(len(converted.findAll(src=orig_path_pattern)), 0, 'testharness src path was not converted')
+ self.assertEquals(len(converted.findAll(href=orig_path_pattern)), 0, 'testharness href path was not converted')
+
+ new_relpath = os.path.relpath(resources_dir, test_path)
+ relpath_pattern = re.compile(new_relpath)
+ self.assertEquals(len(converted.findAll(src=relpath_pattern)), num_src_paths, 'testharness src relative path not correct')
+ self.assertEquals(len(converted.findAll(href=relpath_pattern)), num_href_paths, 'testharness href relative path not correct')
+
+ def verify_prefixed_properties(self, converted, test_properties):
+ self.assertEqual(len(converted[0]), len(test_properties), 'Incorrect number of properties converted')
+ for test_prop in test_properties:
+ self.assertTrue((test_prop in converted[1]), 'Property ' + test_prop + ' not found in converted doc')
+
+ def generate_test_content(self, full_property_list, num_test_properties, html):
+ """Inserts properties requiring a -webkit- prefix into the content, replacing \'@testXX@\' with a property."""
+ test_properties = []
+ count = 0
+ while count < num_test_properties:
+ test_properties.append(full_property_list[count])
+ count += 1
+
+ # Replace the tokens in the testhtml with the test properties. Walk backward
+ # through the list to replace the double-digit tokens first
+ index = len(test_properties) - 1
+ while index >= 0:
+ # Use the unprefixed version
+ test_prop = test_properties[index].replace('-webkit-', '')
+ # Replace the token
+ html = html.replace('@test' + str(index) + '@', test_prop)
+ index -= 1
+
+ return (test_properties, html)
diff --git a/Tools/Scripts/webkitpy/w3c/test_importer.py b/Tools/Scripts/webkitpy/w3c/test_importer.py
new file mode 100644
index 000000000..119bd7d92
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_importer.py
@@ -0,0 +1,450 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+"""
+ This script imports a directory of W3C CSS tests into WebKit.
+
+ You must have checked out the W3C repository to your local drive.
+
+ This script will import the tests into WebKit following these rules:
+
+ - Only tests that are approved or officially submitted awaiting review are imported
+
+ - All tests are imported into LayoutTests/w3c
+
+ - If the tests are approved, they'll be imported into a directory tree that
+ mirrors the CSS Mercurial repo. For example, <csswg_repo_root>/approved/css2.1 is brought in
+ as LayoutTests/w3c/approved/css2.1, maintaining the entire directory structure under that
+
+ - If the tests are submitted, they'll be brought in as LayoutTests/w3c/submitted and will also
+ maintain their directory structure under that. For example, everything under
+ <csswg_repo_root>/contributors/adobe/submitted is brought into submitted, mirroring its
+ directory structure in the csswg repo
+
+ - If the import directory specified is just a contributor folder, only the submitted folder
+ for that contributor is brought in. For example, to import all of Mozilla's tests, either
+ <csswg_repo_root>/contributors/mozilla or <csswg_repo_root>/contributors/mozilla/submitted
+ will work and are equivalent
+
+ - For the time being, this script won't work if you try to import the full set of submitted
+ tests under contributors/*/submitted. Since these are awaiting review, this is just a small
+ control mechanism to enforce carefully selecting what non-approved tests are imported.
+ It can obviously and easily be changed.
+
+ - By default, only reftests and jstests are imported. This can be overridden with a -a or --all
+ argument
+
+ - Also by default, if test files by the same name already exist in the destination directory,
+ they are overwritten with the idea that running this script would refresh files periodically.
+ This can also be overridden by a -n or --no-overwrite flag
+
+ - All files are converted to work in WebKit:
+ 1. Paths to testharness.js files are modified to point to WebKit's copy of them in
+ LayoutTests/resources, using the correct relative path from the new location
+ 2. All CSS properties requiring the -webkit- vendor prefix are prefixed - the current
+ list of what needs prefixes is read from Source/WebCore/css/CSSPropertyNames.in
+ 3. Each reftest has its own copy of its reference file following the naming conventions
+ new-run-webkit-tests expects
+ 4. If a reference file lives outside the directory of the test that uses it, it is checked
+ for paths to support files as it will be imported into a different relative position to the
+ test file (in the same directory)
+
+ - Upon completion, the script outputs the total number of tests imported, broken down by test type
+
+ - Also upon completion, each directory where files are imported will have w3c-import.log written
+ with a timestamp, the W3C Mercurial changeset if available, the list of CSS properties used that
+ require prefixes, the list of imported files, and guidance for future test modification and
+ maintenance.
+
+ - On subsequent imports, this file is read to determine if files have been removed in the newer changesets.
+ The script removes these files accordingly.
+"""
+
+# FIXME: Change this file to use the Host abstractions rather than os, sys, shutil, etc.
+
+import datetime
+import logging
+import mimetypes
+import optparse
+import os
+import shutil
+import sys
+
+from webkitpy.common.host import Host
+from webkitpy.common.webkit_finder import WebKitFinder
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.w3c.test_parser import TestParser
+from webkitpy.w3c.test_converter import W3CTestConverter
+
+
+TEST_STATUS_UNKNOWN = 'unknown'
+TEST_STATUS_APPROVED = 'approved'
+TEST_STATUS_SUBMITTED = 'submitted'
+
+CHANGESET_NOT_AVAILABLE = 'Not Available'
+
+
+_log = logging.getLogger(__name__)
+
+
+def main(_argv, _stdout, _stderr):
+ options, args = parse_args()
+ import_dir = args[0]
+ if len(args) == 1:
+ repo_dir = os.path.dirname(import_dir)
+ else:
+ repo_dir = args[1]
+
+ if not os.path.exists(import_dir):
+ sys.exit('Source directory %s not found!' % import_dir)
+
+ if not os.path.exists(repo_dir):
+ sys.exit('Repository directory %s not found!' % repo_dir)
+ if not repo_dir in import_dir:
+ sys.exit('Repository directory %s must be a parent of %s' % (repo_dir, import_dir))
+
+ configure_logging()
+
+ test_importer = TestImporter(Host(), import_dir, repo_dir, options)
+ test_importer.do_import()
+
+
+def configure_logging():
+ class LogHandler(logging.StreamHandler):
+
+ def format(self, record):
+ if record.levelno > logging.INFO:
+ return "%s: %s" % (record.levelname, record.getMessage())
+ return record.getMessage()
+
+ logger = logging.getLogger()
+ logger.setLevel(logging.INFO)
+ handler = LogHandler()
+ handler.setLevel(logging.INFO)
+ logger.addHandler(handler)
+ return handler
+
+
+def parse_args():
+ parser = optparse.OptionParser(usage='usage: %prog [options] w3c_test_directory [repo_directory]')
+ parser.add_option('-n', '--no-overwrite', dest='overwrite', action='store_false', default=True,
+ help='Flag to prevent duplicate test files from overwriting existing tests. By default, they will be overwritten')
+ parser.add_option('-a', '--all', action='store_true', default=False,
+ help='Import all tests including reftests, JS tests, and manual/pixel tests. By default, only reftests and JS tests are imported')
+
+ options, args = parser.parse_args()
+ if len(args) not in (1, 2):
+ parser.error('Incorrect number of arguments')
+ return options, args
+
+
+class TestImporter(object):
+
+ def __init__(self, host, source_directory, repo_dir, options):
+ self.host = host
+ self.source_directory = source_directory
+ self.options = options
+
+ self.filesystem = self.host.filesystem
+
+ webkit_finder = WebKitFinder(self.filesystem)
+ self._webkit_root = webkit_finder.webkit_base()
+ self.repo_dir = repo_dir
+ subdirs = os.path.dirname(os.path.relpath(source_directory, repo_dir))
+
+ self.destination_directory = webkit_finder.path_from_webkit_base("LayoutTests", 'w3c', *subdirs)
+
+ self.changeset = CHANGESET_NOT_AVAILABLE
+ self.test_status = TEST_STATUS_UNKNOWN
+
+ self.import_list = []
+
+ def do_import(self):
+ self.find_importable_tests(self.source_directory)
+ self.load_changeset()
+ self.import_tests()
+
+ def load_changeset(self):
+ """Returns the current changeset from mercurial or "Not Available"."""
+ try:
+ self.changeset = self.host.executive.run_command(['hg', 'tip']).split('changeset:')[1]
+ except (OSError, ScriptError):
+ self.changeset = CHANGESET_NOT_AVAILABLE
+
+ def find_importable_tests(self, directory):
+ # FIXME: use filesystem
+ for root, dirs, files in os.walk(directory):
+ _log.info('Scanning ' + root + '...')
+ total_tests = 0
+ reftests = 0
+ jstests = 0
+
+ # "archive" and "data" dirs are internal csswg things that live in every approved directory.
+ # FIXME: skip 'incoming' tests for now, but we should rework the 'test_status' concept and
+ # support reading them as well.
+ DIRS_TO_SKIP = ('.git', '.hg', 'data', 'archive', 'incoming')
+ for d in DIRS_TO_SKIP:
+ if d in dirs:
+ dirs.remove(d)
+
+ copy_list = []
+
+ for filename in files:
+ # FIXME: This block should really be a separate function, but the early-continues make that difficult.
+
+ if filename.startswith('.') or filename.endswith('.pl'):
+ continue # For some reason the w3c repo contains random perl scripts we don't care about.
+
+ fullpath = os.path.join(root, filename)
+
+ mimetype = mimetypes.guess_type(fullpath)
+ if not 'html' in str(mimetype[0]) and not 'xml' in str(mimetype[0]):
+ copy_list.append({'src': fullpath, 'dest': filename})
+ continue
+
+ test_parser = TestParser(vars(self.options), filename=fullpath)
+ test_info = test_parser.analyze_test()
+ if test_info is None:
+ continue
+
+ if 'reference' in test_info.keys():
+ reftests += 1
+ total_tests += 1
+ test_basename = os.path.basename(test_info['test'])
+
+ # Add the ref file, following WebKit style.
+ # FIXME: Ideally we'd support reading the metadata
+ # directly rather than relying on a naming convention.
+ # Using a naming convention creates duplicate copies of the
+ # reference files.
+ ref_file = os.path.splitext(test_basename)[0] + '-expected'
+ ref_file += os.path.splitext(test_basename)[1]
+
+ copy_list.append({'src': test_info['reference'], 'dest': ref_file})
+ copy_list.append({'src': test_info['test'], 'dest': filename})
+
+ # Update any support files that need to move as well to remain relative to the -expected file.
+ if 'refsupport' in test_info.keys():
+ for support_file in test_info['refsupport']:
+ source_file = os.path.join(os.path.dirname(test_info['reference']), support_file)
+ source_file = os.path.normpath(source_file)
+
+ # Keep the dest as it was
+ to_copy = {'src': source_file, 'dest': support_file}
+
+ # Only add it once
+ if not(to_copy in copy_list):
+ copy_list.append(to_copy)
+ elif 'jstest' in test_info.keys():
+ jstests += 1
+ total_tests += 1
+ copy_list.append({'src': fullpath, 'dest': filename})
+ else:
+ total_tests += 1
+ copy_list.append({'src': fullpath, 'dest': filename})
+
+ if not total_tests:
+ # We can skip the support directory if no tests were found.
+ if 'support' in dirs:
+ dirs.remove('support')
+
+ if copy_list:
+ # Only add this directory to the list if there's something to import
+ self.import_list.append({'dirname': root, 'copy_list': copy_list,
+ 'reftests': reftests, 'jstests': jstests, 'total_tests': total_tests})
+
+ def import_tests(self):
+ converter = W3CTestConverter()
+ total_imported_tests = 0
+ total_imported_reftests = 0
+ total_imported_jstests = 0
+ total_prefixed_properties = {}
+
+ for dir_to_copy in self.import_list:
+ total_imported_tests += dir_to_copy['total_tests']
+ total_imported_reftests += dir_to_copy['reftests']
+ total_imported_jstests += dir_to_copy['jstests']
+
+ prefixed_properties = []
+
+ if not dir_to_copy['copy_list']:
+ continue
+
+ orig_path = dir_to_copy['dirname']
+
+ subpath = os.path.relpath(orig_path, self.repo_dir)
+ new_path = os.path.join(self.destination_directory, subpath)
+
+ if not(os.path.exists(new_path)):
+ os.makedirs(new_path)
+
+ copied_files = []
+
+ for file_to_copy in dir_to_copy['copy_list']:
+ # FIXME: Split this block into a separate function.
+ orig_filepath = os.path.normpath(file_to_copy['src'])
+
+ if os.path.isdir(orig_filepath):
+ # FIXME: Figure out what is triggering this and what to do about it.
+ _log.error('%s refers to a directory' % orig_filepath)
+ continue
+
+ if not(os.path.exists(orig_filepath)):
+ _log.warning('%s not found. Possible error in the test.', orig_filepath)
+ continue
+
+ new_filepath = os.path.join(new_path, file_to_copy['dest'])
+
+ if not(os.path.exists(os.path.dirname(new_filepath))):
+ os.makedirs(os.path.dirname(new_filepath))
+
+ if not self.options.overwrite and os.path.exists(new_filepath):
+ _log.info('Skipping import of existing file ' + new_filepath)
+ else:
+ # FIXME: Maybe doing a file diff is in order here for existing files?
+ # In other words, there's no sense in overwriting identical files, but
+ # there's no harm in copying the identical thing.
+ _log.info('Importing: %s', orig_filepath)
+ _log.info(' As: %s', new_filepath)
+
+ # Only html, xml, or css should be converted
+ # FIXME: Eventually, so should js when support is added for this type of conversion
+ mimetype = mimetypes.guess_type(orig_filepath)
+ if 'html' in str(mimetype[0]) or 'xml' in str(mimetype[0]) or 'css' in str(mimetype[0]):
+ converted_file = converter.convert_for_webkit(new_path, filename=orig_filepath)
+
+ if not converted_file:
+ shutil.copyfile(orig_filepath, new_filepath) # The file was unmodified.
+ else:
+ for prefixed_property in converted_file[0]:
+ total_prefixed_properties.setdefault(prefixed_property, 0)
+ total_prefixed_properties[prefixed_property] += 1
+
+ prefixed_properties.extend(set(converted_file[0]) - set(prefixed_properties))
+ outfile = open(new_filepath, 'wb')
+ outfile.write(converted_file[1])
+ outfile.close()
+ else:
+ shutil.copyfile(orig_filepath, new_filepath)
+
+ copied_files.append(new_filepath.replace(self._webkit_root, ''))
+
+ self.remove_deleted_files(new_path, copied_files)
+ self.write_import_log(new_path, copied_files, prefixed_properties)
+
+ _log.info('Import complete')
+
+ _log.info('IMPORTED %d TOTAL TESTS', total_imported_tests)
+ _log.info('Imported %d reftests', total_imported_reftests)
+ _log.info('Imported %d JS tests', total_imported_jstests)
+ _log.info('Imported %d pixel/manual tests', total_imported_tests - total_imported_jstests - total_imported_reftests)
+ _log.info('')
+ _log.info('Properties needing prefixes (by count):')
+ for prefixed_property in sorted(total_prefixed_properties, key=lambda p: total_prefixed_properties[p]):
+ _log.info(' %s: %s', prefixed_property, total_prefixed_properties[prefixed_property])
+
+ def setup_destination_directory(self):
+ """ Creates a destination directory that mirrors that of the source approved or submitted directory """
+
+ self.update_test_status()
+
+ start = self.source_directory.find(self.test_status)
+ new_subpath = self.source_directory[len(self.repo_dir):]
+
+ destination_directory = os.path.join(self.destination_directory, new_subpath)
+
+ if not os.path.exists(destination_directory):
+ os.makedirs(destination_directory)
+
+ _log.info('Tests will be imported into: %s', destination_directory)
+
+ def update_test_status(self):
+ """ Sets the test status to either 'approved' or 'submitted' """
+
+ status = TEST_STATUS_UNKNOWN
+
+ if 'approved' in self.source_directory.split(os.path.sep):
+ status = TEST_STATUS_APPROVED
+ elif 'submitted' in self.source_directory.split(os.path.sep):
+ status = TEST_STATUS_SUBMITTED
+
+ self.test_status = status
+
+ def remove_deleted_files(self, import_directory, new_file_list):
+ """ Reads an import log in |import_directory|, compares it to the |new_file_list|, and removes files not in the new list."""
+
+ previous_file_list = []
+
+ import_log_file = os.path.join(import_directory, 'w3c-import.log')
+ if not os.path.exists(import_log_file):
+ return
+
+ import_log = open(import_log_file, 'r')
+ contents = import_log.readlines()
+
+ if 'List of files:\n' in contents:
+ list_index = contents.index('List of files:\n') + 1
+ previous_file_list = [filename.strip() for filename in contents[list_index:]]
+
+ deleted_files = set(previous_file_list) - set(new_file_list)
+ for deleted_file in deleted_files:
+ _log.info('Deleting file removed from the W3C repo: %s', deleted_file)
+ deleted_file = os.path.join(self._webkit_root, deleted_file)
+ os.remove(deleted_file)
+
+ import_log.close()
+
+ def write_import_log(self, import_directory, file_list, prop_list):
+ """ Writes a w3c-import.log file in each directory with imported files. """
+
+ now = datetime.datetime.now()
+
+ import_log = open(os.path.join(import_directory, 'w3c-import.log'), 'w')
+ import_log.write('The tests in this directory were imported from the W3C repository.\n')
+ import_log.write('Do NOT modify these tests directly in WebKit. Instead, push changes to the W3C CSS repo:\n\n')
+ import_log.write('http://hg.csswg.org/test\n\n')
+ import_log.write('Then run Tools/Scripts/import-w3c-tests in WebKit to reimport\n\n')
+ import_log.write('Do NOT modify or remove this file\n\n')
+ import_log.write('------------------------------------------------------------------------\n')
+ import_log.write('Last Import: ' + now.strftime('%Y-%m-%d %H:%M') + '\n')
+ import_log.write('W3C Mercurial changeset: ' + self.changeset + '\n')
+ import_log.write('Test status at time of import: ' + self.test_status + '\n')
+ import_log.write('------------------------------------------------------------------------\n')
+ import_log.write('Properties requiring vendor prefixes:\n')
+ if prop_list:
+ for prop in prop_list:
+ import_log.write(prop + '\n')
+ else:
+ import_log.write('None\n')
+ import_log.write('------------------------------------------------------------------------\n')
+ import_log.write('List of files:\n')
+ for item in file_list:
+ import_log.write(item + '\n')
+
+ import_log.close()
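Stripped of argument validation and logging setup, the driver logic in main() above reduces to the following sketch; the command-line path in the comment is hypothetical, and the usual entry point is presumably the Tools/Scripts/import-w3c-tests wrapper listed in the diffstat rather than a direct call:

    import os
    from webkitpy.common.host import Host
    from webkitpy.w3c.test_importer import TestImporter, parse_args

    # e.g. invoked as: import-w3c-tests ~/csswg-test/contributors/adobe/submitted
    options, args = parse_args()
    import_dir = args[0]
    repo_dir = args[1] if len(args) == 2 else os.path.dirname(import_dir)

    TestImporter(Host(), import_dir, repo_dir, options).do_import()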
diff --git a/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py b/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py
new file mode 100644
index 000000000..9103623dc
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import optparse
+import shutil
+import tempfile
+import unittest2 as unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.common.system.filesystem_mock import MockFileSystem
+from webkitpy.common.system.executive_mock import MockExecutive2, ScriptError
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.w3c.test_importer import TestImporter
+
+
+FAKE_SOURCE_DIR = '/blink/w3c'
+FAKE_REPO_DIR = '/blink'
+
+FAKE_FILES = {
+ '/blink/w3c/empty_dir/README.txt': '',
+ '/mock-checkout/LayoutTests/w3c/README.txt': '',
+}
+
+class TestImporterTest(unittest.TestCase):
+
+ def test_import_dir_with_no_tests_and_no_hg(self):
+ host = MockHost()
+ host.executive = MockExecutive2(exception=OSError())
+ host.filesystem = MockFileSystem(files=FAKE_FILES)
+
+ importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False}))
+
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ importer.do_import()
+ finally:
+ oc.restore_output()
+
+ def test_import_dir_with_no_tests(self):
+ host = MockHost()
+ host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))
+ host.filesystem = MockFileSystem(files=FAKE_FILES)
+
+ importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False}))
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ importer.do_import()
+ finally:
+ oc.restore_output()
+
+ # FIXME: Needs more tests.
diff --git a/Tools/Scripts/webkitpy/w3c/test_parser.py b/Tools/Scripts/webkitpy/w3c/test_parser.py
new file mode 100644
index 000000000..bb66fdae6
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_parser.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import logging
+import re
+
+from webkitpy.common.host import Host
+from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup as Parser
+
+
+_log = logging.getLogger(__name__)
+
+
+class TestParser(object):
+
+ def __init__(self, options, filename):
+ self.options = options
+ self.filename = filename
+ self.host = Host()
+ self.filesystem = self.host.filesystem
+
+ self.test_doc = None
+ self.ref_doc = None
+ self.load_file(filename)
+
+ def load_file(self, filename):
+ if self.filesystem.isfile(filename):
+ try:
+ self.test_doc = Parser(self.filesystem.read_binary_file(filename))
+ except:
+ # FIXME: Figure out what to do if we can't parse the file.
+ _log.error("Failed to parse %s", filename)
+ self.test_doc = None
+ else:
+ if self.filesystem.isdir(filename):
+ # FIXME: Figure out what is triggering this and what to do about it.
+ _log.error("Trying to load %s, which is a directory", filename)
+ self.test_doc = None
+ self.ref_doc = None
+
+ def analyze_test(self, test_contents=None, ref_contents=None):
+ """ Analyzes a file to determine if it's a test, what type of test, and what reference or support files it requires. Returns all of the test info """
+
+ test_info = None
+
+ if test_contents is None and self.test_doc is None:
+ return test_info
+
+ if test_contents is not None:
+ self.test_doc = Parser(test_contents)
+
+ if ref_contents is not None:
+ self.ref_doc = Parser(ref_contents)
+
+ # First check if it's a reftest
+
+ matches = self.reference_links_of_type('match') + self.reference_links_of_type('mismatch')
+ if matches:
+ if len(matches) > 1:
+ # FIXME: Is this actually true? We should fix this.
+ _log.warning('Multiple references are not supported. Importing the first ref defined in %s',
+ self.filesystem.basename(self.filename))
+
+ try:
+ ref_file = self.filesystem.join(self.filesystem.dirname(self.filename), matches[0]['href'])
+ except KeyError as e:
+ # FIXME: Figure out what to do w/ invalid test files.
+ _log.error('%s has a reference link but is missing the "href"', self.filename)
+ return None
+
+ if self.ref_doc is None:
+ self.ref_doc = self.load_file(ref_file)
+
+ test_info = {'test': self.filename, 'reference': ref_file}
+
+ # If the ref file path is relative, we need to check it for
+ # relative paths also because when it lands in WebKit, it will be
+ # moved down into the test dir.
+ #
+ # Note: The test files themselves are not checked for support files
+ # outside their directories as the convention in the CSSWG is to
+ # put all support files in the same dir or subdir as the test.
+ #
+ # All non-test files in the test's directory tree are normally
+ # copied as part of the import as they are assumed to be required
+ # support files.
+ #
+ # *But*, there is exactly one case in the entire css2.1 suite where
+ # a test depends on a file that lives in a different directory,
+ # which depends on another file that lives outside of its
+ # directory. This code covers that case :)
+ if matches[0]['href'].startswith('..'):
+ support_files = self.support_files(self.ref_doc)
+ test_info['refsupport'] = support_files
+
+ elif self.is_jstest():
+ test_info = {'test': self.filename, 'jstest': True}
+ elif self.options['all'] is True and not('-ref' in self.filename) and not('reference' in self.filename):
+ test_info = {'test': self.filename}
+
+ return test_info
+
+ def reference_links_of_type(self, reftest_type):
+ return self.test_doc.findAll(rel=reftest_type)
+
+ def is_jstest(self):
+ """Returns whether the file appears to be a jstest, by searching for usage of W3C-style testharness paths."""
+ return bool(self.test_doc.find(src=re.compile('[\'\"/]?/resources/testharness')))
+
+ def support_files(self, doc):
+ """ Searches the file for all paths specified in url()'s, href or src attributes."""
+ support_files = []
+
+ if doc is None:
+ return support_files
+
+ elements_with_src_attributes = doc.findAll(src=re.compile('.*'))
+ elements_with_href_attributes = doc.findAll(href=re.compile('.*'))
+
+ url_pattern = re.compile('url\(.*\)')
+ urls = []
+ for url in doc.findAll(text=url_pattern):
+ url = re.search(url_pattern, url)
+ url = re.sub('url\([\'\"]?', '', url.group(0))
+ url = re.sub('[\'\"]?\)', '', url)
+ urls.append(url)
+
+ src_paths = [src_tag['src'] for src_tag in elements_with_src_attributes]
+ href_paths = [href_tag['href'] for href_tag in elements_with_href_attributes]
+
+ paths = src_paths + href_paths + urls
+ for path in paths:
+ if not(path.startswith('http:')) and not(path.startswith('mailto:')):
+ support_files.append(path)
+
+ return support_files
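The support_files() helper above collects every src/href attribute plus any url(...) reference from a reference document so those files can be copied alongside the -expected file. A minimal illustration of just the url(...) extraction, using the same regular expressions on an invented CSS snippet:

    import re

    css = 'body { background: url("support/swatch-green.png") no-repeat; }'

    match = re.search(r'url\(.*\)', css)
    path = re.sub(r'url\([\'\"]?', '', match.group(0))  # strip the leading url(" part
    path = re.sub(r'[\'\"]?\)', '', path)               # strip the trailing ") part
    print path  # support/swatch-green.png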
diff --git a/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py b/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py
new file mode 100644
index 000000000..7fb0c5b04
--- /dev/null
+++ b/Tools/Scripts/webkitpy/w3c/test_parser_unittest.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python
+
+# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# 1. Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+# 2. Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+
+import os
+import unittest2 as unittest
+
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.w3c.test_parser import TestParser
+
+
+options = {'all': False, 'no_overwrite': False}
+
+
+class TestParserTest(unittest.TestCase):
+
+ def test_analyze_test_reftest_one_match(self):
+ test_html = """<head>
+<link rel="match" href="green-box-ref.xht" />
+</head>
+"""
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+
+ self.assertNotEqual(test_info, None, 'did not find a test')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+ self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+ def test_analyze_test_reftest_multiple_matches(self):
+ test_html = """<head>
+<link rel="match" href="green-box-ref.xht" />
+<link rel="match" href="blue-box-ref.xht" />
+<link rel="match" href="orange-box-ref.xht" />
+</head>
+"""
+ oc = OutputCapture()
+ oc.capture_output()
+ try:
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+ finally:
+ _, _, logs = oc.restore_output()
+
+ self.assertNotEqual(test_info, None, 'did not find a test')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+ self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+ self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
+
+ def test_analyze_test_reftest_match_and_mismatch(self):
+ test_html = """<head>
+<link rel="match" href="green-box-ref.xht" />
+<link rel="match" href="blue-box-ref.xht" />
+<link rel="mismatch" href="orange-box-notref.xht" />
+</head>
+"""
+ oc = OutputCapture()
+ oc.capture_output()
+
+ try:
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+ finally:
+ _, _, logs = oc.restore_output()
+
+ self.assertNotEqual(test_info, None, 'did not find a test')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+ self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+ self.assertEqual(logs, 'Multiple references are not supported. Importing the first ref defined in somefile.html\n')
+
+    def test_analyze_test_reftest_with_ref_support_files(self):
+        """ Tests analyze_test() using a reftest that refers to a reference file outside of the test's directory, where the reference file has paths to other support files """
+
+ test_html = """<html>
+<head>
+<link rel="match" href="../reference/green-box-ref.xht" />
+</head>
+"""
+ ref_html = """<head>
+<link href="support/css/ref-stylesheet.css" rel="stylesheet" type="text/css">
+<style type="text/css">
+ background-image: url("../../support/some-image.png")
+</style>
+</head>
+<body>
+<div><img src="../support/black96x96.png" alt="Image download support must be enabled" /></div>
+</body>
+</html>
+"""
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html, ref_contents=ref_html)
+
+ self.assertNotEqual(test_info, None, 'did not find a test')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+ self.assertTrue('reference' in test_info.keys(), 'did not find a reference file')
+ self.assertTrue(test_info['reference'].startswith(test_path), 'reference path is not correct')
+ self.assertTrue('refsupport' in test_info.keys(), 'there should be refsupport files for this test')
+        self.assertEqual(len(test_info['refsupport']), 3, 'there should be 3 support files in this reference')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not have been analyzed as a jstest')
+
+ def test_analyze_jstest(self):
+ """ Tests analyze_test() using a jstest """
+
+ test_html = """<head>
+<link href="/resources/testharness.css" rel="stylesheet" type="text/css">
+<script src="/resources/testharness.js"></script>
+</head>
+"""
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+
+ self.assertNotEqual(test_info, None, 'test_info is None')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+        self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertTrue('jstest' in test_info.keys(), 'test should be a jstest')
+
+ def test_analyze_pixel_test_all_true(self):
+        """ Tests analyze_test() using a test that is neither a reftest nor a jstest, with all=True """
+
+ test_html = """<html>
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR" />
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+ # Set options to 'all' so this gets found
+ options['all'] = True
+
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+
+ self.assertNotEqual(test_info, None, 'test_info is None')
+ self.assertTrue('test' in test_info.keys(), 'did not find a test file')
+        self.assertFalse('reference' in test_info.keys(), 'should not have found a reference file')
+ self.assertFalse('refsupport' in test_info.keys(), 'there should be no refsupport files for this test')
+ self.assertFalse('jstest' in test_info.keys(), 'test should not be a jstest')
+
+ def test_analyze_pixel_test_all_false(self):
+        """ Tests analyze_test() using a test that is neither a reftest nor a jstest, with all=False """
+
+ test_html = """<html>
+<head>
+<title>CSS Test: DESCRIPTION OF TEST</title>
+<link rel="author" title="NAME_OF_AUTHOR" />
+<style type="text/css"><![CDATA[
+CSS FOR TEST
+]]></style>
+</head>
+<body>
+CONTENT OF TEST
+</body>
+</html>
+"""
+ # Set all to false so this gets skipped
+ options['all'] = False
+
+ test_path = '/some/madeup/path/'
+ parser = TestParser(options, test_path + 'somefile.html')
+ test_info = parser.analyze_test(test_contents=test_html)
+
+ self.assertEqual(test_info, None, 'test should have been skipped')
+
+ def test_analyze_non_html_file(self):
+        """ Tests analyze_test() with a file that contains no HTML """
+ # FIXME: use a mock filesystem
+ parser = TestParser(options, os.path.join(os.path.dirname(__file__), 'test_parser.py'))
+ test_info = parser.analyze_test()
+ self.assertEqual(test_info, None, 'no tests should have been found in this file')
diff --git a/Tools/Scripts/webkitpy/webkitpy.pyproj b/Tools/Scripts/webkitpy/webkitpy.pyproj
deleted file mode 100644
index 588cfeaf5..000000000
--- a/Tools/Scripts/webkitpy/webkitpy.pyproj
+++ /dev/null
@@ -1,538 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
- <PropertyGroup>
- <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
- <SchemaVersion>2.0</SchemaVersion>
- <ProjectGuid>{59b0a791-93fe-40f8-a52b-ba19b73e8fa6}</ProjectGuid>
- <ProjectHome>.</ProjectHome>
- <StartupFile>layout_tests\run_webkit_tests.py</StartupFile>
- <SearchPath>
- </SearchPath>
- <WorkingDirectory>../</WorkingDirectory>
- <OutputPath>.</OutputPath>
- <Name>webkitpy</Name>
- <RootNamespace>webkitpy</RootNamespace>
- <IsWindowsApplication>False</IsWindowsApplication>
- <LaunchProvider>Standard Python launcher</LaunchProvider>
- <CommandLineArguments>--platform=mock --no-pixel-tests --no-retry-failures</CommandLineArguments>
- <InterpreterPath />
- <InterpreterArguments />
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)' == 'Debug' ">
- <DebugSymbols>true</DebugSymbols>
- <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
- </PropertyGroup>
- <PropertyGroup Condition=" '$(Configuration)' == 'Release' ">
- <DebugSymbols>true</DebugSymbols>
- <EnableUnmanagedDebugging>false</EnableUnmanagedDebugging>
- </PropertyGroup>
- <ItemGroup>
- <Compile Include="bindings\main.py" />
- <Compile Include="bindings\__init__.py" />
- <Compile Include="common\checkout\baselineoptimizer.py" />
- <Compile Include="common\checkout\baselineoptimizer_unittest.py" />
- <Compile Include="common\checkout\changelog.py" />
- <Compile Include="common\checkout\changelog_unittest.py" />
- <Compile Include="common\checkout\checkout.py" />
- <Compile Include="common\checkout\checkout_mock.py" />
- <Compile Include="common\checkout\checkout_unittest.py" />
- <Compile Include="common\checkout\commitinfo.py" />
- <Compile Include="common\checkout\commitinfo_unittest.py" />
- <Compile Include="common\checkout\deps.py" />
- <Compile Include="common\checkout\deps_mock.py" />
- <Compile Include="common\checkout\diff_parser.py" />
- <Compile Include="common\checkout\diff_parser_unittest.py" />
- <Compile Include="common\checkout\diff_test_data.py" />
- <Compile Include="common\checkout\scm\commitmessage.py" />
- <Compile Include="common\checkout\scm\detection.py" />
- <Compile Include="common\checkout\scm\detection_unittest.py" />
- <Compile Include="common\checkout\scm\git.py" />
- <Compile Include="common\checkout\scm\scm.py" />
- <Compile Include="common\checkout\scm\scm_mock.py" />
- <Compile Include="common\checkout\scm\scm_unittest.py" />
- <Compile Include="common\checkout\scm\svn.py" />
- <Compile Include="common\checkout\scm\__init__.py" />
- <Compile Include="common\checkout\__init__.py" />
- <Compile Include="common\checksvnconfigfile.py" />
- <Compile Include="common\config\build.py" />
- <Compile Include="common\config\build_unittest.py" />
- <Compile Include="common\config\committers.py" />
- <Compile Include="common\config\committers_unittest.py" />
- <Compile Include="common\config\committervalidator.py" />
- <Compile Include="common\config\committervalidator_unittest.py" />
- <Compile Include="common\config\contributionareas.py" />
- <Compile Include="common\config\contributionareas_unittest.py" />
- <Compile Include="common\config\irc.py" />
- <Compile Include="common\config\ports.py" />
- <Compile Include="common\config\ports_mock.py" />
- <Compile Include="common\config\ports_unittest.py" />
- <Compile Include="common\config\urls.py" />
- <Compile Include="common\config\urls_unittest.py" />
- <Compile Include="common\config\__init__.py" />
- <Compile Include="common\editdistance.py" />
- <Compile Include="common\editdistance_unittest.py" />
- <Compile Include="common\find_files.py" />
- <Compile Include="common\find_files_unittest.py" />
- <Compile Include="common\host.py" />
- <Compile Include="common\host_mock.py" />
- <Compile Include="common\lru_cache.py" />
- <Compile Include="common\lru_cache_unittest.py" />
- <Compile Include="common\memoized.py" />
- <Compile Include="common\memoized_unittest.py" />
- <Compile Include="common\message_pool.py" />
- <Compile Include="common\net\bugzilla\attachment.py" />
- <Compile Include="common\net\bugzilla\bug.py" />
- <Compile Include="common\net\bugzilla\bugzilla.py" />
- <Compile Include="common\net\bugzilla\bugzilla_mock.py" />
- <Compile Include="common\net\bugzilla\bugzilla_unittest.py" />
- <Compile Include="common\net\bugzilla\bug_unittest.py" />
- <Compile Include="common\net\bugzilla\__init__.py" />
- <Compile Include="common\net\buildbot\buildbot.py" />
- <Compile Include="common\net\buildbot\buildbot_mock.py" />
- <Compile Include="common\net\buildbot\buildbot_unittest.py" />
- <Compile Include="common\net\buildbot\chromiumbuildbot.py" />
- <Compile Include="common\net\buildbot\__init__.py" />
- <Compile Include="common\net\credentials.py" />
- <Compile Include="common\net\credentials_unittest.py" />
- <Compile Include="common\net\failuremap.py" />
- <Compile Include="common\net\failuremap_unittest.py" />
- <Compile Include="common\net\file_uploader.py" />
- <Compile Include="common\net\htdigestparser.py" />
- <Compile Include="common\net\htdigestparser_unittest.py" />
- <Compile Include="common\net\irc\ircbot.py" />
- <Compile Include="common\net\irc\ircproxy.py" />
- <Compile Include="common\net\irc\ircproxy_unittest.py" />
- <Compile Include="common\net\irc\irc_mock.py" />
- <Compile Include="common\net\irc\__init__.py" />
- <Compile Include="common\net\layouttestresults.py" />
- <Compile Include="common\net\layouttestresults_unittest.py" />
- <Compile Include="common\net\networktransaction.py" />
- <Compile Include="common\net\networktransaction_unittest.py" />
- <Compile Include="common\net\omahaproxy.py" />
- <Compile Include="common\net\omahaproxy_unittest.py" />
- <Compile Include="common\net\regressionwindow.py" />
- <Compile Include="common\net\resultsjsonparser.py" />
- <Compile Include="common\net\resultsjsonparser_unittest.py" />
- <Compile Include="common\net\statusserver.py" />
- <Compile Include="common\net\statusserver_mock.py" />
- <Compile Include="common\net\statusserver_unittest.py" />
- <Compile Include="common\net\unittestresults.py" />
- <Compile Include="common\net\unittestresults_unittest.py" />
- <Compile Include="common\net\web.py" />
- <Compile Include="common\net\web_mock.py" />
- <Compile Include="common\net\__init__.py" />
- <Compile Include="common\newstringio.py" />
- <Compile Include="common\newstringio_unittest.py" />
- <Compile Include="common\prettypatch.py" />
- <Compile Include="common\prettypatch_unittest.py" />
- <Compile Include="common\read_checksum_from_png.py" />
- <Compile Include="common\read_checksum_from_png_unittest.py" />
- <Compile Include="common\system\autoinstall.py" />
- <Compile Include="common\system\crashlogs.py" />
- <Compile Include="common\system\crashlogs_unittest.py" />
- <Compile Include="common\system\environment.py" />
- <Compile Include="common\system\environment_unittest.py" />
- <Compile Include="common\system\executive.py" />
- <Compile Include="common\system\executive_mock.py" />
- <Compile Include="common\system\executive_unittest.py" />
- <Compile Include="common\system\fileset.py" />
- <Compile Include="common\system\filesystem.py" />
- <Compile Include="common\system\filesystem_mock.py" />
- <Compile Include="common\system\filesystem_mock_unittest.py" />
- <Compile Include="common\system\filesystem_unittest.py" />
- <Compile Include="common\system\file_lock.py" />
- <Compile Include="common\system\file_lock_integrationtest.py" />
- <Compile Include="common\system\logtesting.py" />
- <Compile Include="common\system\logutils.py" />
- <Compile Include="common\system\logutils_unittest.py" />
- <Compile Include="common\system\outputcapture.py" />
- <Compile Include="common\system\outputcapture_unittest.py" />
- <Compile Include="common\system\path.py" />
- <Compile Include="common\system\path_unittest.py" />
- <Compile Include="common\system\platforminfo.py" />
- <Compile Include="common\system\platforminfo_mock.py" />
- <Compile Include="common\system\platforminfo_unittest.py" />
- <Compile Include="common\system\stack_utils.py" />
- <Compile Include="common\system\stack_utils_unittest.py" />
- <Compile Include="common\system\systemhost.py" />
- <Compile Include="common\system\systemhost_mock.py" />
- <Compile Include="common\system\urlfetcher.py" />
- <Compile Include="common\system\urlfetcher_mock.py" />
- <Compile Include="common\system\user.py" />
- <Compile Include="common\system\user_mock.py" />
- <Compile Include="common\system\user_unittest.py" />
- <Compile Include="common\system\workspace.py" />
- <Compile Include="common\system\workspace_mock.py" />
- <Compile Include="common\system\workspace_unittest.py" />
- <Compile Include="common\system\zipfileset.py" />
- <Compile Include="common\system\zipfileset_mock.py" />
- <Compile Include="common\system\zipfileset_unittest.py" />
- <Compile Include="common\system\zip_mock.py" />
- <Compile Include="common\system\__init__.py" />
- <Compile Include="common\thread\messagepump.py" />
- <Compile Include="common\thread\messagepump_unittest.py" />
- <Compile Include="common\thread\threadedmessagequeue.py" />
- <Compile Include="common\thread\threadedmessagequeue_unittest.py" />
- <Compile Include="common\thread\__init__.py" />
- <Compile Include="common\version_check.py" />
- <Compile Include="common\watchlist\amountchangedpattern.py" />
- <Compile Include="common\watchlist\amountchangedpattern_unittest.py" />
- <Compile Include="common\watchlist\changedlinepattern.py" />
- <Compile Include="common\watchlist\changedlinepattern_unittest.py" />
- <Compile Include="common\watchlist\filenamepattern.py" />
- <Compile Include="common\watchlist\filenamepattern_unittest.py" />
- <Compile Include="common\watchlist\watchlist.py" />
- <Compile Include="common\watchlist\watchlistloader.py" />
- <Compile Include="common\watchlist\watchlistloader_unittest.py" />
- <Compile Include="common\watchlist\watchlistparser.py" />
- <Compile Include="common\watchlist\watchlistparser_unittest.py" />
- <Compile Include="common\watchlist\watchlistrule.py" />
- <Compile Include="common\watchlist\watchlistrule_unittest.py" />
- <Compile Include="common\watchlist\watchlist_mock.py" />
- <Compile Include="common\watchlist\watchlist_unittest.py" />
- <Compile Include="common\watchlist\__init__.py" />
- <Compile Include="common\webkitunittest.py" />
- <Compile Include="common\__init__.py" />
- <Compile Include="layout_tests\controllers\manager.py" />
- <Compile Include="layout_tests\controllers\manager_unittest.py" />
- <Compile Include="layout_tests\controllers\single_test_runner.py" />
- <Compile Include="layout_tests\controllers\test_expectations_editor.py" />
- <Compile Include="layout_tests\controllers\test_expectations_editor_unittest.py" />
- <Compile Include="layout_tests\controllers\test_result_writer.py" />
- <Compile Include="layout_tests\controllers\test_result_writer_unittest.py" />
- <Compile Include="layout_tests\controllers\worker.py" />
- <Compile Include="layout_tests\controllers\__init__.py" />
- <Compile Include="layout_tests\layout_package\json_layout_results_generator.py" />
- <Compile Include="layout_tests\layout_package\json_results_generator.py" />
- <Compile Include="layout_tests\layout_package\json_results_generator_unittest.py" />
- <Compile Include="layout_tests\layout_package\__init__.py" />
- <Compile Include="layout_tests\models\result_summary.py" />
- <Compile Include="layout_tests\models\test_configuration.py" />
- <Compile Include="layout_tests\models\test_configuration_unittest.py" />
- <Compile Include="layout_tests\models\test_expectations.py" />
- <Compile Include="layout_tests\models\test_expectations_unittest.py" />
- <Compile Include="layout_tests\models\test_failures.py" />
- <Compile Include="layout_tests\models\test_failures_unittest.py" />
- <Compile Include="layout_tests\models\test_input.py" />
- <Compile Include="layout_tests\models\test_results.py" />
- <Compile Include="layout_tests\models\test_results_unittest.py" />
- <Compile Include="layout_tests\models\__init__.py" />
- <Compile Include="layout_tests\port\apple.py" />
- <Compile Include="layout_tests\port\base.py" />
- <Compile Include="layout_tests\port\base_unittest.py" />
- <Compile Include="layout_tests\port\builders.py" />
- <Compile Include="layout_tests\port\builders_unittest.py" />
- <Compile Include="layout_tests\port\chromium.py" />
- <Compile Include="layout_tests\port\chromium_android.py" />
- <Compile Include="layout_tests\port\chromium_android_unittest.py" />
- <Compile Include="layout_tests\port\chromium_linux.py" />
- <Compile Include="layout_tests\port\chromium_linux_unittest.py" />
- <Compile Include="layout_tests\port\chromium_mac.py" />
- <Compile Include="layout_tests\port\chromium_mac_unittest.py" />
- <Compile Include="layout_tests\port\chromium_port_testcase.py" />
- <Compile Include="layout_tests\port\chromium_unittest.py" />
- <Compile Include="layout_tests\port\chromium_win.py" />
- <Compile Include="layout_tests\port\chromium_win_unittest.py" />
- <Compile Include="layout_tests\port\config.py" />
- <Compile Include="layout_tests\port\config_mock.py" />
- <Compile Include="layout_tests\port\config_standalone.py" />
- <Compile Include="layout_tests\port\config_unittest.py" />
- <Compile Include="layout_tests\port\driver.py" />
- <Compile Include="layout_tests\port\driver_unittest.py" />
- <Compile Include="layout_tests\port\efl.py" />
- <Compile Include="layout_tests\port\efl_unittest.py" />
- <Compile Include="layout_tests\port\factory.py" />
- <Compile Include="layout_tests\port\factory_unittest.py" />
- <Compile Include="layout_tests\port\gtk.py" />
- <Compile Include="layout_tests\port\gtk_unittest.py" />
- <Compile Include="layout_tests\port\http_lock.py" />
- <Compile Include="layout_tests\port\http_lock_unittest.py" />
- <Compile Include="layout_tests\port\leakdetector.py" />
- <Compile Include="layout_tests\port\leakdetector_unittest.py" />
- <Compile Include="layout_tests\port\mac.py" />
- <Compile Include="layout_tests\port\mac_unittest.py" />
- <Compile Include="layout_tests\port\mock_drt.py" />
- <Compile Include="layout_tests\port\mock_drt_unittest.py" />
- <Compile Include="layout_tests\port\port_testcase.py" />
- <Compile Include="layout_tests\port\pulseaudio_sanitizer.py" />
- <Compile Include="layout_tests\port\qt.py" />
- <Compile Include="layout_tests\port\qt_unittest.py" />
- <Compile Include="layout_tests\port\server_process.py" />
- <Compile Include="layout_tests\port\server_process_unittest.py" />
- <Compile Include="layout_tests\port\test.py" />
- <Compile Include="layout_tests\port\webkit.py" />
- <Compile Include="layout_tests\port\webkit_unittest.py" />
- <Compile Include="layout_tests\port\win.py" />
- <Compile Include="layout_tests\port\win_unittest.py" />
- <Compile Include="layout_tests\port\xvfbdriver.py" />
- <Compile Include="layout_tests\port\__init__.py" />
- <Compile Include="layout_tests\reftests\extract_reference_link.py" />
- <Compile Include="layout_tests\reftests\extract_reference_link_unittest.py" />
- <Compile Include="layout_tests\reftests\__init__.py" />
- <Compile Include="layout_tests\run_webkit_tests.py" />
- <Compile Include="layout_tests\run_webkit_tests_integrationtest.py" />
- <Compile Include="layout_tests\servers\apache_http_server.py" />
- <Compile Include="layout_tests\servers\apache_http_server_unittest.py" />
- <Compile Include="layout_tests\servers\http_server.py" />
- <Compile Include="layout_tests\servers\http_server_base.py" />
- <Compile Include="layout_tests\servers\http_server_integrationtest.py" />
- <Compile Include="layout_tests\servers\http_server_unittest.py" />
- <Compile Include="layout_tests\servers\websocket_server.py" />
- <Compile Include="layout_tests\servers\__init__.py" />
- <Compile Include="layout_tests\views\metered_stream.py" />
- <Compile Include="layout_tests\views\metered_stream_unittest.py" />
- <Compile Include="layout_tests\views\printing.py" />
- <Compile Include="layout_tests\views\printing_unittest.py" />
- <Compile Include="layout_tests\views\__init__.py" />
- <Compile Include="layout_tests\__init__.py" />
- <Compile Include="performance_tests\perftest.py" />
- <Compile Include="performance_tests\perftestsrunner.py" />
- <Compile Include="performance_tests\perftestsrunner_unittest.py" />
- <Compile Include="performance_tests\perftest_unittest.py" />
- <Compile Include="performance_tests\__init__.py" />
- <Compile Include="style\checker.py" />
- <Compile Include="style\checkers\changelog.py" />
- <Compile Include="style\checkers\changelog_unittest.py" />
- <Compile Include="style\checkers\common.py" />
- <Compile Include="style\checkers\common_unittest.py" />
- <Compile Include="style\checkers\cpp.py" />
- <Compile Include="style\checkers\cpp_unittest.py" />
- <Compile Include="style\checkers\jsonchecker.py" />
- <Compile Include="style\checkers\jsonchecker_unittest.py" />
- <Compile Include="style\checkers\png.py" />
- <Compile Include="style\checkers\png_unittest.py" />
- <Compile Include="style\checkers\python.py" />
- <Compile Include="style\checkers\python_unittest.py" />
- <Compile Include="style\checkers\python_unittest_input.py" />
- <Compile Include="style\checkers\test_expectations.py" />
- <Compile Include="style\checkers\test_expectations_unittest.py" />
- <Compile Include="style\checkers\text.py" />
- <Compile Include="style\checkers\text_unittest.py" />
- <Compile Include="style\checkers\watchlist.py" />
- <Compile Include="style\checkers\watchlist_unittest.py" />
- <Compile Include="style\checkers\xcodeproj.py" />
- <Compile Include="style\checkers\xcodeproj_unittest.py" />
- <Compile Include="style\checkers\xml.py" />
- <Compile Include="style\checkers\xml_unittest.py" />
- <Compile Include="style\checkers\__init__.py" />
- <Compile Include="style\checker_unittest.py" />
- <Compile Include="style\error_handlers.py" />
- <Compile Include="style\error_handlers_unittest.py" />
- <Compile Include="style\filereader.py" />
- <Compile Include="style\filereader_unittest.py" />
- <Compile Include="style\filter.py" />
- <Compile Include="style\filter_unittest.py" />
- <Compile Include="style\main.py" />
- <Compile Include="style\main_unittest.py" />
- <Compile Include="style\optparser.py" />
- <Compile Include="style\optparser_unittest.py" />
- <Compile Include="style\patchreader.py" />
- <Compile Include="style\patchreader_unittest.py" />
- <Compile Include="style\__init__.py" />
- <Compile Include="test\finder.py" />
- <Compile Include="test\finder_unittest.py" />
- <Compile Include="test\main.py" />
- <Compile Include="test\main_unittest.py" />
- <Compile Include="test\printer.py" />
- <Compile Include="test\runner.py" />
- <Compile Include="test\runner_unittest.py" />
- <Compile Include="test\skip.py" />
- <Compile Include="test\skip_unittest.py" />
- <Compile Include="test\__init__.py" />
- <Compile Include="thirdparty\BeautifulSoup.py" />
- <Compile Include="thirdparty\mock.py" />
- <Compile Include="thirdparty\mod_pywebsocket\common.py" />
- <Compile Include="thirdparty\mod_pywebsocket\dispatch.py" />
- <Compile Include="thirdparty\mod_pywebsocket\extensions.py" />
- <Compile Include="thirdparty\mod_pywebsocket\handshake\draft75.py" />
- <Compile Include="thirdparty\mod_pywebsocket\handshake\hybi.py" />
- <Compile Include="thirdparty\mod_pywebsocket\handshake\hybi00.py" />
- <Compile Include="thirdparty\mod_pywebsocket\handshake\_base.py" />
- <Compile Include="thirdparty\mod_pywebsocket\handshake\__init__.py" />
- <Compile Include="thirdparty\mod_pywebsocket\headerparserhandler.py" />
- <Compile Include="thirdparty\mod_pywebsocket\http_header_util.py" />
- <Compile Include="thirdparty\mod_pywebsocket\memorizingfile.py" />
- <Compile Include="thirdparty\mod_pywebsocket\msgutil.py" />
- <Compile Include="thirdparty\mod_pywebsocket\standalone.py" />
- <Compile Include="thirdparty\mod_pywebsocket\stream.py" />
- <Compile Include="thirdparty\mod_pywebsocket\util.py" />
- <Compile Include="thirdparty\mod_pywebsocket\_stream_base.py" />
- <Compile Include="thirdparty\mod_pywebsocket\_stream_hixie75.py" />
- <Compile Include="thirdparty\mod_pywebsocket\_stream_hybi.py" />
- <Compile Include="thirdparty\mod_pywebsocket\__init__.py" />
- <Compile Include="thirdparty\ordered_dict.py" />
- <Compile Include="thirdparty\__init__.py" />
- <Compile Include="thirdparty\__init___unittest.py" />
- <Compile Include="tool\bot\botinfo.py" />
- <Compile Include="tool\bot\botinfo_unittest.py" />
- <Compile Include="tool\bot\commitqueuetask.py" />
- <Compile Include="tool\bot\commitqueuetask_unittest.py" />
- <Compile Include="tool\bot\earlywarningsystemtask.py" />
- <Compile Include="tool\bot\expectedfailures.py" />
- <Compile Include="tool\bot\expectedfailures_unittest.py" />
- <Compile Include="tool\bot\feeders.py" />
- <Compile Include="tool\bot\feeders_unittest.py" />
- <Compile Include="tool\bot\flakytestreporter.py" />
- <Compile Include="tool\bot\flakytestreporter_unittest.py" />
- <Compile Include="tool\bot\irc_command.py" />
- <Compile Include="tool\bot\irc_command_unittest.py" />
- <Compile Include="tool\bot\ircbot.py" />
- <Compile Include="tool\bot\ircbot_unittest.py" />
- <Compile Include="tool\bot\layouttestresultsreader.py" />
- <Compile Include="tool\bot\layouttestresultsreader_unittest.py" />
- <Compile Include="tool\bot\patchanalysistask.py" />
- <Compile Include="tool\bot\queueengine.py" />
- <Compile Include="tool\bot\queueengine_unittest.py" />
- <Compile Include="tool\bot\sheriff.py" />
- <Compile Include="tool\bot\sheriff_unittest.py" />
- <Compile Include="tool\bot\stylequeuetask.py" />
- <Compile Include="tool\bot\__init__.py" />
- <Compile Include="tool\commands\abstractlocalservercommand.py" />
- <Compile Include="tool\commands\abstractsequencedcommand.py" />
- <Compile Include="tool\commands\adduserstogroups.py" />
- <Compile Include="tool\commands\analyzechangelog.py" />
- <Compile Include="tool\commands\analyzechangelog_unittest.py" />
- <Compile Include="tool\commands\applywatchlistlocal.py" />
- <Compile Include="tool\commands\applywatchlistlocal_unittest.py" />
- <Compile Include="tool\commands\bugfortest.py" />
- <Compile Include="tool\commands\bugsearch.py" />
- <Compile Include="tool\commands\chromechannels.py" />
- <Compile Include="tool\commands\chromechannels_unittest.py" />
- <Compile Include="tool\commands\commandtest.py" />
- <Compile Include="tool\commands\download.py" />
- <Compile Include="tool\commands\download_unittest.py" />
- <Compile Include="tool\commands\earlywarningsystem.py" />
- <Compile Include="tool\commands\earlywarningsystem_unittest.py" />
- <Compile Include="tool\commands\expectations.py" />
- <Compile Include="tool\commands\findusers.py" />
- <Compile Include="tool\commands\gardenomatic.py" />
- <Compile Include="tool\commands\openbugs.py" />
- <Compile Include="tool\commands\openbugs_unittest.py" />
- <Compile Include="tool\commands\prettydiff.py" />
- <Compile Include="tool\commands\queries.py" />
- <Compile Include="tool\commands\queries_unittest.py" />
- <Compile Include="tool\commands\queues.py" />
- <Compile Include="tool\commands\queuestest.py" />
- <Compile Include="tool\commands\queues_unittest.py" />
- <Compile Include="tool\commands\rebaseline.py" />
- <Compile Include="tool\commands\rebaselineserver.py" />
- <Compile Include="tool\commands\rebaseline_unittest.py" />
- <Compile Include="tool\commands\roll.py" />
- <Compile Include="tool\commands\roll_unittest.py" />
- <Compile Include="tool\commands\sheriffbot.py" />
- <Compile Include="tool\commands\sheriffbot_unittest.py" />
- <Compile Include="tool\commands\stepsequence.py" />
- <Compile Include="tool\commands\suggestnominations.py" />
- <Compile Include="tool\commands\suggestnominations_unittest.py" />
- <Compile Include="tool\commands\upload.py" />
- <Compile Include="tool\commands\upload_unittest.py" />
- <Compile Include="tool\commands\__init__.py" />
- <Compile Include="tool\comments.py" />
- <Compile Include="tool\grammar.py" />
- <Compile Include="tool\grammar_unittest.py" />
- <Compile Include="tool\main.py" />
- <Compile Include="tool\mocktool.py" />
- <Compile Include="tool\mocktool_unittest.py" />
- <Compile Include="tool\multicommandtool.py" />
- <Compile Include="tool\multicommandtool_unittest.py" />
- <Compile Include="tool\servers\gardeningserver.py" />
- <Compile Include="tool\servers\gardeningserver_unittest.py" />
- <Compile Include="tool\servers\rebaselineserver.py" />
- <Compile Include="tool\servers\rebaselineserver_unittest.py" />
- <Compile Include="tool\servers\reflectionhandler.py" />
- <Compile Include="tool\servers\__init__.py" />
- <Compile Include="tool\steps\abstractstep.py" />
- <Compile Include="tool\steps\addsvnmimetypeforpng.py" />
- <Compile Include="tool\steps\addsvnmimetypeforpng_unittest.py" />
- <Compile Include="tool\steps\applypatch.py" />
- <Compile Include="tool\steps\applypatchwithlocalcommit.py" />
- <Compile Include="tool\steps\applywatchlist.py" />
- <Compile Include="tool\steps\applywatchlist_unittest.py" />
- <Compile Include="tool\steps\attachtobug.py" />
- <Compile Include="tool\steps\build.py" />
- <Compile Include="tool\steps\checkstyle.py" />
- <Compile Include="tool\steps\cleanworkingdirectory.py" />
- <Compile Include="tool\steps\cleanworkingdirectorywithlocalcommits.py" />
- <Compile Include="tool\steps\cleanworkingdirectory_unittest.py" />
- <Compile Include="tool\steps\closebug.py" />
- <Compile Include="tool\steps\closebugforlanddiff.py" />
- <Compile Include="tool\steps\closebugforlanddiff_unittest.py" />
- <Compile Include="tool\steps\closepatch.py" />
- <Compile Include="tool\steps\commit.py" />
- <Compile Include="tool\steps\commit_unittest.py" />
- <Compile Include="tool\steps\confirmdiff.py" />
- <Compile Include="tool\steps\createbug.py" />
- <Compile Include="tool\steps\editchangelog.py" />
- <Compile Include="tool\steps\ensurebugisopenandassigned.py" />
- <Compile Include="tool\steps\ensurelocalcommitifneeded.py" />
- <Compile Include="tool\steps\metastep.py" />
- <Compile Include="tool\steps\obsoletepatches.py" />
- <Compile Include="tool\steps\options.py" />
- <Compile Include="tool\steps\postdiff.py" />
- <Compile Include="tool\steps\postdiffforcommit.py" />
- <Compile Include="tool\steps\postdiffforrevert.py" />
- <Compile Include="tool\steps\preparechangelog.py" />
- <Compile Include="tool\steps\preparechangelogfordepsroll.py" />
- <Compile Include="tool\steps\preparechangelogforrevert.py" />
- <Compile Include="tool\steps\preparechangelogforrevert_unittest.py" />
- <Compile Include="tool\steps\preparechangelog_unittest.py" />
- <Compile Include="tool\steps\promptforbugortitle.py" />
- <Compile Include="tool\steps\reopenbugafterrollout.py" />
- <Compile Include="tool\steps\revertrevision.py" />
- <Compile Include="tool\steps\runtests.py" />
- <Compile Include="tool\steps\runtests_unittest.py" />
- <Compile Include="tool\steps\steps_unittest.py" />
- <Compile Include="tool\steps\suggestreviewers.py" />
- <Compile Include="tool\steps\suggestreviewers_unittest.py" />
- <Compile Include="tool\steps\update.py" />
- <Compile Include="tool\steps\updatechangelogswithreviewer.py" />
- <Compile Include="tool\steps\updatechangelogswithreview_unittest.py" />
- <Compile Include="tool\steps\updatechromiumdeps.py" />
- <Compile Include="tool\steps\update_unittest.py" />
- <Compile Include="tool\steps\validatechangelogs.py" />
- <Compile Include="tool\steps\validatechangelogs_unittest.py" />
- <Compile Include="tool\steps\validatereviewer.py" />
- <Compile Include="tool\steps\__init__.py" />
- <Compile Include="tool\__init__.py" />
- <Compile Include="to_be_moved\update_webgl_conformance_tests.py" />
- <Compile Include="to_be_moved\update_webgl_conformance_tests_unittest.py" />
- <Compile Include="to_be_moved\__init__.py" />
- <Compile Include="__init__.py" />
- </ItemGroup>
- <ItemGroup>
- <Folder Include="bindings\" />
- <Folder Include="common\" />
- <Folder Include="common\checkout\" />
- <Folder Include="common\checkout\scm\" />
- <Folder Include="common\config\" />
- <Folder Include="common\net\" />
- <Folder Include="common\net\bugzilla\" />
- <Folder Include="common\net\buildbot\" />
- <Folder Include="common\net\irc\" />
- <Folder Include="common\system\" />
- <Folder Include="common\thread\" />
- <Folder Include="common\watchlist\" />
- <Folder Include="layout_tests\" />
- <Folder Include="layout_tests\controllers\" />
- <Folder Include="layout_tests\layout_package\" />
- <Folder Include="layout_tests\models\" />
- <Folder Include="layout_tests\port\" />
- <Folder Include="layout_tests\reftests\" />
- <Folder Include="layout_tests\servers\" />
- <Folder Include="layout_tests\views\" />
- <Folder Include="performance_tests\" />
- <Folder Include="style\" />
- <Folder Include="style\checkers\" />
- <Folder Include="test\" />
- <Folder Include="thirdparty\" />
- <Folder Include="thirdparty\mod_pywebsocket\" />
- <Folder Include="thirdparty\mod_pywebsocket\handshake\" />
- <Folder Include="tool\" />
- <Folder Include="tool\bot\" />
- <Folder Include="tool\commands\" />
- <Folder Include="tool\servers\" />
- <Folder Include="tool\steps\" />
- <Folder Include="to_be_moved\" />
- </ItemGroup>
- <Import Project="$(MSBuildToolsPath)\Microsoft.Common.targets" />
-</Project> \ No newline at end of file
diff --git a/Tools/Scripts/webkitpy/webkitpy.sln b/Tools/Scripts/webkitpy/webkitpy.sln
deleted file mode 100644
index c6583b857..000000000
--- a/Tools/Scripts/webkitpy/webkitpy.sln
+++ /dev/null
@@ -1,18 +0,0 @@
-
-Microsoft Visual Studio Solution File, Format Version 11.00
-# Visual Studio 2010
-Project("{888888A0-9F3D-457C-B088-3A5042F75D52}") = "webkitpy", "webkitpy.pyproj", "{59B0A791-93FE-40F8-A52B-BA19B73E8FA6}"
-EndProject
-Global
- GlobalSection(SolutionConfigurationPlatforms) = preSolution
- Debug|Any CPU = Debug|Any CPU
- Release|Any CPU = Release|Any CPU
- EndGlobalSection
- GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {59B0A791-93FE-40F8-A52B-BA19B73E8FA6}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {59B0A791-93FE-40F8-A52B-BA19B73E8FA6}.Release|Any CPU.ActiveCfg = Release|Any CPU
- EndGlobalSection
- GlobalSection(SolutionProperties) = preSolution
- HideSolutionNode = FALSE
- EndGlobalSection
-EndGlobal
diff --git a/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/fake-data-failing-expected.txt b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/fake-data-failing-expected.txt
new file mode 100644
index 000000000..32e966fe3
--- /dev/null
+++ b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/fake-data-failing-expected.txt
@@ -0,0 +1,11 @@
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:2' included forbidden macro 'PLATFORM' => '#if PLATFORM(MAC)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:4' included forbidden macro 'CPU' => '#if CPU(X86)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:6' included forbidden macro 'OS' => '#if OS(DARWIN)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:8' included forbidden macro 'COMPILER' => '#if COMPILER(CLANG)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:10' included forbidden macro 'ENABLE' => '#if ENABLE(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:12' included forbidden macro 'HAVE' => '#if HAVE(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:14' included forbidden macro 'USE' => '#if USE(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:16' included forbidden macro 'COMPILER' => '#if COMPILER_SUPPORTS(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:18' included forbidden macro 'COMPILER' => '#if COMPILER_QUIRK(FEATURE)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:23' included forbidden macro 'PLATFORM' => ' #if PLATFORM(X)'
+ERROR: '--stripped--/Fake.framework/Headers/Fail.h:28' included forbidden macro 'PLATFORM' => '#if defined(ignored) && PLATFORM(X)'
diff --git a/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/pass-expected.txt b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/pass-expected.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/pass-expected.txt
diff --git a/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Fail.h b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Fail.h
new file mode 100644
index 000000000..6b420924c
--- /dev/null
+++ b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Fail.h
@@ -0,0 +1,29 @@
+// Common macros that we want to catch.
+#if PLATFORM(MAC)
+#endif
+#if CPU(X86)
+#endif
+#if OS(DARWIN)
+#endif
+#if COMPILER(CLANG)
+#endif
+#if ENABLE(FEATURE)
+#endif
+#if HAVE(FEATURE)
+#endif
+#if USE(FEATURE)
+#endif
+#if COMPILER_SUPPORTS(FEATURE)
+#endif
+#if COMPILER_QUIRK(FEATURE)
+#endif
+
+// Indented.
+#if 1
+ #if PLATFORM(X)
+ #endif
+#endif
+
+// Conditionals are not evaluated; we just check for the existence of the macro.
+#if defined(ignored) && PLATFORM(X)
+#endif
diff --git a/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Pass.h b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Pass.h
new file mode 100644
index 000000000..3a8a15d38
--- /dev/null
+++ b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/resources/Fake.framework/Headers/Pass.h
@@ -0,0 +1,6 @@
+// A macro name in a #error should not matter; that is just a coincidence.
+#error PLATFORM
+
+// There are references to OS2, but that is not the OS() macro.
+#if defined(__OS2__) || defined(OS2)
+#endif
diff --git a/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/run-test.rb b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/run-test.rb
new file mode 100755
index 000000000..e362ba399
--- /dev/null
+++ b/Tools/Scripts/webkitruby/check-for-inappropriate-macros-in-external-headers-tests/run-test.rb
@@ -0,0 +1,74 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# Testing Tools/Scripts/check-for-inappropriate-macros-in-external-headers
+$test_directory = File.dirname(__FILE__)
+$tool = File.expand_path(File.join($test_directory, '..', '..', 'check-for-inappropriate-macros-in-external-headers'))
+puts "Testing: Tools/Scripts/check-for-inappropriate-macros-in-external-headers"
+
+$was_failure = false
+
+def sanitized_output(output)
+ lines = output.split("\n").map { |line| line.sub(/\'(.*)?\/(.*)?\.framework/, "'--stripped--/\\2.framework") }
+ lines.join("\n") + (lines.empty? ? "" : "\n")
+end
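+
+# For example (illustrative input only), sanitized_output turns a line such as
+#   ERROR: '/some/local/build/dir/Fake.framework/Headers/Fail.h:2' included forbidden macro 'PLATFORM' => '#if PLATFORM(MAC)'
+# into
+#   ERROR: '--stripped--/Fake.framework/Headers/Fail.h:2' included forbidden macro 'PLATFORM' => '#if PLATFORM(MAC)'
+# so the *-expected.txt files do not depend on the local build directory.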
+
+def run_test(config)
+ ENV['TARGET_BUILD_DIR'] = File.join($test_directory, 'resources')
+ ENV['PROJECT_NAME'] = config[:framework]
+ ENV['SHALLOW_BUNDLE'] = config[:shallow] ? 'YES' : 'NO'
+ output = sanitized_output %x{ #{$tool} #{config[:paths].join(' ')} 2>&1 }
+
+ if config[:expectedToPass] != ($?.exitstatus == 0)
+ pass = false
+ else
+ expected_output = File.read File.join($test_directory, config[:expectedOutput])
+ pass = output == expected_output
+ end
+
+ puts "#{pass ? "PASS" : "FAIL"} - #{config[:name]}"
+ $was_failure = true if !pass
+end
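+
+# Each hash below describes one test case; the keys are consumed by run_test above:
+#   :framework       - exported to the tool as PROJECT_NAME
+#   :shallow         - whether SHALLOW_BUNDLE is set to YES
+#   :paths           - header paths passed as command-line arguments to the tool
+#   :expectedToPass  - whether the tool is expected to exit with status 0
+#   :expectedOutput  - name of the expected-output file in this test directory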
+
+[
+ {
+ :name => 'test_good_fake_data',
+ :framework => 'Fake',
+ :shallow => true,
+ :paths => ['Headers/Pass.h'],
+ :expectedToPass => true,
+ :expectedOutput => 'pass-expected.txt'
+ },
+ {
+ :name => 'test_bad_fake_data',
+ :framework => 'Fake',
+ :shallow => true,
+ :paths => ['Headers/Fail.h'],
+ :expectedToPass => false,
+ :expectedOutput => 'fake-data-failing-expected.txt'
+ }
+].each { |x| run_test(x) }
+
+exit 1 if $was_failure