author    Simon Hausmann <simon.hausmann@nokia.com>    2012-08-12 09:27:39 +0200
committer Simon Hausmann <simon.hausmann@nokia.com>    2012-08-12 09:27:39 +0200
commit    3749d61e1f7a59f5ec5067e560af1eb610c82015 (patch)
tree      73dc228333948738bbe02976cacca8cd382bc978 /Tools/Scripts/webkitpy
parent    b32b4dcd9a51ab8de6afc53d9e17f8707e1f7a5e (diff)
download  qtwebkit-3749d61e1f7a59f5ec5067e560af1eb610c82015.tar.gz
Imported WebKit commit a77350243e054f3460d1137301d8b3faee3d2052 (http://svn.webkit.org/repository/webkit/trunk@125365)
New snapshot with build fixes for latest API changes in Qt and all WK1 Win MSVC fixes upstream
Diffstat (limited to 'Tools/Scripts/webkitpy')
-rw-r--r--  Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py | 4
-rw-r--r--  Tools/Scripts/webkitpy/common/config/committers.py | 5
-rw-r--r--  Tools/Scripts/webkitpy/common/config/ports.py | 15
-rw-r--r--  Tools/Scripts/webkitpy/common/config/ports_unittest.py | 4
-rwxr-xr-x  Tools/Scripts/webkitpy/common/config/watchlist | 23
-rw-r--r--  Tools/Scripts/webkitpy/common/message_pool.py | 17
-rw-r--r--  Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py | 9
-rw-r--r--  Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py | 10
-rw-r--r--  Tools/Scripts/webkitpy/common/net/credentials.py | 19
-rw-r--r--  Tools/Scripts/webkitpy/common/net/credentials_unittest.py | 28
-rw-r--r--  Tools/Scripts/webkitpy/common/prettypatch_unittest.py | 5
-rwxr-xr-x  Tools/Scripts/webkitpy/common/system/autoinstall.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/common/system/executive_unittest.py | 14
-rw-r--r--  Tools/Scripts/webkitpy/common/system/filesystem.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/common/system/filesystem_mock.py | 10
-rw-r--r--  Tools/Scripts/webkitpy/common/system/outputcapture.py | 8
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py | 178
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py | 627
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py | 375
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager.py | 727
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py | 400
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py | 20
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/controllers/worker.py | 231
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/result_summary.py | 6
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py | 56
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py | 20
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/models/test_input.py | 24
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/base.py | 40
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py | 4
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/chromium.py | 30
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py | 292
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py | 222
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py | 5
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/driver.py | 19
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/efl.py | 6
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/gtk.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/image_diff.py | 17
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py | 9
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/mac.py | 2
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py | 11
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py | 77
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/qt.py | 10
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py | 4
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/port/test.py | 12
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/port/webkit.py | 35
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py | 34
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py | 60
-rwxr-xr-x  Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py | 6
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/views/printing.py | 608
-rw-r--r--  Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py | 331
-rw-r--r--  Tools/Scripts/webkitpy/performance_tests/perftest.py | 60
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py | 23
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py | 149
-rwxr-xr-x  Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py | 225
-rw-r--r--  Tools/Scripts/webkitpy/test/finder.py | 27
-rw-r--r--  Tools/Scripts/webkitpy/test/finder_unittest.py | 12
-rw-r--r--  Tools/Scripts/webkitpy/test/main.py | 123
-rw-r--r--  Tools/Scripts/webkitpy/test/main_unittest.py | 4
-rw-r--r--  Tools/Scripts/webkitpy/test/printer.py | 27
-rw-r--r--  Tools/Scripts/webkitpy/test/runner.py | 63
-rw-r--r--  Tools/Scripts/webkitpy/test/runner_unittest.py | 27
-rw-r--r--  Tools/Scripts/webkitpy/thirdparty/__init__.py | 55
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/irc_command.py | 12
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py | 5
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/ircbot.py (renamed from Tools/Scripts/webkitpy/tool/bot/sheriffircbot.py) | 38
-rw-r--r--  Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py (renamed from Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py) | 24
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/__init__.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py | 8
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py | 1
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/perfalizer.py | 215
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py | 111
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/queues.py | 20
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/queues_unittest.py | 18
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/rebaseline.py | 13
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py | 12
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/sheriffbot.py | 5
-rw-r--r--  Tools/Scripts/webkitpy/tool/commands/upload_unittest.py | 4
-rw-r--r--  Tools/Scripts/webkitpy/tool/steps/runtests.py | 3
-rw-r--r--  Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py | 2
-rw-r--r--  Tools/Scripts/webkitpy/tool/steps/update_unittest.py | 8
-rw-r--r--  Tools/Scripts/webkitpy/webkitpy.pyproj | 4
85 files changed, 3125 insertions, 2860 deletions
diff --git a/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py b/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
index 8b6c76499..9dd01e8d4 100644
--- a/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
+++ b/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
@@ -69,10 +69,10 @@ class MockSCM(object):
return ["MockFile1"]
def head_svn_revision(self):
- return 1234
+ return '1234'
def svn_revision(self, path):
- return 5678
+ return '5678'
def create_patch(self, git_commit, changed_files=None):
return "Patch1"
diff --git a/Tools/Scripts/webkitpy/common/config/committers.py b/Tools/Scripts/webkitpy/common/config/committers.py
index b1b8a3fac..ddfddf9a9 100644
--- a/Tools/Scripts/webkitpy/common/config/committers.py
+++ b/Tools/Scripts/webkitpy/common/config/committers.py
@@ -127,6 +127,7 @@ contributors_who_are_not_committers = [
Contributor("Dongsung Huang", "luxtella@company100.net", "Huang"),
Contributor("Douglas Davidson", "ddavidso@apple.com"),
Contributor("Edward O'Connor", "eoconnor@apple.com", "hober"),
+ Contributor("Elliott Sprehn", "esprehn@chromium.org", "esprehn"),
Contributor("Eric Penner", "epenner@chromium.org", "epenner"),
Contributor("Felician Marton", ["felician@inf.u-szeged.hu", "marton.felician.zoltan@stud.u-szeged.hu"], "Felician"),
Contributor("Finnur Thorarinsson", ["finnur@chromium.org", "finnur.webkit@gmail.com"], "finnur"),
@@ -136,6 +137,7 @@ contributors_who_are_not_committers = [
Contributor("Hao Zheng", "zhenghao@chromium.org"),
Contributor("Ian Hickson", "ian@hixie.ch", "hixie"),
Contributor("Janos Badics", "jbadics@inf.u-szeged.hu", 'dicska'),
+ Contributor("Jeff Timanus", ["twiz@chromium.org", "twiz@google.com"], "twiz"),
Contributor("Jing Zhao", "jingzhao@chromium.org"),
Contributor("John Bates", ["jbates@google.com", "jbates@chromium.org"], "jbates"),
Contributor("John Bauman", ["jbauman@chromium.org", "jbauman@google.com"], "jbauman"),
@@ -182,7 +184,6 @@ committers_unable_to_review = [
Committer("Alec Flett", ["alecflett@chromium.org", "alecflett@google.com"], "alecf"),
Committer(u"Alexander F\u00e6r\u00f8y", ["ahf@0x90.dk", "alexander.faeroy@nokia.com"], "ahf"),
Committer("Alexander Kellett", ["lypanov@mac.com", "a-lists001@lypanov.net", "lypanov@kde.org"], "lypanov"),
- Committer("Alexander Pavlov", "apavlov@chromium.org", "apavlov"),
Committer("Alexandru Chiculita", "achicu@adobe.com", "achicu"),
Committer("Alice Boxhall", "aboxhall@chromium.org", "aboxhall"),
Committer("Allan Sandfeld Jensen", ["allan.jensen@nokia.com", "kde@carewolf.com", "sandfeld@kde.org"], "carewolf"),
@@ -202,6 +203,7 @@ committers_unable_to_review = [
Committer("Anton D'Auria", "adauria@apple.com", "antonlefou"),
Committer("Anton Muhin", "antonm@chromium.org", "antonm"),
Committer("Arko Saha", "arko@motorola.com", "arkos"),
+ Committer("Arvid Nilsson", "anilsson@rim.com", "anilsson"),
Committer("Balazs Kelemen", "kbalazs@webkit.org", "kbalazs"),
Committer("Ben Murdoch", "benm@google.com", "benm"),
Committer("Ben Wells", "benwells@chromium.org", "benwells"),
@@ -415,6 +417,7 @@ reviewers_list = [
Reviewer("Adele Peterson", "adele@apple.com", "adele"),
Reviewer("Adrienne Walker", ["enne@google.com", "enne@chromium.org"], "enne"),
Reviewer("Alejandro G. Castro", ["alex@igalia.com", "alex@webkit.org"], "alexg__"),
+ Reviewer("Alexander Pavlov", ["apavlov@chromium.org", "pavlov81@gmail.com"], "apavlov"),
Reviewer("Alexey Proskuryakov", ["ap@webkit.org", "ap@apple.com"], "ap"),
Reviewer("Alexis Menard", ["alexis.menard@openbossa.org", "menard@kde.org"], "darktears"),
Reviewer("Alice Liu", "alice.liu@apple.com", "aliu"),
diff --git a/Tools/Scripts/webkitpy/common/config/ports.py b/Tools/Scripts/webkitpy/common/config/ports.py
index c086238fd..1d76b4218 100644
--- a/Tools/Scripts/webkitpy/common/config/ports.py
+++ b/Tools/Scripts/webkitpy/common/config/ports.py
@@ -59,6 +59,7 @@ class DeprecatedPort(object):
def port(port_name):
ports = {
"chromium": ChromiumPort,
+ "chromium-android": ChromiumAndroidPort,
"chromium-xvfb": ChromiumXVFBPort,
"gtk": GtkPort,
"mac": MacPort,
@@ -193,6 +194,20 @@ class ChromiumPort(DeprecatedPort):
return None
+class ChromiumAndroidPort(ChromiumPort):
+ port_flag_name = "chromium-android"
+
+ def update_webkit_command(self, non_interactive=False):
+ command = super(ChromiumAndroidPort, self).update_webkit_command(non_interactive=non_interactive)
+ command.append("--chromium-android")
+ return command
+
+ def build_webkit_command(self, build_style=None):
+ command = super(ChromiumAndroidPort, self).build_webkit_command(build_style=build_style)
+ command.append("--chromium-android")
+ return command
+
+
class ChromiumXVFBPort(ChromiumPort):
port_flag_name = "chromium-xvfb"
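Note on the new port: a minimal usage sketch of ChromiumAndroidPort. The expected command contents mirror the ports_unittest.py assertions below rather than output verified here.

    # Sketch only: expectations taken from the unit test in the next hunk.
    from webkitpy.common.config.ports import ChromiumAndroidPort

    port = ChromiumAndroidPort()
    update_command = port.update_webkit_command()
    build_command = port.build_webkit_command(build_style="debug")
    # update_command ends with ["--chromium", "--chromium-android"]
    # build_command ends with ["--debug", "--chromium", "--update-chromium", "--chromium-android"]
    assert update_command[-1] == "--chromium-android"
    assert build_command[-1] == "--chromium-android"
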
diff --git a/Tools/Scripts/webkitpy/common/config/ports_unittest.py b/Tools/Scripts/webkitpy/common/config/ports_unittest.py
index e0b77c82e..df5bf7352 100644
--- a/Tools/Scripts/webkitpy/common/config/ports_unittest.py
+++ b/Tools/Scripts/webkitpy/common/config/ports_unittest.py
@@ -59,6 +59,10 @@ class DeprecatedPortTest(unittest.TestCase):
self.assertEquals(ChromiumPort().build_webkit_command(build_style="debug"), DeprecatedPort().script_shell_command("build-webkit") + ["--debug", "--chromium", "--update-chromium"])
self.assertEquals(ChromiumPort().update_webkit_command(), DeprecatedPort().script_shell_command("update-webkit") + ["--chromium"])
+ def test_chromium_android_port(self):
+ self.assertEquals(ChromiumAndroidPort().build_webkit_command(), ChromiumPort().build_webkit_command() + ["--chromium-android"])
+ self.assertEquals(ChromiumAndroidPort().update_webkit_command(), ChromiumPort().update_webkit_command() + ["--chromium-android"])
+
def test_chromium_xvfb_port(self):
self.assertEquals(ChromiumXVFBPort().run_webkit_tests_command(), ['xvfb-run'] + DeprecatedPort().script_shell_command('new-run-webkit-tests') + ['--chromium', '--skip-failing-tests'])
diff --git a/Tools/Scripts/webkitpy/common/config/watchlist b/Tools/Scripts/webkitpy/common/config/watchlist
index 127772c80..e5c9c2776 100755
--- a/Tools/Scripts/webkitpy/common/config/watchlist
+++ b/Tools/Scripts/webkitpy/common/config/watchlist
@@ -12,9 +12,6 @@
#
{
"DEFINITIONS": {
- "ChromiumDumpRenderTree": {
- "filename": r"Tools/DumpRenderTree/chromium/",
- },
"ChromiumGraphics": {
"filename": r"Source/WebCore/platform/graphics/chromium/",
},
@@ -26,13 +23,11 @@
"filename": r"Source/WebCore/bindings/objc/PublicDOMInterfaces.h"
},
"Forms": {
- "filename": r"Source/WebCore/html/HTML(DataList|FieldSet|Form|FormControl|Input|Keygen|Label"
- r"|Legend|OptGroup|Option|Output|Select|TextArea|TextFormControl)Element\."
- r"|Source/WebCore/html/FormAssociatedElement\."
+ "filename": r"Source/WebCore/html/HTML(DataList|FieldSet|Input|Keygen|Label|Legend|OptGroup|Option|Output|Select|TextArea)Element\."
+ r"|Source/WebCore/html/.*Form[A-Z].*\."
r"|Source/WebCore/html/\w*InputType\."
r"|Source/WebCore/html/shadow/(SliderThumbElement|TextControlInnerElements)\."
- r"|Source/WebCore/rendering/Render(FileUploadControl|ListBox|MenuList|Slider|TextControl"
- r"|TextControlMultiLine|TextControlSingleLine)\."
+ r"|Source/WebCore/rendering/Render(FileUploadControl|ListBox|MenuList|Slider|TextControl.*)\."
},
"GStreamerGraphics": {
"filename": r"Source/WebCore/platform/graphics/gstreamer/",
@@ -69,6 +64,10 @@
"less": r"[Ss]ecurityOrigin(?!\.(h|cpp))",
"filename": r"XSS|[Ss]ecurity",
},
+ "SkiaGraphics": {
+ "filename": r"Source/WebCore/platform/graphics/skia/"
+ r"|Source/WebCore/platform/graphics/filters/skia/",
+ },
"V8Bindings": {
"filename": r"Source/WebCore/bindings/v8/",
},
@@ -224,7 +223,6 @@
"BlackBerry": [ "mifenton@rim.com" ],
"CMake": [ "rakuco@webkit.org", "gyuyoung.kim@samsung.com" ],
"CSS": [ "alexis.menard@openbossa.org", "macpherson@chromium.org", "cmarcelo@webkit.org" ],
- "ChromiumDumpRenderTree": [ "tkent@chromium.org", ],
"ChromiumGraphics": [ "jamesr@chromium.org", "cc-bugs@google.com" ],
"ChromiumPublicApi": [ "abarth@webkit.org", "dglazkov@chromium.org", "fishd@chromium.org", "jamesr@chromium.org", "tkent+wkapi@chromium.org" ],
"DOMAttributes": [ "cmarcelo@webkit.org", ],
@@ -233,14 +231,15 @@
"EFLWebKit2PublicAPI": [ "gyuyoung.kim@samsung.com" ],
"Editing": [ "mifenton@rim.com" ],
"Forms": [ "tkent@chromium.org", "mifenton@rim.com" ],
- "FrameLoader": [ "abarth@webkit.org", "japhet@chromium.org", "jochen@chromium.org" ],
+ "FrameLoader": [ "abarth@webkit.org", "japhet@chromium.org" ],
"GStreamerGraphics": [ "alexis.menard@openbossa.org", "pnormand@igalia.com", "gns@gnome.org", "mrobinson@webkit.org" ],
"GtkWebKit2PublicAPI": [ "cgarcia@igalia.com", "gns@gnome.org", "mrobinson@webkit.org" ],
- "Loader": [ "japhet@chromium.org", "jochen@chromium.org" ],
+ "Loader": [ "japhet@chromium.org" ],
"MathML": [ "dbarton@mathscribe.com" ],
"Media": [ "feature-media-reviews@chromium.org", "eric.carlson@apple.com" ],
"NetworkInfo": [ "gyuyoung.kim@samsung.com" ],
"OpenGL" : [ "noam.rosenthal@nokia.com" ],
+ "SkiaGraphics": [ "senorblanco@chromium.org" ],
"QtBuildSystem" : [ "vestbo@webkit.org", ],
"QtGraphics" : [ "noam.rosenthal@nokia.com" ],
"QtWebKit2PlatformSpecific": [ "alexis.menard@openbossa.org", "zoltan@webkit.org", "cmarcelo@webkit.org" ],
@@ -253,7 +252,7 @@
"TestFailures": [ "abarth@webkit.org", "dglazkov@chromium.org" ],
"TextureMapper" : [ "noam.rosenthal@nokia.com" ],
"ThreadingFiles|ThreadingUsage": [ "levin+threading@chromium.org", ],
- "V8Bindings|BindingsScripts": [ "abarth@webkit.org", "japhet@chromium.org", "haraken@chromium.org", "jochen@chromium.org" ],
+ "V8Bindings|BindingsScripts": [ "abarth@webkit.org", "japhet@chromium.org", "haraken@chromium.org" ],
"WatchListScript": [ "levin+watchlist@chromium.org", ],
"WebIDL": [ "abarth@webkit.org", "ojan@chromium.org" ],
"WebKitGTKTranslations": [ "gns@gnome.org", "mrobinson@webkit.org" ],
diff --git a/Tools/Scripts/webkitpy/common/message_pool.py b/Tools/Scripts/webkitpy/common/message_pool.py
index 2d90cbe0b..2e1e85e5c 100644
--- a/Tools/Scripts/webkitpy/common/message_pool.py
+++ b/Tools/Scripts/webkitpy/common/message_pool.py
@@ -104,12 +104,22 @@ class _MessagePool(object):
host = self._host
for worker_number in xrange(self._num_workers):
- worker = _Worker(host, self._messages_to_manager, self._messages_to_worker, self._worker_factory, worker_number, self._running_inline, self if self._running_inline else None)
+ worker = _Worker(host, self._messages_to_manager, self._messages_to_worker, self._worker_factory, worker_number, self._running_inline, self if self._running_inline else None, self._worker_log_level())
self._workers.append(worker)
worker.start()
if self._worker_startup_delay_secs:
time.sleep(self._worker_startup_delay_secs)
+ def _worker_log_level(self):
+ log_level = logging.NOTSET
+ for handler in logging.root.handlers:
+ if handler.level != logging.NOTSET:
+ if log_level == logging.NOTSET:
+ log_level = handler.level
+ else:
+ log_level = min(log_level, handler.level)
+ return log_level
+
def wait(self):
try:
self._start_workers()
@@ -192,12 +202,13 @@ class _Message(object):
class _Worker(multiprocessing.Process):
- def __init__(self, host, messages_to_manager, messages_to_worker, worker_factory, worker_number, running_inline, manager):
+ def __init__(self, host, messages_to_manager, messages_to_worker, worker_factory, worker_number, running_inline, manager, log_level):
super(_Worker, self).__init__()
self.host = host
self.worker_number = worker_number
self.name = 'worker/%d' % worker_number
self.log_messages = []
+ self.log_level = log_level
self._running_inline = running_inline
self._manager = manager
@@ -300,12 +311,14 @@ class _Worker(multiprocessing.Process):
self._log_handler = _WorkerLogHandler(self)
self._logger.addHandler(self._log_handler)
+ self._logger.setLevel(self.log_level)
class _WorkerLogHandler(logging.Handler):
def __init__(self, worker):
logging.Handler.__init__(self)
self._worker = worker
+ self.setLevel(worker.log_level)
def emit(self, record):
self._worker.log_messages.append(record)
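Note on the change above: _worker_log_level() picks the most verbose (numerically lowest) non-NOTSET level among the root logger's handlers, so each worker process can apply an equivalent level even though the handler objects themselves are not passed along. A standalone sketch of the same computation, outside of message_pool:

    import logging

    def minimum_root_handler_level():
        # Same idea as _MessagePool._worker_log_level(): the most verbose
        # non-NOTSET level among the root logger's handlers wins.
        level = logging.NOTSET
        for handler in logging.root.handlers:
            if handler.level != logging.NOTSET:
                level = handler.level if level == logging.NOTSET else min(level, handler.level)
        return level

    verbose_handler = logging.StreamHandler()
    verbose_handler.setLevel(logging.DEBUG)
    quiet_handler = logging.StreamHandler()
    quiet_handler.setLevel(logging.WARNING)
    logging.root.addHandler(verbose_handler)
    logging.root.addHandler(quiet_handler)

    assert minimum_root_handler_level() == logging.DEBUG
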
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py
index e48293b89..eecadf226 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla.py
@@ -578,19 +578,14 @@ class Bugzilla(object):
return file_object.name
return "bug-%s-%s.%s" % (bug_id, timestamp(), extension)
- def add_attachment_to_bug(self,
- bug_id,
- file_or_string,
- description,
- filename=None,
- comment_text=None):
+ def add_attachment_to_bug(self, bug_id, file_or_string, description, filename=None, comment_text=None, mimetype=None):
self.authenticate()
log('Adding attachment "%s" to %s' % (description, self.bug_url_for_bug_id(bug_id)))
self.browser.open(self.add_attachment_url(bug_id))
self.browser.select_form(name="entryform")
file_object = self._file_object_for_upload(file_or_string)
filename = filename or self._filename_for_upload(file_object, bug_id)
- self._fill_attachment_form(description, file_object, filename=filename)
+ self._fill_attachment_form(description, file_object, filename=filename, mimetype=mimetype)
if comment_text:
log(comment_text)
self.browser['comment'] = comment_text
diff --git a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py
index cd12c8033..71b080ce9 100644
--- a/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py
+++ b/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_mock.py
@@ -393,13 +393,9 @@ class MockBugzilla(object):
log("MOCK bug comment: bug_id=%s, cc=%s\n--- Begin comment ---\n%s\n--- End comment ---\n" % (
bug_id, cc, comment_text))
- def add_attachment_to_bug(self,
- bug_id,
- file_or_string,
- description,
- filename=None,
- comment_text=None):
- log("MOCK add_attachment_to_bug: bug_id=%s, description=%s filename=%s" % (bug_id, description, filename))
+ def add_attachment_to_bug(self, bug_id, file_or_string, description, filename=None, comment_text=None, mimetype=None):
+ log("MOCK add_attachment_to_bug: bug_id=%s, description=%s filename=%s mimetype=%s" %
+ (bug_id, description, filename, mimetype))
if comment_text:
log("-- Begin comment --")
log(comment_text)
diff --git a/Tools/Scripts/webkitpy/common/net/credentials.py b/Tools/Scripts/webkitpy/common/net/credentials.py
index d76405b39..21aeaeafe 100644
--- a/Tools/Scripts/webkitpy/common/net/credentials.py
+++ b/Tools/Scripts/webkitpy/common/net/credentials.py
@@ -131,9 +131,12 @@ class Credentials(object):
return
if not User().confirm("Store password in system keyring?", User.DEFAULT_NO):
return
- self._keyring.set_password(self.host, username, password)
+ try:
+ self._keyring.set_password(self.host, username, password)
+ except:
+ pass
- def read_credentials(self):
+ def read_credentials(self, user=User):
username, password = self._credentials_from_environment()
# FIXME: We don't currently support pulling the username from one
# source and the password from a separate source.
@@ -142,13 +145,17 @@ class Credentials(object):
if not username or not password:
username, password = self._credentials_from_keychain(username)
+ if not username:
+ username = user.prompt("%s login: " % self.host)
+
if username and not password and self._keyring:
- password = self._keyring.get_password(self.host, username)
+ try:
+ password = self._keyring.get_password(self.host, username)
+ except:
+ pass
- if not username:
- username = User.prompt("%s login: " % self.host)
if not password:
- password = User.prompt_password("%s password for %s: " % (self.host, username))
+ password = user.prompt_password("%s password for %s: " % (self.host, username))
self._offer_to_store_credentials_in_keyring(username, password)
return (username, password)
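Note on the change above: read_credentials() now takes the prompting object as a parameter (defaulting to User) and tolerates a broken keyring backend; credentials still come from the environment or keychain when available, the keyring is only consulted inside a try/except, and prompting is the last resort. A rough usage sketch; QuietUser is an invented stand-in for webkitpy's User, and depending on the environment the earlier sources may answer before any prompt is reached.

    from webkitpy.common.net.credentials import Credentials

    class QuietUser(object):
        # Invented for illustration: anything with prompt()/prompt_password()
        # classmethods can now be passed to read_credentials().
        @classmethod
        def prompt(cls, message, repeat=1, raw_input=raw_input):
            return "someone@webkit.org"

        @classmethod
        def prompt_password(cls, message, repeat=1, raw_input=raw_input):
            return "not-a-real-password"

    credentials = Credentials("bugs.webkit.org")
    username, password = credentials.read_credentials(user=QuietUser)
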
diff --git a/Tools/Scripts/webkitpy/common/net/credentials_unittest.py b/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
index 2ab160c88..a797e3d1b 100644
--- a/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
+++ b/Tools/Scripts/webkitpy/common/net/credentials_unittest.py
@@ -32,6 +32,7 @@ import unittest
from webkitpy.common.net.credentials import Credentials
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.common.system.user_mock import MockUser
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.executive_mock import MockExecutive
@@ -179,6 +180,33 @@ password: "SECRETSAUCE"
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "NOMNOMNOM"))
+ def test_keyring_without_git_repo_nor_keychain(self):
+ class MockKeyring(object):
+ def get_password(self, host, username):
+ return "NOMNOMNOM"
+
+ class FakeCredentials(MockedCredentials):
+ def _credentials_from_keychain(self, username):
+ return (None, None)
+
+ def _credentials_from_environment(self):
+ return (None, None)
+
+ class FakeUser(MockUser):
+ @classmethod
+ def prompt(cls, message, repeat=1, raw_input=raw_input):
+ return "test@webkit.org"
+
+ @classmethod
+ def prompt_password(cls, message, repeat=1, raw_input=raw_input):
+ raise AssertionError("should not prompt for password")
+
+ with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
+ credentials = FakeCredentials("fake.hostname", cwd=temp_dir_path, keyring=MockKeyring())
+ # FIXME: Using read_credentials here seems too broad as higher-priority
+ # credential source could be affected by the user's environment.
+ self.assertEqual(credentials.read_credentials(FakeUser), ("test@webkit.org", "NOMNOMNOM"))
+
if __name__ == '__main__':
unittest.main()
diff --git a/Tools/Scripts/webkitpy/common/prettypatch_unittest.py b/Tools/Scripts/webkitpy/common/prettypatch_unittest.py
index 6a3c79a7a..37fa844fd 100644
--- a/Tools/Scripts/webkitpy/common/prettypatch_unittest.py
+++ b/Tools/Scripts/webkitpy/common/prettypatch_unittest.py
@@ -27,6 +27,7 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
+import sys
import unittest
from webkitpy.common.system.executive import Executive
@@ -69,6 +70,10 @@ Index: latin1_test
if not self.check_ruby():
return
+ if sys.platform == 'win32':
+ # FIXME: disabled due to https://bugs.webkit.org/show_bug.cgi?id=93192
+ return
+
pretty_patch = PrettyPatch(Executive(), self._webkit_root())
pretty = pretty_patch.pretty_diff(self._diff_with_multiple_encodings)
self.assertTrue(pretty) # We got some output
diff --git a/Tools/Scripts/webkitpy/common/system/autoinstall.py b/Tools/Scripts/webkitpy/common/system/autoinstall.py
index e5bc0b2cb..00aff83ff 100755
--- a/Tools/Scripts/webkitpy/common/system/autoinstall.py
+++ b/Tools/Scripts/webkitpy/common/system/autoinstall.py
@@ -456,7 +456,7 @@ class AutoInstaller(object):
_log.debug('URL for %s already downloaded. Skipping...'
% target_name)
_log.debug(' "%s"' % url)
- return
+ return False
self._log_transfer("Auto-installing package: %s" % target_name,
url, target_path, log_method=_log.info)
@@ -484,6 +484,7 @@ class AutoInstaller(object):
shutil.rmtree(scratch_dir)
_log.debug('Auto-installed %s to:' % target_name)
_log.debug(' "%s"' % target_path)
+ return True
if __name__=="__main__":
diff --git a/Tools/Scripts/webkitpy/common/system/executive_unittest.py b/Tools/Scripts/webkitpy/common/system/executive_unittest.py
index c041b6372..79a8281cd 100644
--- a/Tools/Scripts/webkitpy/common/system/executive_unittest.py
+++ b/Tools/Scripts/webkitpy/common/system/executive_unittest.py
@@ -152,7 +152,7 @@ class ExecutiveTest(unittest.TestCase):
output = executive.run_and_throw_if_fail(command_line('echo', unicode_tor_input), quiet=True, decode_output=False)
self.assertEquals(output, encoded_tor)
- def test_kill_process(self):
+ def serial_test_kill_process(self):
executive = Executive()
process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
self.assertEqual(process.poll(), None) # Process is running
@@ -169,8 +169,8 @@ class ExecutiveTest(unittest.TestCase):
# Killing again should fail silently.
executive.kill_process(process.pid)
- # Now test kill_all ; we do this in the same test as kill
- # so that we don't collide when running tests in parallel.
+ def serial_test_kill_all(self):
+ executive = Executive()
process = subprocess.Popen(never_ending_command(), stdout=subprocess.PIPE)
self.assertEqual(process.poll(), None) # Process is running
executive.kill_all(never_ending_command()[0])
@@ -202,13 +202,13 @@ class ExecutiveTest(unittest.TestCase):
self._assert_windows_image_name("foo.baz", "foo.baz")
self._assert_windows_image_name("foo.baz.exe", "foo.baz.exe")
- def test_check_running_pid(self):
+ def serial_test_check_running_pid(self):
executive = Executive()
self.assertTrue(executive.check_running_pid(os.getpid()))
# Maximum pid number on Linux is 32768 by default
self.assertFalse(executive.check_running_pid(100000))
- def test_running_pids(self):
+ def serial_test_running_pids(self):
if sys.platform in ("win32", "cygwin"):
return # This function isn't implemented on Windows yet.
@@ -216,7 +216,9 @@ class ExecutiveTest(unittest.TestCase):
pids = executive.running_pids()
self.assertTrue(os.getpid() in pids)
- def test_run_in_parallel(self):
+ def serial_test_run_in_parallel(self):
+ # We run this test serially to avoid overloading the machine and throwing off the timing.
+
if sys.platform in ("win32", "cygwin"):
return # This function isn't implemented properly on windows yet.
import multiprocessing
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem.py b/Tools/Scripts/webkitpy/common/system/filesystem.py
index 687a31322..d8ee167ae 100644
--- a/Tools/Scripts/webkitpy/common/system/filesystem.py
+++ b/Tools/Scripts/webkitpy/common/system/filesystem.py
@@ -262,6 +262,9 @@ class FileSystem(object):
"""Delete the directory rooted at path, whether empty or not."""
shutil.rmtree(path, ignore_errors=True)
+ def copytree(self, source, destination):
+ shutil.copytree(source, destination)
+
def split(self, path):
"""Return (dirname, basename + '.' + ext)"""
return os.path.split(path)
diff --git a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
index a4eb695bf..c2d823a81 100644
--- a/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
+++ b/Tools/Scripts/webkitpy/common/system/filesystem_mock.py
@@ -378,6 +378,16 @@ class MockFileSystem(object):
self.dirs = set(filter(lambda d: not d.startswith(path), self.dirs))
+ def copytree(self, source, destination):
+ source = self.normpath(source)
+ destination = self.normpath(destination)
+
+ for source_file in list(self.files):
+ if source_file.startswith(source):
+ destination_path = self.join(destination, self.relpath(source_file, source))
+ self.maybe_make_directory(self.dirname(destination_path))
+ self.files[destination_path] = self.files[source_file]
+
def split(self, path):
idx = path.rfind(self.sep)
if idx == -1:
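Note on the mock above: MockFileSystem.copytree() operates purely on the in-memory files dictionary, mirroring every entry under source at the corresponding path under destination (iterating over a snapshot of the keys so the new entries can be added safely). A small behavioural sketch, assuming MockFileSystem's usual files-dict constructor:

    from webkitpy.common.system.filesystem_mock import MockFileSystem

    fs = MockFileSystem(files={
        '/src/a.txt': 'A',
        '/src/sub/b.txt': 'B',
        '/other/c.txt': 'C',
    })
    fs.copytree('/src', '/dest')

    assert fs.read_text_file('/dest/a.txt') == 'A'
    assert fs.read_text_file('/dest/sub/b.txt') == 'B'
    assert not fs.exists('/dest/c.txt')   # only paths under the source are copied
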
diff --git a/Tools/Scripts/webkitpy/common/system/outputcapture.py b/Tools/Scripts/webkitpy/common/system/outputcapture.py
index 4f931b7d1..78a12f0e2 100644
--- a/Tools/Scripts/webkitpy/common/system/outputcapture.py
+++ b/Tools/Scripts/webkitpy/common/system/outputcapture.py
@@ -68,11 +68,15 @@ class OutputCapture(object):
self._logs = StringIO()
self._logs_handler = logging.StreamHandler(self._logs)
self._logs_handler.setLevel(self._log_level)
- logging.getLogger().addHandler(self._logs_handler)
+ self._logger = logging.getLogger()
+ self._orig_log_level = self._logger.level
+ self._logger.addHandler(self._logs_handler)
+ self._logger.setLevel(min(self._log_level, self._orig_log_level))
return (self._capture_output_with_name("stdout"), self._capture_output_with_name("stderr"))
def restore_output(self):
- logging.getLogger().removeHandler(self._logs_handler)
+ self._logger.removeHandler(self._logs_handler)
+ self._logger.setLevel(self._orig_log_level)
self._logs_handler.flush()
self._logs.flush()
logs_string = self._logs.getvalue()
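Note on the change above: lowering the root logger's level in capture_output() matters because a handler's own level is not enough; records are filtered by the logger before any handler sees them, so a WARNING-level root logger would silently drop the INFO messages the capture handler is meant to collect. A standalone sketch of that logging behaviour (Python 2 StringIO, matching the module above):

    import logging
    from StringIO import StringIO   # Python 2, as used by outputcapture.py

    root = logging.getLogger()
    root.setLevel(logging.WARNING)              # a quiet default, for illustration

    buf = StringIO()
    handler = logging.StreamHandler(buf)
    handler.setLevel(logging.INFO)
    root.addHandler(handler)

    logging.info("dropped")                     # filtered by the root logger's WARNING level

    original_level = root.level
    root.setLevel(min(logging.INFO, original_level))    # what capture_output() now does
    logging.info("captured")
    root.setLevel(original_level)                       # what restore_output() now does
    root.removeHandler(handler)

    assert "captured" in buf.getvalue()
    assert "dropped" not in buf.getvalue()
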
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py
new file mode 100644
index 000000000..a7b49831c
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py
@@ -0,0 +1,178 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import errno
+import logging
+import re
+
+from webkitpy.layout_tests.models import test_expectations
+
+
+_log = logging.getLogger(__name__)
+
+
+class LayoutTestFinder(object):
+ def __init__(self, port, options):
+ self._port = port
+ self._options = options
+ self._filesystem = self._port.host.filesystem
+ self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
+
+ def find_tests(self, options, args):
+ paths = self._strip_test_dir_prefixes(args)
+ if options.test_list:
+ paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(options.test_list, self._port.TEST_PATH_SEPARATOR))
+ paths = set(paths)
+ test_files = self._port.tests(paths)
+ return (paths, test_files)
+
+ def _strip_test_dir_prefixes(self, paths):
+ return [self._strip_test_dir_prefix(path) for path in paths if path]
+
+ def _strip_test_dir_prefix(self, path):
+ # Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
+ # the filesystem uses '\\' as a directory separator.
+ if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
+ return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
+ if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
+ return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
+ return path
+
+ def _read_test_names_from_file(self, filenames, test_path_separator):
+ fs = self._filesystem
+ tests = []
+ for filename in filenames:
+ try:
+ if test_path_separator != fs.sep:
+ filename = filename.replace(test_path_separator, fs.sep)
+ file_contents = fs.read_text_file(filename).split('\n')
+ for line in file_contents:
+ line = self._strip_comments(line)
+ if line:
+ tests.append(line)
+ except IOError, e:
+ if e.errno == errno.ENOENT:
+ _log.critical('')
+ _log.critical('--test-list file "%s" not found' % filename)
+ raise
+ return tests
+
+ @staticmethod
+ def _strip_comments(line):
+ commentIndex = line.find('//')
+ if commentIndex == -1:
+ commentIndex = len(line)
+
+ line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
+ if line == '':
+ return None
+ else:
+ return line
+
+ def skip_tests(self, paths, all_tests_list, expectations, http_tests):
+ all_tests = set(all_tests_list)
+
+ tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
+ if self._options.skip_failing_tests:
+ tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL))
+ tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY))
+
+ if self._options.skipped == 'only':
+ tests_to_skip = all_tests - tests_to_skip
+ elif self._options.skipped == 'ignore':
+ tests_to_skip = set()
+ elif self._options.skipped == 'default':
+ pass # listed for completeness
+
+ # make sure we're explicitly running any tests passed on the command line.
+ tests_to_skip -= paths
+
+ # unless of course we don't want to run the HTTP tests :)
+ if not self._options.http:
+ tests_to_skip.update(set(http_tests))
+
+ return tests_to_skip
+
+ def split_into_chunks(self, test_names):
+ """split into a list to run and a set to skip, based on --run-chunk and --run-part."""
+ if not self._options.run_chunk and not self._options.run_part:
+ return test_names, set()
+
+ # If the user specifies they just want to run a subset of the tests,
+ # just grab a subset of the non-skipped tests.
+ chunk_value = self._options.run_chunk or self._options.run_part
+ try:
+ (chunk_num, chunk_len) = chunk_value.split(":")
+ chunk_num = int(chunk_num)
+ assert(chunk_num >= 0)
+ test_size = int(chunk_len)
+ assert(test_size > 0)
+ except AssertionError:
+ _log.critical("invalid chunk '%s'" % chunk_value)
+ return (None, None)
+
+ # Get the number of tests
+ num_tests = len(test_names)
+
+ # Get the start offset of the slice.
+ if self._options.run_chunk:
+ chunk_len = test_size
+ # In this case chunk_num can be really large. We need
+ # to make the slice fit in the current number of tests.
+ slice_start = (chunk_num * chunk_len) % num_tests
+ else:
+ # Validate the data.
+ assert(test_size <= num_tests)
+ assert(chunk_num <= test_size)
+
+ # To compute chunk_len without skipping any tests, we round
+ # num_tests up to the next multiple of the number of parts.
+ rounded_tests = num_tests
+ if rounded_tests % test_size != 0:
+ rounded_tests = (num_tests + test_size - (num_tests % test_size))
+
+ chunk_len = rounded_tests / test_size
+ slice_start = chunk_len * (chunk_num - 1)
+ # It does not matter if we go over test_size.
+
+ # Get the end offset of the slice.
+ slice_end = min(num_tests, slice_start + chunk_len)
+
+ tests_to_run = test_names[slice_start:slice_end]
+
+ _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
+
+ # If we reached the end and we don't have enough tests, we run some
+ # from the beginning.
+ if slice_end - slice_start < chunk_len:
+ extra = chunk_len - (slice_end - slice_start)
+ _log.debug(' last chunk is partial, appending [0:%d]' % extra)
+ tests_to_run.extend(test_names[0:extra])
+
+ return (tests_to_run, set(test_names) - set(tests_to_run))
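Note on the chunking above: the --run-part arithmetic rounds the number of tests up to a multiple of the number of parts before slicing, so each part gets ceil(num_tests / parts) tests and the last part wraps around to the start of the list if it comes up short. A worked example with made-up numbers (10 tests split into 3 parts, i.e. --run-part=N:3):

    # Made-up numbers for illustration; mirrors the arithmetic in split_into_chunks().
    num_tests = 10
    test_size = 3                                # number of parts
    rounded_tests = num_tests + test_size - (num_tests % test_size)   # 12 (only rounded when not already a multiple)
    chunk_len = rounded_tests // test_size       # 4 tests per part ('//' = the integer division used above)

    for chunk_num in (1, 2, 3):
        slice_start = chunk_len * (chunk_num - 1)
        slice_end = min(num_tests, slice_start + chunk_len)
        extra = chunk_len - (slice_end - slice_start)
        print("part %d runs [%d:%d] plus [0:%d]" % (chunk_num, slice_start, slice_end, extra))
    # part 1 runs [0:4]  plus [0:0]
    # part 2 runs [4:8]  plus [0:0]
    # part 3 runs [8:10] plus [0:2]   <- wraps around and re-runs the first two tests
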
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
new file mode 100644
index 000000000..ae3422561
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner.py
@@ -0,0 +1,627 @@
+# Copyright (C) 2011 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import logging
+import math
+import re
+import threading
+import time
+
+from webkitpy.common import message_pool
+from webkitpy.layout_tests.controllers import single_test_runner
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models import test_results
+from webkitpy.tool import grammar
+
+
+_log = logging.getLogger(__name__)
+
+
+TestExpectations = test_expectations.TestExpectations
+
+# Export this so callers don't need to know about message pools.
+WorkerException = message_pool.WorkerException
+
+
+class TestRunInterruptedException(Exception):
+ """Raised when a test run should be stopped immediately."""
+ def __init__(self, reason):
+ Exception.__init__(self)
+ self.reason = reason
+ self.msg = reason
+
+ def __reduce__(self):
+ return self.__class__, (self.reason,)
+
+
+class LayoutTestRunner(object):
+ def __init__(self, options, port, printer, results_directory, expectations, test_is_slow_fn):
+ self._options = options
+ self._port = port
+ self._printer = printer
+ self._results_directory = results_directory
+ self._expectations = None
+ self._test_is_slow = test_is_slow_fn
+ self._sharder = Sharder(self._port.split_test, self._port.TEST_PATH_SEPARATOR, self._options.max_locked_shards)
+
+ self._current_result_summary = None
+ self._needs_http = None
+ self._needs_websockets = None
+ self._retrying = False
+ self._test_files_list = []
+ self._all_results = []
+ self._group_stats = {}
+ self._worker_stats = {}
+ self._filesystem = self._port.host.filesystem
+
+ def test_key(self, test_name):
+ return self._sharder.test_key(test_name)
+
+ def run_tests(self, test_inputs, expectations, result_summary, num_workers, needs_http, needs_websockets, retrying):
+ """Returns a tuple of (interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings):
+ interrupted is whether the run was interrupted
+ keyboard_interrupted is whether the interruption was because someone typed Ctrl^C
+ thread_timings is a list of dicts with the total runtime
+ of each thread with 'name', 'num_tests', 'total_time' properties
+ test_timings is a list of timings for each sharded subdirectory
+ of the form [time, directory_name, num_tests]
+ individual_test_timings is a list of run times for each test
+ in the form {filename:filename, test_run_time:test_run_time}
+ result_summary: summary object to populate with the results
+ """
+ self._current_result_summary = result_summary
+ self._expectations = expectations
+ self._needs_http = needs_http
+ self._needs_websockets = needs_websockets
+ self._retrying = retrying
+ self._test_files_list = [test_input.test_name for test_input in test_inputs]
+ self._printer.num_tests = len(self._test_files_list)
+ self._printer.num_completed = 0
+
+ self._all_results = []
+ self._group_stats = {}
+ self._worker_stats = {}
+ self._has_http_lock = False
+ self._remaining_locked_shards = []
+
+ keyboard_interrupted = False
+ interrupted = False
+
+ self._printer.write_update('Sharding tests ...')
+ locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)
+
+ # FIXME: We don't have a good way to coordinate the workers so that
+ # they don't try to run the shards that need a lock if we don't actually
+ # have the lock. The easiest solution at the moment is to grab the
+ # lock at the beginning of the run, and then run all of the locked
+ # shards first. This minimizes the time spent holding the lock, but
+ # means that we won't be running tests while we're waiting for the lock.
+ # If this becomes a problem in practice we'll need to change this.
+
+ all_shards = locked_shards + unlocked_shards
+ self._remaining_locked_shards = locked_shards
+ if locked_shards and self._options.http:
+ self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))
+
+ num_workers = min(num_workers, len(all_shards))
+ self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))
+
+ if self._options.dry_run:
+ return (keyboard_interrupted, interrupted, self._worker_stats.values(), self._group_stats, self._all_results)
+
+ self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
+
+ try:
+ with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
+ pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
+ except KeyboardInterrupt:
+ self._printer.flush()
+ self._printer.writeln('Interrupted, exiting ...')
+ keyboard_interrupted = True
+ except TestRunInterruptedException, e:
+ _log.warning(e.reason)
+ interrupted = True
+ except Exception, e:
+ _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e)))
+ raise
+ finally:
+ self.stop_servers_with_lock()
+
+ # FIXME: should this be a class instead of a tuple?
+ return (interrupted, keyboard_interrupted, self._worker_stats.values(), self._group_stats, self._all_results)
+
+ def _worker_factory(self, worker_connection):
+ results_directory = self._results_directory
+ if self._retrying:
+ self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries'))
+ results_directory = self._filesystem.join(self._results_directory, 'retries')
+ return Worker(worker_connection, results_directory, self._options)
+
+ def _mark_interrupted_tests_as_skipped(self, result_summary):
+ for test_name in self._test_files_list:
+ if test_name not in result_summary.results:
+ result = test_results.TestResult(test_name, [test_failures.FailureEarlyExit()])
+ # FIXME: We probably need to loop here if there are multiple iterations.
+ # FIXME: Also, these results are really neither expected nor unexpected. We probably
+ # need a third type of result.
+ result_summary.add(result, expected=False, test_is_slow=self._test_is_slow(test_name))
+
+ def _interrupt_if_at_failure_limits(self, result_summary):
+ # Note: The messages in this method are constructed to match old-run-webkit-tests
+ # so that existing buildbot grep rules work.
+ def interrupt_if_at_failure_limit(limit, failure_count, result_summary, message):
+ if limit and failure_count >= limit:
+ message += " %d tests run." % (result_summary.expected + result_summary.unexpected)
+ self._mark_interrupted_tests_as_skipped(result_summary)
+ raise TestRunInterruptedException(message)
+
+ interrupt_if_at_failure_limit(
+ self._options.exit_after_n_failures,
+ result_summary.unexpected_failures,
+ result_summary,
+ "Exiting early after %d failures." % result_summary.unexpected_failures)
+ interrupt_if_at_failure_limit(
+ self._options.exit_after_n_crashes_or_timeouts,
+ result_summary.unexpected_crashes + result_summary.unexpected_timeouts,
+ result_summary,
+ # This differs from ORWT because it does not include WebProcess crashes.
+ "Exiting early after %d crashes and %d timeouts." % (result_summary.unexpected_crashes, result_summary.unexpected_timeouts))
+
+ def _update_summary_with_result(self, result_summary, result):
+ if result.type == test_expectations.SKIP:
+ exp_str = got_str = 'SKIP'
+ expected = True
+ else:
+ expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or test_failures.is_reftest_failure(result.failures))
+ exp_str = self._expectations.get_expectations_string(result.test_name)
+ got_str = self._expectations.expectation_to_string(result.type)
+
+ result_summary.add(result, expected, self._test_is_slow(result.test_name))
+
+ self._printer.print_finished_test(result, expected, exp_str, got_str)
+
+ self._interrupt_if_at_failure_limits(result_summary)
+
+ def start_servers_with_lock(self, number_of_servers):
+ self._printer.write_update('Acquiring http lock ...')
+ self._port.acquire_http_lock()
+ if self._needs_http:
+ self._printer.write_update('Starting HTTP server ...')
+ self._port.start_http_server(number_of_servers=number_of_servers)
+ if self._needs_websockets:
+ self._printer.write_update('Starting WebSocket server ...')
+ self._port.start_websocket_server()
+ self._has_http_lock = True
+
+ def stop_servers_with_lock(self):
+ if self._has_http_lock:
+ if self._needs_http:
+ self._printer.write_update('Stopping HTTP server ...')
+ self._port.stop_http_server()
+ if self._needs_websockets:
+ self._printer.write_update('Stopping WebSocket server ...')
+ self._port.stop_websocket_server()
+ self._printer.write_update('Releasing server lock ...')
+ self._port.release_http_lock()
+ self._has_http_lock = False
+
+ def handle(self, name, source, *args):
+ method = getattr(self, '_handle_' + name)
+ if method:
+ return method(source, *args)
+ raise AssertionError('unknown message %s received from %s, args=%s' % (name, source, repr(args)))
+
+ def _handle_started_test(self, worker_name, test_input, test_timeout_sec):
+ self._printer.print_started_test(test_input.test_name)
+
+ def _handle_finished_test_list(self, worker_name, list_name, num_tests, elapsed_time):
+ self._group_stats[list_name] = (num_tests, elapsed_time)
+
+ def find(name, test_lists):
+ for i in range(len(test_lists)):
+ if test_lists[i].name == name:
+ return i
+ return -1
+
+ index = find(list_name, self._remaining_locked_shards)
+ if index >= 0:
+ self._remaining_locked_shards.pop(index)
+ if not self._remaining_locked_shards:
+ self.stop_servers_with_lock()
+
+ def _handle_finished_test(self, worker_name, result, elapsed_time, log_messages=[]):
+ self._worker_stats.setdefault(worker_name, {'name': worker_name, 'num_tests': 0, 'total_time': 0})
+ self._worker_stats[worker_name]['total_time'] += elapsed_time
+ self._worker_stats[worker_name]['num_tests'] += 1
+ self._all_results.append(result)
+ self._update_summary_with_result(self._current_result_summary, result)
+
+
+class Worker(object):
+ def __init__(self, caller, results_directory, options):
+ self._caller = caller
+ self._worker_number = caller.worker_number
+ self._name = caller.name
+ self._results_directory = results_directory
+ self._options = options
+
+ # The remaining fields are initialized in start()
+ self._host = None
+ self._port = None
+ self._batch_size = None
+ self._batch_count = None
+ self._filesystem = None
+ self._driver = None
+ self._tests_run_file = None
+ self._tests_run_filename = None
+
+ def __del__(self):
+ self.stop()
+
+ def start(self):
+ """This method is called when the object is starting to be used and it is safe
+ for the object to create state that does not need to be pickled (usually this means
+ it is called in a child process)."""
+ self._host = self._caller.host
+ self._filesystem = self._host.filesystem
+ self._port = self._host.port_factory.get(self._options.platform, self._options)
+
+ self._batch_count = 0
+ self._batch_size = self._options.batch_size or 0
+ tests_run_filename = self._filesystem.join(self._results_directory, "tests_run%d.txt" % self._worker_number)
+ self._tests_run_file = self._filesystem.open_text_file_for_writing(tests_run_filename)
+
+ def handle(self, name, source, test_list_name, test_inputs):
+ assert name == 'test_list'
+ start_time = time.time()
+ for test_input in test_inputs:
+ self._run_test(test_input)
+ elapsed_time = time.time() - start_time
+ self._caller.post('finished_test_list', test_list_name, len(test_inputs), elapsed_time)
+
+ def _update_test_input(self, test_input):
+ if test_input.reference_files is None:
+ # Lazy initialization.
+ test_input.reference_files = self._port.reference_files(test_input.test_name)
+ if test_input.reference_files:
+ test_input.should_run_pixel_test = True
+ else:
+ test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
+
+ def _run_test(self, test_input):
+ self._update_test_input(test_input)
+ test_timeout_sec = self._timeout(test_input)
+ start = time.time()
+ self._caller.post('started_test', test_input, test_timeout_sec)
+
+ result = self._run_test_with_timeout(test_input, test_timeout_sec)
+
+ elapsed_time = time.time() - start
+ self._caller.post('finished_test', result, elapsed_time)
+
+ self._clean_up_after_test(test_input, result)
+
+ def stop(self):
+ _log.debug("%s cleaning up" % self._name)
+ self._kill_driver()
+ if self._tests_run_file:
+ self._tests_run_file.close()
+ self._tests_run_file = None
+
+ def _timeout(self, test_input):
+ """Compute the appropriate timeout value for a test."""
+ # The DumpRenderTree watchdog uses 2.5x the timeout; we want to be
+ # larger than that. We also add a little more padding if we're
+ # running tests in a separate thread.
+ #
+ # Note that we need to convert the test timeout from a
+ # string value in milliseconds to a float for Python.
+ driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
+ if not self._options.run_singly:
+ return driver_timeout_sec
+
+ thread_padding_sec = 1.0
+ thread_timeout_sec = driver_timeout_sec + thread_padding_sec
+ return thread_timeout_sec
+
+ def _kill_driver(self):
+ # Be careful about how and when we kill the driver; if driver.stop()
+ # raises an exception, this routine may get re-entered via __del__.
+ driver = self._driver
+ self._driver = None
+ if driver:
+ _log.debug("%s killing driver" % self._name)
+ driver.stop()
+
+ def _run_test_with_timeout(self, test_input, timeout):
+ if self._options.run_singly:
+ return self._run_test_in_another_thread(test_input, timeout)
+ return self._run_test_in_this_thread(test_input)
+
+ def _clean_up_after_test(self, test_input, result):
+ self._batch_count += 1
+ test_name = test_input.test_name
+ self._tests_run_file.write(test_name + "\n")
+
+ if result.failures:
+ # Check and kill DumpRenderTree if we need to.
+ if any([f.driver_needs_restart() for f in result.failures]):
+ self._kill_driver()
+ # Reset the batch count since the shell just bounced.
+ self._batch_count = 0
+
+ # Print the error message(s).
+ _log.debug("%s %s failed:" % (self._name, test_name))
+ for f in result.failures:
+ _log.debug("%s %s" % (self._name, f.message()))
+ elif result.type == test_expectations.SKIP:
+ _log.debug("%s %s skipped" % (self._name, test_name))
+ else:
+ _log.debug("%s %s passed" % (self._name, test_name))
+
+ if self._batch_size > 0 and self._batch_count >= self._batch_size:
+ self._kill_driver()
+ self._batch_count = 0
+
+ def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
+ """Run a test in a separate thread, enforcing a hard time limit.
+
+ Since we can only detect the termination of a thread, not any internal
+ state or progress, we can only run per-test timeouts when running test
+ files singly.
+
+ Args:
+ test_input: Object containing the test filename and timeout
+ thread_timeout_sec: time to wait before killing the driver process.
+ Returns:
+ A TestResult
+ """
+ worker = self
+
+ driver = self._port.create_driver(self._worker_number)
+
+ class SingleTestThread(threading.Thread):
+ def __init__(self):
+ threading.Thread.__init__(self)
+ self.result = None
+
+ def run(self):
+ self.result = worker._run_single_test(driver, test_input)
+
+ thread = SingleTestThread()
+ thread.start()
+ thread.join(thread_timeout_sec)
+ result = thread.result
+ if thread.isAlive():
+ # If join() returned with the thread still running, the
+ # DumpRenderTree is completely hung and there's nothing
+ # more we can do with it. We have to kill all the
+ # DumpRenderTrees to free it up. If we're running more than
+ # one DumpRenderTree thread, we'll end up killing the other
+ # DumpRenderTrees too, introducing spurious crashes. We accept
+ # that tradeoff in order to avoid losing the rest of this
+ # thread's results.
+ _log.error('Test thread hung: killing all DumpRenderTrees')
+
+ driver.stop()
+
+ if not result:
+ result = test_results.TestResult(test_input.test_name, failures=[], test_run_time=0)
+ return result
+
+ def _run_test_in_this_thread(self, test_input):
+ """Run a single test file using a shared DumpRenderTree process.
+
+ Args:
+ test_input: Object containing the test filename, uri and timeout
+
+ Returns: a TestResult object.
+ """
+ if self._driver and self._driver.has_crashed():
+ self._kill_driver()
+ if not self._driver:
+ self._driver = self._port.create_driver(self._worker_number)
+ return self._run_single_test(self._driver, test_input)
+
+ def _run_single_test(self, driver, test_input):
+ return single_test_runner.run_single_test(self._port, self._options,
+ test_input, driver, self._name)
+
+
+class TestShard(object):
+ """A test shard is a named list of TestInputs."""
+
+ def __init__(self, name, test_inputs):
+ self.name = name
+ self.test_inputs = test_inputs
+ self.requires_lock = test_inputs[0].requires_lock
+
+ def __repr__(self):
+ return "TestShard(name='%s', test_inputs=%s, requires_lock=%s)" % (self.name, self.test_inputs, self.requires_lock)
+
+ def __eq__(self, other):
+ return self.name == other.name and self.test_inputs == other.test_inputs
+
+
+class Sharder(object):
+ def __init__(self, test_split_fn, test_path_separator, max_locked_shards):
+ self._split = test_split_fn
+ self._sep = test_path_separator
+ self._max_locked_shards = max_locked_shards
+
+ def shard_tests(self, test_inputs, num_workers, fully_parallel):
+ """Groups tests into batches.
+ This helps ensure that tests that depend on each other (aka bad tests!)
+ continue to run together, as most cross-test dependencies tend to
+ occur within the same directory.
+ Returns:
+ Two lists of TestShards. The first contains tests that must only be
+ run under the server lock; the second can be run whenever.
+ """
+
+ # FIXME: Move all of the sharding logic out of manager into its
+ # own class or module. Consider grouping it with the chunking logic
+ # in prepare_lists as well.
+ if num_workers == 1:
+ return self._shard_in_two(test_inputs)
+ elif fully_parallel:
+ return self._shard_every_file(test_inputs)
+ return self._shard_by_directory(test_inputs, num_workers)
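A condensed usage sketch of the Sharder, mirroring how the unit tests added later in this patch construct one; the split helper and the test names below are illustrative only:

from webkitpy.layout_tests.controllers.layout_test_runner import Sharder
from webkitpy.layout_tests.models.test_input import TestInput

def split(test_name):
    # Split 'dir/sub/test.html' into ('dir/sub', 'test.html').
    idx = test_name.rfind('/')
    return (test_name[:idx], test_name[idx + 1:]) if idx != -1 else ('', test_name)

# Hypothetical test names; lock-requiring tests live under http/ and perf/.
test_names = ['http/tests/foo/bar.html', 'fast/css/baz.html', 'perf/qux.html']
test_inputs = [TestInput(name, requires_lock=name.startswith(('http/', 'perf/')))
               for name in test_names]

sharder = Sharder(split, '/', max_locked_shards=1)
locked, unlocked = sharder.shard_tests(test_inputs, num_workers=2, fully_parallel=False)
# Locked shards must run while the server lock is held;
# unlocked shards can run at any time on any worker.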
+
+ def _shard_in_two(self, test_inputs):
+ """Returns two lists of shards, one with all the tests requiring a lock and one with the rest.
+
+ This is used when there's only one worker, to minimize the per-shard overhead."""
+ locked_inputs = []
+ unlocked_inputs = []
+ for test_input in test_inputs:
+ if test_input.requires_lock:
+ locked_inputs.append(test_input)
+ else:
+ unlocked_inputs.append(test_input)
+
+ locked_shards = []
+ unlocked_shards = []
+ if locked_inputs:
+ locked_shards = [TestShard('locked_tests', locked_inputs)]
+ if unlocked_inputs:
+ unlocked_shards = [TestShard('unlocked_tests', unlocked_inputs)]
+
+ return locked_shards, unlocked_shards
+
+ def _shard_every_file(self, test_inputs):
+ """Returns two lists of shards, each shard containing a single test file.
+
+ This mode gets maximal parallelism at the cost of much higher flakiness."""
+ locked_shards = []
+ unlocked_shards = []
+ for test_input in test_inputs:
+ # Note that we use a '.' for the shard name; the name doesn't really
+ # matter, and the only other meaningful value would be the filename,
+ # which would be really redundant.
+ if test_input.requires_lock:
+ locked_shards.append(TestShard('.', [test_input]))
+ else:
+ unlocked_shards.append(TestShard('.', [test_input]))
+
+ return locked_shards, unlocked_shards
+
+ def _shard_by_directory(self, test_inputs, num_workers):
+ """Returns two lists of shards, each shard containing all the files in a directory.
+
+ This is the default mode, and gets as much parallelism as we can while
+ minimizing flakiness caused by inter-test dependencies."""
+ locked_shards = []
+ unlocked_shards = []
+ tests_by_dir = {}
+ # FIXME: Given that the tests are already sorted by directory,
+ # we can probably rewrite this to be clearer and faster.
+ for test_input in test_inputs:
+ directory = self._split(test_input.test_name)[0]
+ tests_by_dir.setdefault(directory, [])
+ tests_by_dir[directory].append(test_input)
+
+ for directory, test_inputs in tests_by_dir.iteritems():
+ shard = TestShard(directory, test_inputs)
+ if test_inputs[0].requires_lock:
+ locked_shards.append(shard)
+ else:
+ unlocked_shards.append(shard)
+
+ # Sort the shards by directory name.
+ locked_shards.sort(key=lambda shard: shard.name)
+ unlocked_shards.sort(key=lambda shard: shard.name)
+
+ # Put a ceiling on the number of locked shards, so that we
+ # don't hammer the servers too badly.
+
+ # FIXME: For now, limit this to one shard, or to the value set
+ # with --max-locked-shards. After testing to make sure we
+ # can handle multiple shards, we should probably do something like
+ # limit this to no more than a quarter of all workers, e.g.:
+ # return max(math.ceil(num_workers / 4.0), 1)
+ return (self._resize_shards(locked_shards, self._max_locked_shards, 'locked_shard'),
+ unlocked_shards)
+
+ def _resize_shards(self, old_shards, max_new_shards, shard_name_prefix):
+ """Takes a list of shards and redistributes the tests into no more
+ than |max_new_shards| new shards."""
+
+ # This implementation assumes that each input shard only contains tests from a
+ # single directory, and that tests in each shard must remain together; as a
+ # result, a given input shard is never split between output shards.
+ #
+ # Each output shard contains the tests from one or more input shards and
+ # hence may contain tests from multiple directories.
+
+ def divide_and_round_up(numerator, divisor):
+ return int(math.ceil(float(numerator) / divisor))
+
+ def extract_and_flatten(shards):
+ test_inputs = []
+ for shard in shards:
+ test_inputs.extend(shard.test_inputs)
+ return test_inputs
+
+ def split_at(seq, index):
+ return (seq[:index], seq[index:])
+
+ num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards)
+ new_shards = []
+ remaining_shards = old_shards
+ while remaining_shards:
+ some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new)
+ new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1), extract_and_flatten(some_shards)))
+ return new_shards
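A small worked example of this redistribution (editorial illustration; the shard names follow the locked_shard_N convention used above):

# Five single-directory locked shards arriving with max_new_shards=2:
#   num_old_per_new = ceil(5 / 2.0) = 3
#   locked_shard_1 <- tests from the first three input shards
#   locked_shard_2 <- tests from the remaining two input shards
# Input shards are never split, so tests from one directory always stay
# together in a single output shard.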
+
+ def test_key(self, test_name):
+ """Turns a test name into a list with two sublists, the natural key of the
+ dirname, and the natural key of the basename.
+
+ This can be used when sorting paths so that files in a directory.
+ directory are kept together rather than being mixed in with files in
+ subdirectories."""
+ dirname, basename = self._split(test_name)
+ return (self.natural_sort_key(dirname + self._sep), self.natural_sort_key(basename))
+
+ @staticmethod
+ def natural_sort_key(string_to_split):
+ """ Turns a string into a list of string and number chunks, i.e. "z23a" -> ["z", 23, "a"]
+
+ This can be used to implement "natural sort" order. See:
+ http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
+ http://nedbatchelder.com/blog/200712.html#e20071211T054956
+ """
+ def tryint(val):
+ try:
+ return int(val)
+ except ValueError:
+ return val
+
+ return [tryint(chunk) for chunk in re.split('(\d+)', string_to_split)]
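A quick illustration of how these keys behave; this is an editorial example, but it matches the NaturalCompareTest and KeyCompareTest cases later in this patch:

# natural_sort_key('foo_2.html')  -> ['foo_', 2, '.html']
# natural_sort_key('foo_10.html') -> ['foo_', 10, '.html']
# Sorting with key=Sharder.natural_sort_key therefore puts foo_2.html before
# foo_10.html, whereas plain string comparison would sort foo_10.html first.
#
# test_key('a2/foo.html') compares the dirname key ['a', 2, '/'] before the
# basename key, so a2/ sorts ahead of a10/ and its files stay together.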
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
new file mode 100644
index 000000000..4dd2ae7ae
--- /dev/null
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_runner_unittest.py
@@ -0,0 +1,375 @@
+#!/usr/bin/python
+# Copyright (C) 2012 Google Inc. All rights reserved.
+# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests import run_webkit_tests
+from webkitpy.layout_tests.models import test_expectations
+from webkitpy.layout_tests.models import test_failures
+from webkitpy.layout_tests.models.result_summary import ResultSummary
+from webkitpy.layout_tests.models.test_input import TestInput
+from webkitpy.layout_tests.models.test_results import TestResult
+from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, Sharder, TestRunInterruptedException
+
+
+TestExpectations = test_expectations.TestExpectations
+
+
+class FakePrinter(object):
+ num_completed = 0
+ num_tests = 0
+
+ def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
+ pass
+
+ def print_started_test(self, test_name):
+ pass
+
+ def print_finished_test(self, result, expected, exp_str, got_str):
+ pass
+
+ def write(self, msg):
+ pass
+
+ def write_update(self, msg):
+ pass
+
+ def flush(self):
+ pass
+
+
+class LockCheckingRunner(LayoutTestRunner):
+ def __init__(self, port, options, printer, tester, http_lock):
+ super(LockCheckingRunner, self).__init__(options, port, printer, port.results_directory(), TestExpectations(port, []), lambda test_name: False)
+ self._finished_list_called = False
+ self._tester = tester
+ self._should_have_http_lock = http_lock
+
+ def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
+ if not self._finished_list_called:
+ self._tester.assertEquals(list_name, 'locked_tests')
+ self._tester.assertTrue(self._remaining_locked_shards)
+ self._tester.assertTrue(self._has_http_lock is self._should_have_http_lock)
+
+ super(LockCheckingRunner, self).handle_finished_list(source, list_name, num_tests, elapsed_time)
+
+ if not self._finished_list_called:
+ self._tester.assertEquals(self._remaining_locked_shards, [])
+ self._tester.assertFalse(self._has_http_lock)
+ self._finished_list_called = True
+
+
+class LayoutTestRunnerTests(unittest.TestCase):
+ def _runner(self, port=None):
+ # FIXME: we shouldn't have to use run_webkit_tests.py to get the options we need.
+ options = run_webkit_tests.parse_args(['--platform', 'test-mac-snowleopard'])[0]
+ options.child_processes = '1'
+
+ host = MockHost()
+ port = port or host.port_factory.get(options.platform, options=options)
+ return LockCheckingRunner(port, options, FakePrinter(), self, True)
+
+ def _result_summary(self, runner, tests):
+ return ResultSummary(TestExpectations(runner._port, tests), tests, 1, set())
+
+ def _run_tests(self, runner, tests):
+ test_inputs = [TestInput(test, 6000) for test in tests]
+ expectations = TestExpectations(runner._port, tests)
+ runner.run_tests(test_inputs, expectations, self._result_summary(runner, tests),
+ num_workers=1, needs_http=any('http' in test for test in tests), needs_websockets=any(['websocket' in test for test in tests]), retrying=False)
+
+ def test_http_locking(self):
+ runner = self._runner()
+ self._run_tests(runner, ['http/tests/passes/text.html', 'passes/text.html'])
+
+ def test_perf_locking(self):
+ runner = self._runner()
+ self._run_tests(runner, ['http/tests/passes/text.html', 'perf/foo/test.html'])
+
+ def test_interrupt_if_at_failure_limits(self):
+ runner = self._runner()
+ runner._options.exit_after_n_failures = None
+ runner._options.exit_after_n_crashes_or_timeouts = None
+ test_names = ['passes/text.html', 'passes/image.html']
+ runner._test_files_list = test_names
+
+ result_summary = self._result_summary(runner, test_names)
+ result_summary.unexpected_failures = 100
+ result_summary.unexpected_crashes = 50
+ result_summary.unexpected_timeouts = 50
+ # No exception when the exit_after* options are None.
+ runner._interrupt_if_at_failure_limits(result_summary)
+
+ # No exception when we haven't hit the limit yet.
+ runner._options.exit_after_n_failures = 101
+ runner._options.exit_after_n_crashes_or_timeouts = 101
+ runner._interrupt_if_at_failure_limits(result_summary)
+
+ # Interrupt if we've exceeded either limit:
+ runner._options.exit_after_n_crashes_or_timeouts = 10
+ self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, result_summary)
+ self.assertEquals(result_summary.results['passes/text.html'].type, test_expectations.SKIP)
+ self.assertEquals(result_summary.results['passes/image.html'].type, test_expectations.SKIP)
+
+ runner._options.exit_after_n_crashes_or_timeouts = None
+ runner._options.exit_after_n_failures = 10
+ exception = self.assertRaises(TestRunInterruptedException, runner._interrupt_if_at_failure_limits, result_summary)
+
+ def test_update_summary_with_result(self):
+ # Reftests expected to be image mismatch should be respected when pixel_tests=False.
+ runner = self._runner()
+ runner._options.pixel_tests = False
+ test = 'failures/expected/reftest.html'
+ expectations = TestExpectations(runner._port, tests=[test])
+ runner._expectations = expectations
+ result_summary = ResultSummary(expectations, [test], 1, set())
+ result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()])
+ runner._update_summary_with_result(result_summary, result)
+ self.assertEquals(1, result_summary.expected)
+ self.assertEquals(0, result_summary.unexpected)
+
+ def test_servers_started(self):
+
+ def start_http_server(number_of_servers=None):
+ self.http_started = True
+
+ def start_websocket_server():
+ self.websocket_started = True
+
+ def stop_http_server():
+ self.http_stopped = True
+
+ def stop_websocket_server():
+ self.websocket_stopped = True
+
+ host = MockHost()
+ port = host.port_factory.get('test-mac-leopard')
+ port.start_http_server = start_http_server
+ port.start_websocket_server = start_websocket_server
+ port.stop_http_server = stop_http_server
+ port.stop_websocket_server = stop_websocket_server
+
+ self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+ runner = self._runner(port=port)
+ runner._needs_http = True
+ runner._needs_websockets = False
+ runner.start_servers_with_lock(number_of_servers=4)
+ self.assertEquals(self.http_started, True)
+ self.assertEquals(self.websocket_started, False)
+ runner.stop_servers_with_lock()
+ self.assertEquals(self.http_stopped, True)
+ self.assertEquals(self.websocket_stopped, False)
+
+ self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+ runner._needs_http = True
+ runner._needs_websockets = True
+ runner.start_servers_with_lock(number_of_servers=4)
+ self.assertEquals(self.http_started, True)
+ self.assertEquals(self.websocket_started, True)
+ runner.stop_servers_with_lock()
+ self.assertEquals(self.http_stopped, True)
+ self.assertEquals(self.websocket_stopped, True)
+
+ self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
+ runner._needs_http = False
+ runner._needs_websockets = False
+ runner.start_servers_with_lock(number_of_servers=4)
+ self.assertEquals(self.http_started, False)
+ self.assertEquals(self.websocket_started, False)
+ runner.stop_servers_with_lock()
+ self.assertEquals(self.http_stopped, False)
+ self.assertEquals(self.websocket_stopped, False)
+
+
+class SharderTests(unittest.TestCase):
+
+ test_list = [
+ "http/tests/websocket/tests/unicode.htm",
+ "animations/keyframes.html",
+ "http/tests/security/view-source-no-refresh.html",
+ "http/tests/websocket/tests/websocket-protocol-ignored.html",
+ "fast/css/display-none-inline-style-change-crash.html",
+ "http/tests/xmlhttprequest/supported-xml-content-types.html",
+ "dom/html/level2/html/HTMLAnchorElement03.html",
+ "ietestcenter/Javascript/11.1.5_4-4-c-1.html",
+ "dom/html/level2/html/HTMLAnchorElement06.html",
+ "perf/object-keys.html",
+ ]
+
+ def get_test_input(self, test_file):
+ return TestInput(test_file, requires_lock=(test_file.startswith('http') or test_file.startswith('perf')))
+
+ def get_shards(self, num_workers, fully_parallel, test_list=None, max_locked_shards=1):
+ def split(test_name):
+ idx = test_name.rfind('/')
+ if idx != -1:
+ return (test_name[0:idx], test_name[idx + 1:])
+
+ self.sharder = Sharder(split, '/', max_locked_shards)
+ test_list = test_list or self.test_list
+ return self.sharder.shard_tests([self.get_test_input(test) for test in test_list], num_workers, fully_parallel)
+
+ def assert_shards(self, actual_shards, expected_shard_names):
+ self.assertEquals(len(actual_shards), len(expected_shard_names))
+ for i, shard in enumerate(actual_shards):
+ expected_shard_name, expected_test_names = expected_shard_names[i]
+ self.assertEquals(shard.name, expected_shard_name)
+ self.assertEquals([test_input.test_name for test_input in shard.test_inputs],
+ expected_test_names)
+
+ def test_shard_by_dir(self):
+ locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False)
+
+ # Note that although there are tests in multiple dirs that need locks,
+ # they are crammed into a single shard in order to reduce the # of
+ # workers hitting the server at once.
+ self.assert_shards(locked,
+ [('locked_shard_1',
+ ['http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/unicode.htm',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html'])])
+ self.assert_shards(unlocked,
+ [('animations', ['animations/keyframes.html']),
+ ('dom/html/level2/html', ['dom/html/level2/html/HTMLAnchorElement03.html',
+ 'dom/html/level2/html/HTMLAnchorElement06.html']),
+ ('fast/css', ['fast/css/display-none-inline-style-change-crash.html']),
+ ('ietestcenter/Javascript', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
+
+ def test_shard_every_file(self):
+ locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True)
+ self.assert_shards(locked,
+ [('.', ['http/tests/websocket/tests/unicode.htm']),
+ ('.', ['http/tests/security/view-source-no-refresh.html']),
+ ('.', ['http/tests/websocket/tests/websocket-protocol-ignored.html']),
+ ('.', ['http/tests/xmlhttprequest/supported-xml-content-types.html']),
+ ('.', ['perf/object-keys.html'])])
+ self.assert_shards(unlocked,
+ [('.', ['animations/keyframes.html']),
+ ('.', ['fast/css/display-none-inline-style-change-crash.html']),
+ ('.', ['dom/html/level2/html/HTMLAnchorElement03.html']),
+ ('.', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html']),
+ ('.', ['dom/html/level2/html/HTMLAnchorElement06.html'])])
+
+ def test_shard_in_two(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False)
+ self.assert_shards(locked,
+ [('locked_tests',
+ ['http/tests/websocket/tests/unicode.htm',
+ 'http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html'])])
+ self.assert_shards(unlocked,
+ [('unlocked_tests',
+ ['animations/keyframes.html',
+ 'fast/css/display-none-inline-style-change-crash.html',
+ 'dom/html/level2/html/HTMLAnchorElement03.html',
+ 'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
+ 'dom/html/level2/html/HTMLAnchorElement06.html'])])
+
+ def test_shard_in_two_has_no_locked_shards(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
+ test_list=['animations/keyframe.html'])
+ self.assertEquals(len(locked), 0)
+ self.assertEquals(len(unlocked), 1)
+
+ def test_shard_in_two_has_no_unlocked_shards(self):
+ locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
+ test_list=['http/tests/websocket/tests/unicode.htm'])
+ self.assertEquals(len(locked), 1)
+ self.assertEquals(len(unlocked), 0)
+
+ def test_multiple_locked_shards(self):
+ locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False, max_locked_shards=2)
+ self.assert_shards(locked,
+ [('locked_shard_1',
+ ['http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/unicode.htm',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html']),
+ ('locked_shard_2',
+ ['http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html'])])
+
+ locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False)
+ self.assert_shards(locked,
+ [('locked_shard_1',
+ ['http/tests/security/view-source-no-refresh.html',
+ 'http/tests/websocket/tests/unicode.htm',
+ 'http/tests/websocket/tests/websocket-protocol-ignored.html',
+ 'http/tests/xmlhttprequest/supported-xml-content-types.html',
+ 'perf/object-keys.html'])])
+
+
+class NaturalCompareTest(unittest.TestCase):
+ def assert_cmp(self, x, y, result):
+ self.assertEquals(cmp(Sharder.natural_sort_key(x), Sharder.natural_sort_key(y)), result)
+
+ def test_natural_compare(self):
+ self.assert_cmp('a', 'a', 0)
+ self.assert_cmp('ab', 'a', 1)
+ self.assert_cmp('a', 'ab', -1)
+ self.assert_cmp('', '', 0)
+ self.assert_cmp('', 'ab', -1)
+ self.assert_cmp('1', '2', -1)
+ self.assert_cmp('2', '1', 1)
+ self.assert_cmp('1', '10', -1)
+ self.assert_cmp('2', '10', -1)
+ self.assert_cmp('foo_1.html', 'foo_2.html', -1)
+ self.assert_cmp('foo_1.1.html', 'foo_2.html', -1)
+ self.assert_cmp('foo_1.html', 'foo_10.html', -1)
+ self.assert_cmp('foo_2.html', 'foo_10.html', -1)
+ self.assert_cmp('foo_23.html', 'foo_10.html', 1)
+ self.assert_cmp('foo_23.html', 'foo_100.html', -1)
+
+
+class KeyCompareTest(unittest.TestCase):
+ def setUp(self):
+ def split(test_name):
+ idx = test_name.rfind('/')
+ if idx != -1:
+ return (test_name[0:idx], test_name[idx + 1:])
+
+ self.sharder = Sharder(split, '/', 1)
+
+ def assert_cmp(self, x, y, result):
+ self.assertEquals(cmp(self.sharder.test_key(x), self.sharder.test_key(y)), result)
+
+ def test_test_key(self):
+ self.assert_cmp('/a', '/a', 0)
+ self.assert_cmp('/a', '/b', -1)
+ self.assert_cmp('/a2', '/a10', -1)
+ self.assert_cmp('/a2/foo', '/a10/foo', -1)
+ self.assert_cmp('/a/foo11', '/a/foo2', 1)
+ self.assert_cmp('/ab', '/a/a/b', -1)
+ self.assert_cmp('/a/a/b', '/ab', 1)
+ self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
index 0544918f4..e6924a3ee 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager.py
@@ -45,7 +45,8 @@ import sys
import time
from webkitpy.common import message_pool
-from webkitpy.layout_tests.controllers import worker
+from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
+from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner, TestRunInterruptedException, WorkerException
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_layout_results_generator
from webkitpy.layout_tests.layout_package import json_results_generator
@@ -253,36 +254,6 @@ def summarize_results(port_obj, expectations, result_summary, retry_summary, tes
return results
-class TestRunInterruptedException(Exception):
- """Raised when a test run should be stopped immediately."""
- def __init__(self, reason):
- Exception.__init__(self)
- self.reason = reason
- self.msg = reason
-
- def __reduce__(self):
- return self.__class__, (self.reason,)
-
-
-# Export this so callers don't need to know about message pools.
-WorkerException = message_pool.WorkerException
-
-
-class TestShard(object):
- """A test shard is a named list of TestInputs."""
-
- # FIXME: Make this class visible, used by workers as well.
- def __init__(self, name, test_inputs):
- self.name = name
- self.test_inputs = test_inputs
-
- def __repr__(self):
- return "TestShard(name='%s', test_inputs=%s'" % (self.name, self.test_inputs)
-
- def __eq__(self, other):
- return self.name == other.name and self.test_inputs == other.test_inputs
-
-
class Manager(object):
"""A class for managing running a series of tests on a series of layout
test files."""
@@ -305,240 +276,67 @@ class Manager(object):
self.PERF_SUBDIR = 'perf'
self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
- self._has_http_lock = False
-
- self._remaining_locked_shards = []
# disable wss server. need to install pyOpenSSL on buildbots.
# self._websocket_secure_server = websocket_server.PyWebSocket(
# options.results_directory, use_tls=True, port=9323)
- # a set of test files, and the same tests as a list
-
self._paths = set()
-
- # FIXME: Rename to test_names.
- self._test_files = set()
- self._test_files_list = None
- self._result_queue = Queue.Queue()
+ self._test_names = None
self._retrying = False
self._results_directory = self._port.results_directory()
-
- self._all_results = []
- self._group_stats = {}
- self._worker_stats = {}
- self._current_result_summary = None
+ self._finder = LayoutTestFinder(self._port, self._options)
+ self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._expectations, self._test_is_slow)
def _collect_tests(self, args):
- """Find all the files to test.
-
- Args:
- args: list of test arguments from the command line
-
- """
- paths = self._strip_test_dir_prefixes(args)
- if self._options.test_list:
- paths += self._strip_test_dir_prefixes(read_test_files(self._filesystem, self._options.test_list, self._port.TEST_PATH_SEPARATOR))
- self._paths = set(paths)
- self._test_files = self._port.tests(paths)
-
- def _strip_test_dir_prefixes(self, paths):
- return [self._strip_test_dir_prefix(path) for path in paths if path]
-
- def _strip_test_dir_prefix(self, path):
- # Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
- # the filesystem uses '\\' as a directory separator.
- if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
- return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
- if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
- return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
- return path
+ return self._finder.find_tests(self._options, args)
def _is_http_test(self, test):
- return self.HTTP_SUBDIR in test or self.WEBSOCKET_SUBDIR in test
+ return self.HTTP_SUBDIR in test or self._is_websocket_test(test)
+
+ def _is_websocket_test(self, test):
+ return self.WEBSOCKET_SUBDIR in test
def _http_tests(self):
- return set(test for test in self._test_files if self._is_http_test(test))
+ return set(test for test in self._test_names if self._is_http_test(test))
def _websocket_tests(self):
- return set(test for test in self._test_files if self.WEBSOCKET_SUBDIR in test)
+ return set(test for test in self._test_names if self._is_websocket_test(test))
def _is_perf_test(self, test):
return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test
- def _parse_expectations(self):
- self._expectations = test_expectations.TestExpectations(self._port, self._test_files)
-
- def _split_into_chunks_if_necessary(self, skipped):
- if not self._options.run_chunk and not self._options.run_part:
- return skipped
-
- # If the user specifies they just want to run a subset of the tests,
- # just grab a subset of the non-skipped tests.
- chunk_value = self._options.run_chunk or self._options.run_part
- test_files = self._test_files_list
- try:
- (chunk_num, chunk_len) = chunk_value.split(":")
- chunk_num = int(chunk_num)
- assert(chunk_num >= 0)
- test_size = int(chunk_len)
- assert(test_size > 0)
- except AssertionError:
- _log.critical("invalid chunk '%s'" % chunk_value)
- return None
-
- # Get the number of tests
- num_tests = len(test_files)
-
- # Get the start offset of the slice.
- if self._options.run_chunk:
- chunk_len = test_size
- # In this case chunk_num can be really large. We need
- # to make the slave fit in the current number of tests.
- slice_start = (chunk_num * chunk_len) % num_tests
- else:
- # Validate the data.
- assert(test_size <= num_tests)
- assert(chunk_num <= test_size)
-
- # To count the chunk_len, and make sure we don't skip
- # some tests, we round to the next value that fits exactly
- # all the parts.
- rounded_tests = num_tests
- if rounded_tests % test_size != 0:
- rounded_tests = (num_tests + test_size - (num_tests % test_size))
-
- chunk_len = rounded_tests / test_size
- slice_start = chunk_len * (chunk_num - 1)
- # It does not mind if we go over test_size.
-
- # Get the end offset of the slice.
- slice_end = min(num_tests, slice_start + chunk_len)
-
- files = test_files[slice_start:slice_end]
-
- _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))
-
- # If we reached the end and we don't have enough tests, we run some
- # from the beginning.
- if slice_end - slice_start < chunk_len:
- extra = chunk_len - (slice_end - slice_start)
- _log.debug(' last chunk is partial, appending [0:%d]' % extra)
- files.extend(test_files[0:extra])
-
- len_skip_chunk = int(len(files) * len(skipped) / float(len(self._test_files)))
- skip_chunk_list = list(skipped)[0:len_skip_chunk]
- skip_chunk = set(skip_chunk_list)
-
- # FIXME: This is a total hack.
- # Update expectations so that the stats are calculated correctly.
- # We need to pass a list that includes the right # of skipped files
- # to ParseExpectations so that ResultSummary() will get the correct
- # stats. So, we add in the subset of skipped files, and then
- # subtract them back out.
- self._test_files_list = files + skip_chunk_list
- self._test_files = set(self._test_files_list)
-
- self._parse_expectations()
-
- self._test_files = set(files)
- self._test_files_list = files
-
- return skip_chunk
-
- # FIXME: This method is way too long and needs to be broken into pieces.
- def prepare_lists_and_print_output(self):
- """Create appropriate subsets of test lists and returns a
- ResultSummary object. Also prints expected test counts.
- """
-
- # Remove skipped - both fixable and ignored - files from the
- # top-level list of files to test.
- found_test_files = set(self._test_files)
- num_all_test_files = len(self._test_files)
-
- skipped = self._expectations.get_tests_with_result_type(test_expectations.SKIP)
- if not self._options.http:
- skipped.update(set(self._http_tests()))
-
- if self._options.skipped == 'only':
- self._test_files = self._test_files.intersection(skipped)
- elif self._options.skipped == 'default':
- self._test_files -= skipped
- elif self._options.skipped == 'ignore':
- pass # just to be clear that we're ignoring the skip list.
-
- if self._options.skip_failing_tests:
- self._test_files -= self._expectations.get_tests_with_result_type(test_expectations.FAIL)
- self._test_files -= self._expectations.get_tests_with_result_type(test_expectations.FLAKY)
-
- # now make sure we're explicitly running any tests passed on the command line.
- self._test_files.update(found_test_files.intersection(self._paths))
-
- num_to_run = len(self._test_files)
- num_skipped = num_all_test_files - num_to_run
-
- if not num_to_run:
- _log.critical('No tests to run.')
- return None
+ def _prepare_lists(self):
+ tests_to_skip = self._finder.skip_tests(self._paths, self._test_names, self._expectations, self._http_tests())
+ self._test_names = list(set(self._test_names) - tests_to_skip)
# Create a sorted list of test files so the subset chunk,
# if used, contains alphabetically consecutive tests.
- self._test_files_list = list(self._test_files)
if self._options.randomize_order:
- random.shuffle(self._test_files_list)
+ random.shuffle(self._test_names)
else:
- self._test_files_list.sort(key=lambda test: test_key(self._port, test))
+ self._test_names.sort(key=self._runner.test_key)
- skipped = self._split_into_chunks_if_necessary(skipped)
+ self._test_names, tests_in_other_chunks = self._finder.split_into_chunks(self._test_names)
+ self._expectations.add_skipped_tests(tests_in_other_chunks)
+ tests_to_skip.update(tests_in_other_chunks)
- # FIXME: It's unclear how --repeat-each and --iterations should interact with chunks?
- if self._options.repeat_each:
+ if self._options.repeat_each > 1:
list_with_repetitions = []
- for test in self._test_files_list:
+ for test in self._test_names:
list_with_repetitions += ([test] * self._options.repeat_each)
- self._test_files_list = list_with_repetitions
-
- if self._options.iterations:
- self._test_files_list = self._test_files_list * self._options.iterations
-
- iterations = \
- (self._options.repeat_each if self._options.repeat_each else 1) * \
- (self._options.iterations if self._options.iterations else 1)
- result_summary = ResultSummary(self._expectations, self._test_files | skipped, iterations)
-
- self._printer.print_expected(num_all_test_files, result_summary, self._expectations.get_tests_with_result_type)
-
- if self._options.skipped != 'ignore':
- # Note that we don't actually run the skipped tests (they were
- # subtracted out of self._test_files, above), but we stub out the
- # results here so the statistics can remain accurate.
- for test in skipped:
- result = test_results.TestResult(test)
- result.type = test_expectations.SKIP
- for iteration in range(iterations):
- result_summary.add(result, expected=True, test_is_slow=self._test_is_slow(test))
-
- return result_summary
-
- def _get_dir_for_test_file(self, test_file):
- """Returns the highest-level directory by which to shard the given
- test file."""
- directory, test_file = self._port.split_test(test_file)
-
- # The http tests are very stable on mac/linux.
- # TODO(ojan): Make the http server on Windows be apache so we can
- # turn shard the http tests there as well. Switching to apache is
- # what made them stable on linux/mac.
- return directory
-
- def _get_test_input_for_file(self, test_file):
- """Returns the appropriate TestInput object for the file. Mostly this
- is used for looking up the timeout value (in ms) to use for the given
- test."""
- if self._test_is_slow(test_file):
- return TestInput(test_file, self._options.slow_time_out_ms)
- return TestInput(test_file, self._options.time_out_ms)
+ self._test_names = list_with_repetitions
+
+ if self._options.iterations > 1:
+ self._test_names = self._test_names * self._options.iterations
+
+ iterations = self._options.repeat_each * self._options.iterations
+ return ResultSummary(self._expectations, set(self._test_names), iterations, tests_to_skip)
+
+ def _test_input_for_file(self, test_file):
+ return TestInput(test_file,
+ self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
+ self._test_requires_lock(test_file))
def _test_requires_lock(self, test_file):
"""Return True if the test needs to be locked when
@@ -550,261 +348,15 @@ class Manager(object):
def _test_is_slow(self, test_file):
return self._expectations.has_modifier(test_file, test_expectations.SLOW)
- def _is_ref_test(self, test_input):
- if test_input.reference_files is None:
- # Lazy initialization.
- test_input.reference_files = self._port.reference_files(test_input.test_name)
- return bool(test_input.reference_files)
-
- def _shard_tests(self, test_files, num_workers, fully_parallel, shard_ref_tests):
- """Groups tests into batches.
- This helps ensure that tests that depend on each other (aka bad tests!)
- continue to run together as most cross-tests dependencies tend to
- occur within the same directory.
- Return:
- Two list of TestShards. The first contains tests that must only be
- run under the server lock, the second can be run whenever.
- """
-
- # FIXME: Move all of the sharding logic out of manager into its
- # own class or module. Consider grouping it with the chunking logic
- # in prepare_lists as well.
- if num_workers == 1:
- return self._shard_in_two(test_files, shard_ref_tests)
- elif fully_parallel:
- return self._shard_every_file(test_files)
- return self._shard_by_directory(test_files, num_workers, shard_ref_tests)
-
- def _shard_in_two(self, test_files, shard_ref_tests):
- """Returns two lists of shards, one with all the tests requiring a lock and one with the rest.
-
- This is used when there's only one worker, to minimize the per-shard overhead."""
- locked_inputs = []
- locked_ref_test_inputs = []
- unlocked_inputs = []
- unlocked_ref_test_inputs = []
- for test_file in test_files:
- test_input = self._get_test_input_for_file(test_file)
- if self._test_requires_lock(test_file):
- if shard_ref_tests and self._is_ref_test(test_input):
- locked_ref_test_inputs.append(test_input)
- else:
- locked_inputs.append(test_input)
- else:
- if shard_ref_tests and self._is_ref_test(test_input):
- unlocked_ref_test_inputs.append(test_input)
- else:
- unlocked_inputs.append(test_input)
- locked_inputs.extend(locked_ref_test_inputs)
- unlocked_inputs.extend(unlocked_ref_test_inputs)
-
- locked_shards = []
- unlocked_shards = []
- if locked_inputs:
- locked_shards = [TestShard('locked_tests', locked_inputs)]
- if unlocked_inputs:
- unlocked_shards.append(TestShard('unlocked_tests', unlocked_inputs))
-
- return locked_shards, unlocked_shards
-
- def _shard_every_file(self, test_files):
- """Returns two lists of shards, each shard containing a single test file.
-
- This mode gets maximal parallelism at the cost of much higher flakiness."""
- locked_shards = []
- unlocked_shards = []
- for test_file in test_files:
- test_input = self._get_test_input_for_file(test_file)
-
- # Note that we use a '.' for the shard name; the name doesn't really
- # matter, and the only other meaningful value would be the filename,
- # which would be really redundant.
- if self._test_requires_lock(test_file):
- locked_shards.append(TestShard('.', [test_input]))
- else:
- unlocked_shards.append(TestShard('.', [test_input]))
-
- return locked_shards, unlocked_shards
-
- def _shard_by_directory(self, test_files, num_workers, shard_ref_tests):
- """Returns two lists of shards, each shard containing all the files in a directory.
-
- This is the default mode, and gets as much parallelism as we can while
- minimizing flakiness caused by inter-test dependencies."""
- locked_shards = []
- unlocked_shards = []
- tests_by_dir = {}
- ref_tests_by_dir = {}
- # FIXME: Given that the tests are already sorted by directory,
- # we can probably rewrite this to be clearer and faster.
- for test_file in test_files:
- directory = self._get_dir_for_test_file(test_file)
- test_input = self._get_test_input_for_file(test_file)
- if shard_ref_tests and self._is_ref_test(test_input):
- ref_tests_by_dir.setdefault(directory, [])
- ref_tests_by_dir[directory].append(test_input)
- else:
- tests_by_dir.setdefault(directory, [])
- tests_by_dir[directory].append(test_input)
-
- for directory, test_inputs in tests_by_dir.iteritems():
- shard = TestShard(directory, test_inputs)
- if self._test_requires_lock(directory):
- locked_shards.append(shard)
- else:
- unlocked_shards.append(shard)
-
- for directory, test_inputs in ref_tests_by_dir.iteritems():
- # '~' to place the ref tests after other tests after sorted.
- shard = TestShard('~ref:' + directory, test_inputs)
- if self._test_requires_lock(directory):
- locked_shards.append(shard)
- else:
- unlocked_shards.append(shard)
-
- # Sort the shards by directory name.
- locked_shards.sort(key=lambda shard: shard.name)
- unlocked_shards.sort(key=lambda shard: shard.name)
-
- return (self._resize_shards(locked_shards, self._max_locked_shards(num_workers),
- 'locked_shard'),
- unlocked_shards)
-
- def _max_locked_shards(self, num_workers):
- # Put a ceiling on the number of locked shards, so that we
- # don't hammer the servers too badly.
-
- # FIXME: For now, limit to one shard or set it
- # with the --max-locked-shards. After testing to make sure we
- # can handle multiple shards, we should probably do something like
- # limit this to no more than a quarter of all workers, e.g.:
- # return max(math.ceil(num_workers / 4.0), 1)
- if self._options.max_locked_shards:
- num_of_locked_shards = self._options.max_locked_shards
- else:
- num_of_locked_shards = 1
-
- return num_of_locked_shards
-
- def _resize_shards(self, old_shards, max_new_shards, shard_name_prefix):
- """Takes a list of shards and redistributes the tests into no more
- than |max_new_shards| new shards."""
-
- # This implementation assumes that each input shard only contains tests from a
- # single directory, and that tests in each shard must remain together; as a
- # result, a given input shard is never split between output shards.
- #
- # Each output shard contains the tests from one or more input shards and
- # hence may contain tests from multiple directories.
-
- def divide_and_round_up(numerator, divisor):
- return int(math.ceil(float(numerator) / divisor))
-
- def extract_and_flatten(shards):
- test_inputs = []
- for shard in shards:
- test_inputs.extend(shard.test_inputs)
- return test_inputs
-
- def split_at(seq, index):
- return (seq[:index], seq[index:])
-
- num_old_per_new = divide_and_round_up(len(old_shards), max_new_shards)
- new_shards = []
- remaining_shards = old_shards
- while remaining_shards:
- some_shards, remaining_shards = split_at(remaining_shards, num_old_per_new)
- new_shards.append(TestShard('%s_%d' % (shard_name_prefix, len(new_shards) + 1),
- extract_and_flatten(some_shards)))
- return new_shards
-
- def _run_tests(self, file_list, result_summary, num_workers):
- """Runs the tests in the file_list.
-
- Return: A tuple (interrupted, keyboard_interrupted, thread_timings,
- test_timings, individual_test_timings)
- interrupted is whether the run was interrupted
- keyboard_interrupted is whether the interruption was because someone
- typed Ctrl^C
- thread_timings is a list of dicts with the total runtime
- of each thread with 'name', 'num_tests', 'total_time' properties
- test_timings is a list of timings for each sharded subdirectory
- of the form [time, directory_name, num_tests]
- individual_test_timings is a list of run times for each test
- in the form {filename:filename, test_run_time:test_run_time}
- result_summary: summary object to populate with the results
- """
- self._current_result_summary = result_summary
- self._all_results = []
- self._group_stats = {}
- self._worker_stats = {}
-
- keyboard_interrupted = False
- interrupted = False
-
- self._printer.write_update('Sharding tests ...')
- locked_shards, unlocked_shards = self._shard_tests(file_list, int(self._options.child_processes), self._options.fully_parallel, self._options.shard_ref_tests)
-
- # FIXME: We don't have a good way to coordinate the workers so that
- # they don't try to run the shards that need a lock if we don't actually
- # have the lock. The easiest solution at the moment is to grab the
- # lock at the beginning of the run, and then run all of the locked
- # shards first. This minimizes the time spent holding the lock, but
- # means that we won't be running tests while we're waiting for the lock.
- # If this becomes a problem in practice we'll need to change this.
-
- all_shards = locked_shards + unlocked_shards
- self._remaining_locked_shards = locked_shards
- if locked_shards and self._options.http:
- self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))
-
- num_workers = min(num_workers, len(all_shards))
- self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))
-
- def worker_factory(worker_connection):
- return worker.Worker(worker_connection, self.results_directory(), self._options)
-
- if self._options.dry_run:
- return (keyboard_interrupted, interrupted, self._worker_stats.values(), self._group_stats, self._all_results)
-
- self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))
-
- try:
- with message_pool.get(self, worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
- pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
- except KeyboardInterrupt:
- self._printer.flush()
- self._printer.write('Interrupted, exiting ...')
- keyboard_interrupted = True
- except TestRunInterruptedException, e:
- _log.warning(e.reason)
- interrupted = True
- except Exception, e:
- _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e)))
- raise
- finally:
- self.stop_servers_with_lock()
-
- # FIXME: should this be a class instead of a tuple?
- return (interrupted, keyboard_interrupted, self._worker_stats.values(), self._group_stats, self._all_results)
-
- def results_directory(self):
- if not self._retrying:
- return self._results_directory
- else:
- self._filesystem.maybe_make_directory(self._filesystem.join(self._results_directory, 'retries'))
- return self._filesystem.join(self._results_directory, 'retries')
-
def needs_servers(self):
- return any(self._test_requires_lock(test_name) for test_name in self._test_files) and self._options.http
+ return any(self._test_requires_lock(test_name) for test_name in self._test_names) and self._options.http
def _set_up_run(self):
- """Configures the system to be ready to run tests.
-
- Returns a ResultSummary object if we should continue to run tests,
- or None if we should abort.
+ self._printer.write_update("Checking build ...")
+ if not self._port.check_build(self.needs_servers()):
+ _log.error("Build check failed")
+ return False
- """
# This must be started before we check the system dependencies,
# since the helper may do things to make the setup correct.
if self._options.pixel_tests:
@@ -816,7 +368,7 @@ class Manager(object):
self._printer.write_update("Checking system dependencies ...")
if not self._port.check_sys_deps(self.needs_servers()):
self._port.stop_helper()
- return None
+ return False
if self._options.clobber_old_results:
self._clobber_old_results()
@@ -825,43 +377,42 @@ class Manager(object):
self._port.host.filesystem.maybe_make_directory(self._results_directory)
self._port.setup_test_run()
-
- self._printer.write_update("Preparing tests ...")
- result_summary = self.prepare_lists_and_print_output()
- if not result_summary:
- return None
-
- return result_summary
+ return True
def run(self, args):
"""Run all our tests on all our test files and return the number of unexpected results (0 == success)."""
self._printer.write_update("Collecting tests ...")
try:
- self._collect_tests(args)
- except IOError as e:
- # This is raised when the --test-list doesn't exist.
- return -1
-
- self._printer.write_update("Checking build ...")
- if not self._port.check_build(self.needs_servers()):
- _log.error("Build check failed")
+ self._paths, self._test_names = self._collect_tests(args)
+ except IOError as exception:
+ # This is raised if --test-list doesn't exist
return -1
self._printer.write_update("Parsing expectations ...")
- self._parse_expectations()
+ self._expectations = test_expectations.TestExpectations(self._port, self._test_names)
+
+ num_all_test_files_found = len(self._test_names)
+ result_summary = self._prepare_lists()
- result_summary = self._set_up_run()
- if not result_summary:
+ # Check to make sure we're not skipping every test.
+ if not self._test_names:
+ _log.critical('No tests to run.')
return -1
- assert(len(self._test_files))
+ self._printer.print_found(num_all_test_files_found, len(self._test_names), self._options.repeat_each, self._options.iterations)
+ self._printer.print_expected(result_summary, self._expectations.get_tests_with_result_type)
+
+ if not self._set_up_run():
+ return -1
start_time = time.time()
- interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = self._run_tests(self._test_files_list, result_summary, int(self._options.child_processes))
+ interrupted, keyboard_interrupted, thread_timings, test_timings, individual_test_timings = \
+ self._run_tests(self._test_names, result_summary, int(self._options.child_processes))
# We exclude the crashes from the list of results to retry, because
# we want to treat even a potentially flaky crash as an error.
+
failures = self._get_failures(result_summary, include_crashes=self._port.should_retry_crashes(), include_missing=False)
retry_summary = result_summary
while (len(failures) and self._options.retry_failures and not self._retrying and not interrupted and not keyboard_interrupted):
@@ -869,9 +420,9 @@ class Manager(object):
_log.info("Retrying %d unexpected failure(s) ..." % len(failures))
_log.info('')
self._retrying = True
- retry_summary = ResultSummary(self._expectations, failures.keys())
+ retry_summary = ResultSummary(self._expectations, failures.keys(), 1, set())
# Note that we intentionally ignore the return value here.
- self._run_tests(failures.keys(), retry_summary, num_workers=1)
+ self._run_tests(failures.keys(), retry_summary, 1)
failures = self._get_failures(retry_summary, include_crashes=True, include_missing=True)
end_time = time.time()
@@ -906,28 +457,11 @@ class Manager(object):
return self._port.exit_code_from_summarized_results(unexpected_results)
- def start_servers_with_lock(self, number_of_servers):
- self._printer.write_update('Acquiring http lock ...')
- self._port.acquire_http_lock()
- if self._http_tests():
- self._printer.write_update('Starting HTTP server ...')
- self._port.start_http_server(number_of_servers=number_of_servers)
- if self._websocket_tests():
- self._printer.write_update('Starting WebSocket server ...')
- self._port.start_websocket_server()
- self._has_http_lock = True
-
- def stop_servers_with_lock(self):
- if self._has_http_lock:
- if self._http_tests():
- self._printer.write_update('Stopping HTTP server ...')
- self._port.stop_http_server()
- if self._websocket_tests():
- self._printer.write_update('Stopping WebSocket server ...')
- self._port.stop_websocket_server()
- self._printer.write_update('Releasing server lock ...')
- self._port.release_http_lock()
- self._has_http_lock = False
+ def _run_tests(self, tests, result_summary, num_workers):
+ test_inputs = [self._test_input_for_file(test) for test in tests]
+ needs_http = any(self._is_http_test(test) for test in tests)
+ needs_websockets = any(self._is_websocket_test(test) for test in tests)
+ return self._runner.run_tests(test_inputs, self._expectations, result_summary, num_workers, needs_http, needs_websockets, self._retrying)
def _clean_up_run(self):
"""Restores the system after we're done running tests."""
@@ -963,52 +497,6 @@ class Manager(object):
writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
writer.write_crash_log(crash_log)
- def _mark_interrupted_tests_as_skipped(self, result_summary):
- for test_name in self._test_files:
- if test_name not in result_summary.results:
- result = test_results.TestResult(test_name, [test_failures.FailureEarlyExit()])
- # FIXME: We probably need to loop here if there are multiple iterations.
- # FIXME: Also, these results are really neither expected nor unexpected. We probably
- # need a third type of result.
- result_summary.add(result, expected=False, test_is_slow=self._test_is_slow(test_name))
-
- def _interrupt_if_at_failure_limits(self, result_summary):
- # Note: The messages in this method are constructed to match old-run-webkit-tests
- # so that existing buildbot grep rules work.
- def interrupt_if_at_failure_limit(limit, failure_count, result_summary, message):
- if limit and failure_count >= limit:
- message += " %d tests run." % (result_summary.expected + result_summary.unexpected)
- self._mark_interrupted_tests_as_skipped(result_summary)
- raise TestRunInterruptedException(message)
-
- interrupt_if_at_failure_limit(
- self._options.exit_after_n_failures,
- result_summary.unexpected_failures,
- result_summary,
- "Exiting early after %d failures." % result_summary.unexpected_failures)
- interrupt_if_at_failure_limit(
- self._options.exit_after_n_crashes_or_timeouts,
- result_summary.unexpected_crashes + result_summary.unexpected_timeouts,
- result_summary,
- # This differs from ORWT because it does not include WebProcess crashes.
- "Exiting early after %d crashes and %d timeouts." % (result_summary.unexpected_crashes, result_summary.unexpected_timeouts))
-
- def _update_summary_with_result(self, result_summary, result):
- if result.type == test_expectations.SKIP:
- exp_str = got_str = 'SKIP'
- expected = True
- else:
- expected = self._expectations.matches_an_expected_result(result.test_name, result.type, self._options.pixel_tests or test_failures.is_reftest_failure(result.failures))
- exp_str = self._expectations.get_expectations_string(result.test_name)
- got_str = self._expectations.expectation_to_string(result.type)
-
- result_summary.add(result, expected, self._test_is_slow(result.test_name))
-
- # FIXME: there's too many arguments to this function.
- self._printer.print_finished_test(result, expected, exp_str, got_str, result_summary, self._retrying, self._test_files_list)
-
- self._interrupt_if_at_failure_limits(result_summary)
-
def _clobber_old_results(self):
# Just clobber the actual test results directories since the other
# files in the results directory are explicitly used for cross-run
@@ -1076,7 +564,7 @@ class Manager(object):
self._port, self._options.builder_name, self._options.build_name,
self._options.build_number, self._results_directory,
BUILDER_BASE_URL, individual_test_timings,
- self._expectations, result_summary, self._test_files_list,
+ self._expectations, result_summary, self._test_names,
self._options.test_results_server,
"layout-tests",
self._options.master_name)
@@ -1122,84 +610,3 @@ class Manager(object):
results_filename = self._filesystem.join(self._results_directory, "results.html")
self._port.show_results_html_file(results_filename)
-
- def handle(self, name, source, *args):
- method = getattr(self, '_handle_' + name)
- if method:
- return method(source, *args)
- raise AssertionError('unknown message %s received from %s, args=%s' % (name, source, repr(args)))
-
- def _handle_started_test(self, worker_name, test_input, test_timeout_sec):
- # FIXME: log that we've started another test.
- pass
-
- def _handle_finished_test_list(self, worker_name, list_name, num_tests, elapsed_time):
- self._group_stats[list_name] = (num_tests, elapsed_time)
-
- def find(name, test_lists):
- for i in range(len(test_lists)):
- if test_lists[i].name == name:
- return i
- return -1
-
- index = find(list_name, self._remaining_locked_shards)
- if index >= 0:
- self._remaining_locked_shards.pop(index)
- if not self._remaining_locked_shards:
- self.stop_servers_with_lock()
-
- def _handle_finished_test(self, worker_name, result, elapsed_time, log_messages=[]):
- self._worker_stats.setdefault(worker_name, {'name': worker_name, 'num_tests': 0, 'total_time': 0})
- self._worker_stats[worker_name]['total_time'] += elapsed_time
- self._worker_stats[worker_name]['num_tests'] += 1
- self._all_results.append(result)
- self._update_summary_with_result(self._current_result_summary, result)
-
-
-def read_test_files(fs, filenames, test_path_separator):
- tests = []
- for filename in filenames:
- try:
- if test_path_separator != fs.sep:
- filename = filename.replace(test_path_separator, fs.sep)
- file_contents = fs.read_text_file(filename).split('\n')
- for line in file_contents:
- line = test_expectations.strip_comments(line)
- if line:
- tests.append(line)
- except IOError, e:
- if e.errno == errno.ENOENT:
- _log.critical('')
- _log.critical('--test-list file "%s" not found' % filename)
- raise
- return tests
-
-
-# FIXME: These two free functions belong either on manager (since it's the only one
-# which uses them) or in a different file (if they need to be re-used).
-def test_key(port, test_name):
- """Turns a test name into a list with two sublists, the natural key of the
- dirname, and the natural key of the basename.
-
- This can be used when sorting paths so that files in a directory.
- directory are kept together rather than being mixed in with files in
- subdirectories."""
- dirname, basename = port.split_test(test_name)
- return (natural_sort_key(dirname + port.TEST_PATH_SEPARATOR), natural_sort_key(basename))
-
-
-def natural_sort_key(string_to_split):
- """ Turn a string into a list of string and number chunks.
- "z23a" -> ["z", 23, "a"]
-
- Can be used to implement "natural sort" order. See:
- http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
- http://nedbatchelder.com/blog/200712.html#e20071211T054956
- """
- def tryint(val):
- try:
- return int(val)
- except ValueError:
- return val
-
- return [tryint(chunk) for chunk in re.split('(\d+)', string_to_split)]
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
index 576d423af..ce511813d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/manager_unittest.py
@@ -30,309 +30,27 @@
"""Unit tests for manager.py."""
-import StringIO
import sys
import time
import unittest
-from webkitpy.common.system.filesystem_mock import MockFileSystem
-from webkitpy.common.system import outputcapture
-from webkitpy.thirdparty.mock import Mock
-from webkitpy import layout_tests
-from webkitpy.layout_tests.port import port_testcase
-
-from webkitpy import layout_tests
-from webkitpy.layout_tests import run_webkit_tests
-from webkitpy.layout_tests.controllers import manager
-from webkitpy.layout_tests.controllers.manager import interpret_test_failures, Manager, natural_sort_key, test_key, TestRunInterruptedException, TestShard
-from webkitpy.layout_tests.models import result_summary
+from webkitpy.common.host_mock import MockHost
+from webkitpy.layout_tests.controllers.manager import Manager, interpret_test_failures, summarize_results
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models.result_summary import ResultSummary
-from webkitpy.layout_tests.models.test_expectations import TestExpectations
-from webkitpy.layout_tests.models.test_results import TestResult
-from webkitpy.layout_tests.views import printing
+from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
-from webkitpy.common.system.executive_mock import MockExecutive
-from webkitpy.common.host_mock import MockHost
-
-
-class ManagerWrapper(Manager):
- def __init__(self, ref_tests, **kwargs):
- Manager.__init__(self, **kwargs)
- self._ref_tests = ref_tests
-
- def _get_test_input_for_file(self, test_file):
- return test_file
-
- def _is_ref_test(self, test_input):
- return test_input in self._ref_tests
-
-
-class ShardingTests(unittest.TestCase):
- test_list = [
- "http/tests/websocket/tests/unicode.htm",
- "animations/keyframes.html",
- "http/tests/security/view-source-no-refresh.html",
- "http/tests/websocket/tests/websocket-protocol-ignored.html",
- "fast/css/display-none-inline-style-change-crash.html",
- "http/tests/xmlhttprequest/supported-xml-content-types.html",
- "dom/html/level2/html/HTMLAnchorElement03.html",
- "ietestcenter/Javascript/11.1.5_4-4-c-1.html",
- "dom/html/level2/html/HTMLAnchorElement06.html",
- "perf/object-keys.html",
- ]
-
- ref_tests = [
- "http/tests/security/view-source-no-refresh.html",
- "http/tests/websocket/tests/websocket-protocol-ignored.html",
- "ietestcenter/Javascript/11.1.5_4-4-c-1.html",
- "dom/html/level2/html/HTMLAnchorElement06.html",
- ]
-
- def get_shards(self, num_workers, fully_parallel, shard_ref_tests=False, test_list=None, max_locked_shards=None):
- test_list = test_list or self.test_list
- host = MockHost()
- port = host.port_factory.get(port_name='test')
- port._filesystem = MockFileSystem()
- options = MockOptions(max_locked_shards=max_locked_shards)
- self.manager = ManagerWrapper(self.ref_tests, port=port, options=options, printer=Mock())
- return self.manager._shard_tests(test_list, num_workers, fully_parallel, shard_ref_tests)
-
- def test_shard_by_dir(self):
- locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False)
-
- # Note that although there are tests in multiple dirs that need locks,
- # they are crammed into a single shard in order to reduce the # of
- # workers hitting the server at once.
- self.assertEquals(locked,
- [TestShard('locked_shard_1',
- ['http/tests/security/view-source-no-refresh.html',
- 'http/tests/websocket/tests/unicode.htm',
- 'http/tests/websocket/tests/websocket-protocol-ignored.html',
- 'http/tests/xmlhttprequest/supported-xml-content-types.html',
- 'perf/object-keys.html'])])
- self.assertEquals(unlocked,
- [TestShard('animations',
- ['animations/keyframes.html']),
- TestShard('dom/html/level2/html',
- ['dom/html/level2/html/HTMLAnchorElement03.html',
- 'dom/html/level2/html/HTMLAnchorElement06.html']),
- TestShard('fast/css',
- ['fast/css/display-none-inline-style-change-crash.html']),
- TestShard('ietestcenter/Javascript',
- ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
-
- def test_shard_by_dir_sharding_ref_tests(self):
- locked, unlocked = self.get_shards(num_workers=2, fully_parallel=False, shard_ref_tests=True)
-
- # Note that although there are tests in multiple dirs that need locks,
- # they are crammed into a single shard in order to reduce the # of
- # workers hitting the server at once.
- self.assertEquals(locked,
- [TestShard('locked_shard_1',
- ['http/tests/websocket/tests/unicode.htm',
- 'http/tests/xmlhttprequest/supported-xml-content-types.html',
- 'perf/object-keys.html',
- 'http/tests/security/view-source-no-refresh.html',
- 'http/tests/websocket/tests/websocket-protocol-ignored.html'])])
- self.assertEquals(unlocked,
- [TestShard('animations',
- ['animations/keyframes.html']),
- TestShard('dom/html/level2/html',
- ['dom/html/level2/html/HTMLAnchorElement03.html']),
- TestShard('fast/css',
- ['fast/css/display-none-inline-style-change-crash.html']),
- TestShard('~ref:dom/html/level2/html',
- ['dom/html/level2/html/HTMLAnchorElement06.html']),
- TestShard('~ref:ietestcenter/Javascript',
- ['ietestcenter/Javascript/11.1.5_4-4-c-1.html'])])
-
- def test_shard_every_file(self):
- locked, unlocked = self.get_shards(num_workers=2, fully_parallel=True)
- self.assertEquals(locked,
- [TestShard('.', ['http/tests/websocket/tests/unicode.htm']),
- TestShard('.', ['http/tests/security/view-source-no-refresh.html']),
- TestShard('.', ['http/tests/websocket/tests/websocket-protocol-ignored.html']),
- TestShard('.', ['http/tests/xmlhttprequest/supported-xml-content-types.html']),
- TestShard('.', ['perf/object-keys.html'])]),
- self.assertEquals(unlocked,
- [TestShard('.', ['animations/keyframes.html']),
- TestShard('.', ['fast/css/display-none-inline-style-change-crash.html']),
- TestShard('.', ['dom/html/level2/html/HTMLAnchorElement03.html']),
- TestShard('.', ['ietestcenter/Javascript/11.1.5_4-4-c-1.html']),
- TestShard('.', ['dom/html/level2/html/HTMLAnchorElement06.html'])])
-
- def test_shard_in_two(self):
- locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False)
- self.assertEquals(locked,
- [TestShard('locked_tests',
- ['http/tests/websocket/tests/unicode.htm',
- 'http/tests/security/view-source-no-refresh.html',
- 'http/tests/websocket/tests/websocket-protocol-ignored.html',
- 'http/tests/xmlhttprequest/supported-xml-content-types.html',
- 'perf/object-keys.html'])])
- self.assertEquals(unlocked,
- [TestShard('unlocked_tests',
- ['animations/keyframes.html',
- 'fast/css/display-none-inline-style-change-crash.html',
- 'dom/html/level2/html/HTMLAnchorElement03.html',
- 'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
- 'dom/html/level2/html/HTMLAnchorElement06.html'])])
-
- def test_shard_in_two_sharding_ref_tests(self):
- locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False, shard_ref_tests=True)
- self.assertEquals(locked,
- [TestShard('locked_tests',
- ['http/tests/websocket/tests/unicode.htm',
- 'http/tests/xmlhttprequest/supported-xml-content-types.html',
- 'perf/object-keys.html',
- 'http/tests/security/view-source-no-refresh.html',
- 'http/tests/websocket/tests/websocket-protocol-ignored.html'])])
- self.assertEquals(unlocked,
- [TestShard('unlocked_tests',
- ['animations/keyframes.html',
- 'fast/css/display-none-inline-style-change-crash.html',
- 'dom/html/level2/html/HTMLAnchorElement03.html',
- 'ietestcenter/Javascript/11.1.5_4-4-c-1.html',
- 'dom/html/level2/html/HTMLAnchorElement06.html'])])
-
- def test_shard_in_two_has_no_locked_shards(self):
- locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
- test_list=['animations/keyframe.html'])
- self.assertEquals(len(locked), 0)
- self.assertEquals(len(unlocked), 1)
-
- def test_shard_in_two_has_no_unlocked_shards(self):
- locked, unlocked = self.get_shards(num_workers=1, fully_parallel=False,
- test_list=['http/tests/webcoket/tests/unicode.htm'])
- self.assertEquals(len(locked), 1)
- self.assertEquals(len(unlocked), 0)
-
- def test_multiple_locked_shards(self):
- locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False, max_locked_shards=2)
- self.assertEqual(locked,
- [TestShard('locked_shard_1',
- ['http/tests/security/view-source-no-refresh.html',
- 'http/tests/websocket/tests/unicode.htm',
- 'http/tests/websocket/tests/websocket-protocol-ignored.html']),
- TestShard('locked_shard_2',
- ['http/tests/xmlhttprequest/supported-xml-content-types.html',
- 'perf/object-keys.html'])])
-
- locked, unlocked = self.get_shards(num_workers=4, fully_parallel=False)
- self.assertEquals(locked,
- [TestShard('locked_shard_1',
- ['http/tests/security/view-source-no-refresh.html',
- 'http/tests/websocket/tests/unicode.htm',
- 'http/tests/websocket/tests/websocket-protocol-ignored.html',
- 'http/tests/xmlhttprequest/supported-xml-content-types.html',
- 'perf/object-keys.html'])])
-
-
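Editor's note: the ShardingTests above exercise the grouping behaviour being tested: in non-fully-parallel mode, tests are grouped by directory, and all lock-requiring tests (http, websocket, perf) are squeezed into a small number of "locked" shards so few workers hit the servers at once. A simplified sketch of that grouping idea; the real _shard_tests() handles more cases (max_locked_shards, ~ref shards, fully-parallel mode) and these names are illustrative only:

    from collections import defaultdict

    def shard_by_directory(test_names, needs_lock):
        """needs_lock(test) -> True for tests that must hold the http/websocket lock."""
        dirname = lambda test: test.rsplit('/', 1)[0] if '/' in test else '.'
        locked, by_dir = [], defaultdict(list)
        for test in sorted(test_names):
            if needs_lock(test):
                # Lock-requiring tests are crammed together to limit concurrent server load.
                locked.append(test)
            else:
                by_dir[dirname(test)].append(test)
        locked_shards = [('locked_shard_1', locked)] if locked else []
        unlocked_shards = sorted(by_dir.items())
        return locked_shards, unlocked_shards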
-class LockCheckingManager(Manager):
- def __init__(self, port, options, printer, tester, http_lock):
- super(LockCheckingManager, self).__init__(port, options, printer)
- self._finished_list_called = False
- self._tester = tester
- self._should_have_http_lock = http_lock
-
- def handle_finished_list(self, source, list_name, num_tests, elapsed_time):
- if not self._finished_list_called:
- self._tester.assertEquals(list_name, 'locked_tests')
- self._tester.assertTrue(self._remaining_locked_shards)
- self._tester.assertTrue(self._has_http_lock is self._should_have_http_lock)
-
- super(LockCheckingManager, self).handle_finished_list(source, list_name, num_tests, elapsed_time)
-
- if not self._finished_list_called:
- self._tester.assertEquals(self._remaining_locked_shards, [])
- self._tester.assertFalse(self._has_http_lock)
- self._finished_list_called = True
class ManagerTest(unittest.TestCase):
- def get_options(self):
- return MockOptions(pixel_tests=False, new_baseline=False, time_out_ms=6000, slow_time_out_ms=30000, worker_model='inline')
-
- def test_http_locking(tester):
- options, args = run_webkit_tests.parse_args(['--platform=test', '--print=nothing', 'http/tests/passes', 'passes'])
- host = MockHost()
- port = host.port_factory.get(port_name=options.platform, options=options)
- run_webkit_tests._set_up_derived_options(port, options)
- printer = printing.Printer(port, options, StringIO.StringIO(), StringIO.StringIO())
- manager = LockCheckingManager(port, options, printer, tester, True)
- num_unexpected_results = manager.run(args)
- printer.cleanup()
- tester.assertEquals(num_unexpected_results, 0)
-
- def test_perf_locking(tester):
- options, args = run_webkit_tests.parse_args(['--platform=test', '--print=nothing', '--no-http', 'passes', 'perf/'])
- host = MockHost()
- port = host.port_factory.get(port_name=options.platform, options=options)
- run_webkit_tests._set_up_derived_options(port, options)
- printer = printing.Printer(port, options, StringIO.StringIO(), StringIO.StringIO())
- manager = LockCheckingManager(port, options, printer, tester, False)
- num_unexpected_results = manager.run(args)
- printer.cleanup()
- tester.assertEquals(num_unexpected_results, 0)
-
- def test_interrupt_if_at_failure_limits(self):
- port = Mock() # FIXME: This should be a tighter mock.
- port.TEST_PATH_SEPARATOR = '/'
- port._filesystem = MockFileSystem()
- manager = Manager(port=port, options=MockOptions(), printer=Mock())
-
- manager._options = MockOptions(exit_after_n_failures=None, exit_after_n_crashes_or_timeouts=None)
- manager._test_files = ['foo/bar.html', 'baz.html']
- manager._test_is_slow = lambda test_name: False
-
- result_summary = ResultSummary(expectations=Mock(), test_files=manager._test_files)
- result_summary.unexpected_failures = 100
- result_summary.unexpected_crashes = 50
- result_summary.unexpected_timeouts = 50
- # No exception when the exit_after* options are None.
- manager._interrupt_if_at_failure_limits(result_summary)
-
- # No exception when we haven't hit the limit yet.
- manager._options.exit_after_n_failures = 101
- manager._options.exit_after_n_crashes_or_timeouts = 101
- manager._interrupt_if_at_failure_limits(result_summary)
-
- # Interrupt if we've exceeded either limit:
- manager._options.exit_after_n_crashes_or_timeouts = 10
- self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)
-
- self.assertEquals(result_summary.results['foo/bar.html'].type, test_expectations.SKIP)
- self.assertEquals(result_summary.results['baz.html'].type, test_expectations.SKIP)
-
- manager._options.exit_after_n_crashes_or_timeouts = None
- manager._options.exit_after_n_failures = 10
- exception = self.assertRaises(TestRunInterruptedException, manager._interrupt_if_at_failure_limits, result_summary)
-
- def test_update_summary_with_result(self):
- host = MockHost()
- port = host.port_factory.get('test-win-xp')
- test = 'failures/expected/reftest.html'
- port.expectations_dict = lambda: {'': 'WONTFIX : failures/expected/reftest.html = IMAGE'}
- expectations = TestExpectations(port, tests=[test])
- # Reftests expected to be image mismatch should be respected when pixel_tests=False.
- manager = Manager(port=port, options=MockOptions(pixel_tests=False, exit_after_n_failures=None, exit_after_n_crashes_or_timeouts=None), printer=Mock())
- manager._expectations = expectations
- result_summary = ResultSummary(expectations=expectations, test_files=[test])
- result = TestResult(test_name=test, failures=[test_failures.FailureReftestMismatchDidNotOccur()])
- manager._update_summary_with_result(result_summary, result)
- self.assertEquals(1, result_summary.expected)
- self.assertEquals(0, result_summary.unexpected)
-
def test_needs_servers(self):
def get_manager_with_tests(test_names):
port = Mock() # FIXME: Use a tighter mock.
port.TEST_PATH_SEPARATOR = '/'
- manager = Manager(port, options=MockOptions(http=True), printer=Mock())
- manager._test_files = set(test_names)
- manager._test_files_list = test_names
+ manager = Manager(port, options=MockOptions(http=True, max_locked_shards=1), printer=Mock())
+ manager._test_names = test_names
return manager
manager = get_manager_with_tests(['fast/html'])
@@ -345,7 +63,7 @@ class ManagerTest(unittest.TestCase):
def get_manager_with_tests(test_names):
host = MockHost()
port = host.port_factory.get()
- manager = Manager(port, options=MockOptions(test_list=None, http=True), printer=Mock())
+ manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
manager._collect_tests(test_names)
return manager
@@ -366,110 +84,17 @@ class ManagerTest(unittest.TestCase):
def get_manager_with_tests(test_names):
host = MockHost()
port = host.port_factory.get('test-mac-leopard')
- manager = Manager(port, options=MockOptions(test_list=None, http=True), printer=Mock())
+ manager = Manager(port, options=MockOptions(test_list=None, http=True, max_locked_shards=1), printer=Mock())
manager._collect_tests(test_names)
return manager
host = MockHost()
port = host.port_factory.get('test-mac-leopard')
tests = ['failures/expected/crash.html']
expectations = test_expectations.TestExpectations(port, tests)
- rs = result_summary.ResultSummary(expectations, tests)
+ rs = ResultSummary(expectations, tests, 1, set())
manager = get_manager_with_tests(tests)
manager._look_for_new_crash_logs(rs, time.time())
- def test_servers_started(self):
-
- def start_http_server(number_of_servers=None):
- self.http_started = True
-
- def start_websocket_server():
- self.websocket_started = True
-
- def stop_http_server():
- self.http_stopped = True
-
- def stop_websocket_server():
- self.websocket_stopped = True
-
- host = MockHost()
- port = host.port_factory.get('test-mac-leopard')
- port.start_http_server = start_http_server
- port.start_websocket_server = start_websocket_server
- port.stop_http_server = stop_http_server
- port.stop_websocket_server = stop_websocket_server
-
- self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
- manager = Manager(port=port, options=MockOptions(http=True), printer=Mock())
- manager._test_files = ['http/tests/pass.txt']
- manager.start_servers_with_lock(number_of_servers=4)
- self.assertEquals(self.http_started, True)
- self.assertEquals(self.websocket_started, False)
- manager.stop_servers_with_lock()
- self.assertEquals(self.http_stopped, True)
- self.assertEquals(self.websocket_stopped, False)
-
- self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
- manager = Manager(port=port, options=MockOptions(http=True), printer=Mock())
- manager._test_files = ['websocket/pass.txt']
- manager.start_servers_with_lock(number_of_servers=4)
- self.assertEquals(self.http_started, True)
- self.assertEquals(self.websocket_started, True)
- manager.stop_servers_with_lock()
- self.assertEquals(self.http_stopped, True)
- self.assertEquals(self.websocket_stopped, True)
-
- self.http_started = self.http_stopped = self.websocket_started = self.websocket_stopped = False
- manager = Manager(port=port, options=MockOptions(http=True), printer=Mock())
- manager._test_files = ['perf/foo/test.html']
- manager.start_servers_with_lock(number_of_servers=4)
- self.assertEquals(self.http_started, False)
- self.assertEquals(self.websocket_started, False)
- manager.stop_servers_with_lock()
- self.assertEquals(self.http_stopped, False)
- self.assertEquals(self.websocket_stopped, False)
-
-
-
-class NaturalCompareTest(unittest.TestCase):
- def assert_cmp(self, x, y, result):
- self.assertEquals(cmp(natural_sort_key(x), natural_sort_key(y)), result)
-
- def test_natural_compare(self):
- self.assert_cmp('a', 'a', 0)
- self.assert_cmp('ab', 'a', 1)
- self.assert_cmp('a', 'ab', -1)
- self.assert_cmp('', '', 0)
- self.assert_cmp('', 'ab', -1)
- self.assert_cmp('1', '2', -1)
- self.assert_cmp('2', '1', 1)
- self.assert_cmp('1', '10', -1)
- self.assert_cmp('2', '10', -1)
- self.assert_cmp('foo_1.html', 'foo_2.html', -1)
- self.assert_cmp('foo_1.1.html', 'foo_2.html', -1)
- self.assert_cmp('foo_1.html', 'foo_10.html', -1)
- self.assert_cmp('foo_2.html', 'foo_10.html', -1)
- self.assert_cmp('foo_23.html', 'foo_10.html', 1)
- self.assert_cmp('foo_23.html', 'foo_100.html', -1)
-
-
-class KeyCompareTest(unittest.TestCase):
- def setUp(self):
- host = MockHost()
- self.port = host.port_factory.get('test')
-
- def assert_cmp(self, x, y, result):
- self.assertEquals(cmp(test_key(self.port, x), test_key(self.port, y)), result)
-
- def test_test_key(self):
- self.assert_cmp('/a', '/a', 0)
- self.assert_cmp('/a', '/b', -1)
- self.assert_cmp('/a2', '/a10', -1)
- self.assert_cmp('/a2/foo', '/a10/foo', -1)
- self.assert_cmp('/a/foo11', '/a/foo2', 1)
- self.assert_cmp('/ab', '/a/a/b', -1)
- self.assert_cmp('/a/a/b', '/ab', 1)
- self.assert_cmp('/foo-bar/baz', '/foo/baz', -1)
-
class ResultSummaryTest(unittest.TestCase):
@@ -509,7 +134,7 @@ class ResultSummaryTest(unittest.TestCase):
def get_result_summary(self, port, test_names, expectations_str):
port.expectations_dict = lambda: {'': expectations_str}
expectations = test_expectations.TestExpectations(port, test_names)
- return test_names, result_summary.ResultSummary(expectations, test_names), expectations
+ return test_names, ResultSummary(expectations, test_names, 1, set()), expectations
# FIXME: Use this to test more of summarize_results. This was moved from printing_unittest.py.
def summarized_results(self, port, expected, passing, flaky, extra_tests=[], extra_expectations=None):
@@ -545,8 +170,8 @@ class ResultSummaryTest(unittest.TestCase):
retry.add(self.get_result('passes/text.html'), True, test_is_slow)
retry.add(self.get_result('failures/expected/timeout.html'), True, test_is_slow)
retry.add(self.get_result('failures/expected/crash.html'), True, test_is_slow)
- unexpected_results = manager.summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
- expected_results = manager.summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=False, interrupted=False)
+ unexpected_results = summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=True, interrupted=False)
+ expected_results = summarize_results(port, exp, rs, retry, test_timings={}, only_unexpected=False, interrupted=False)
return expected_results, unexpected_results
def test_no_svn_revision(self):
@@ -569,6 +194,3 @@ class ResultSummaryTest(unittest.TestCase):
port._filesystem.write_text_file(port._filesystem.join(port.layout_tests_dir(), "failures/expected/wontfix.html"), "Dummy test contents")
expected_results, unexpected_results = self.summarized_results(port, expected=False, passing=False, flaky=False, extra_tests=['failures/expected/wontfix.html'], extra_expectations='BUGX WONTFIX : failures/expected/wontfix.html = TEXT\n')
self.assertTrue(expected_results['tests']['failures']['expected']['wontfix.html']['wontfix'])
-
-if __name__ == '__main__':
- port_testcase.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
index 88cbabf23..b36130ded 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/single_test_runner.py
@@ -264,12 +264,18 @@ class SingleTestRunner(object):
failures.append(test_failures.FailureMissingImageHash())
elif driver_output.image_hash != expected_driver_output.image_hash:
diff_result = self._port.diff_image(driver_output.image, expected_driver_output.image)
- driver_output.image_diff = diff_result[0]
- if driver_output.image_diff:
- failures.append(test_failures.FailureImageHashMismatch(diff_result[1]))
+ err_str = diff_result[2]
+ if err_str:
+ _log.warning(' %s : %s' % (self._test_name, err_str))
+ failures.append(test_failures.FailureImageHashMismatch())
+ driver_output.error = (driver_output.error or '') + err_str
else:
- # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
- _log.warning(' %s -> pixel hash failed (but pixel test still passes)' % self._test_name)
+ driver_output.image_diff = diff_result[0]
+ if driver_output.image_diff:
+ failures.append(test_failures.FailureImageHashMismatch(diff_result[1]))
+ else:
+ # See https://bugs.webkit.org/show_bug.cgi?id=69444 for why this isn't a full failure.
+ _log.warning(' %s -> pixel hash failed (but pixel test still passes)' % self._test_name)
return failures
def _run_reftest(self):
@@ -317,4 +323,8 @@ class SingleTestRunner(object):
failures.append(test_failures.FailureReftestMismatchDidNotOccur(reference_filename))
elif driver_output1.image_hash != driver_output2.image_hash:
failures.append(test_failures.FailureReftestMismatch(reference_filename))
+
+ # recompute in case we added to stderr during diff_image
+ has_stderr = driver_output1.has_stderr() or driver_output2.has_stderr()
+
return TestResult(self._test_name, failures, total_test_time, has_stderr)
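Editor's note: the hunks above switch diff_image() from a (diff_image, diff_percent) pair to a (diff_image, diff_percent, err_str) triple so callers can surface ImageDiff failures instead of silently passing. A hedged sketch of the caller-side pattern; the helper name and logging are illustrative, not the exact webkitpy code:

    def images_match(port, expected_contents, actual_contents, log):
        # diff_image() now returns (diff_image, diff_percent, err_str).
        diff_image, diff_percent, err_str = port.diff_image(expected_contents, actual_contents)
        if err_str:
            # ImageDiff failed to run (missing binary, crash, unexpected exit code):
            # report the error rather than letting the test silently pass.
            log.warning('image diff error: %s' % err_str)
            return False
        return diff_image is None   # no diff image means the pixels matched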
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
index 243a11d8d..67f42e3b6 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer.py
@@ -69,7 +69,7 @@ def write_test_result(filesystem, port, test_name, driver_output,
# FIXME: This work should be done earlier in the pipeline (e.g., when we compare images for non-ref tests).
# FIXME: We should always have 2 images here.
if driver_output.image and expected_driver_output.image:
- diff_image, diff_percent = port.diff_image(driver_output.image, expected_driver_output.image, tolerance=0)
+ diff_image, diff_percent, err_str = port.diff_image(driver_output.image, expected_driver_output.image, tolerance=0)
if diff_image:
writer.write_image_diff_files(diff_image)
failure.diff_percent = diff_percent
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
index 3b9b522ad..dfd604187 100644
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/controllers/test_result_writer_unittest.py
@@ -42,7 +42,7 @@ class TestResultWriterTest(unittest.TestCase):
class ImageDiffTestPort(TestPort):
def diff_image(self, expected_contents, actual_contents, tolerance=None):
used_tolerance_values.append(tolerance)
- return (True, 1)
+ return (True, 1, None)
host = MockHost()
port = ImageDiffTestPort(host)
diff --git a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py b/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
deleted file mode 100644
index b1583fff3..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/controllers/worker.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# Copyright (C) 2011 Google Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"""Handle messages from the Manager and executes actual tests."""
-
-import logging
-import threading
-import time
-
-from webkitpy.layout_tests.controllers import single_test_runner
-from webkitpy.layout_tests.models import test_expectations
-from webkitpy.layout_tests.models import test_results
-
-
-_log = logging.getLogger(__name__)
-
-
-class Worker(object):
- def __init__(self, caller, results_directory, options):
- self._caller = caller
- self._worker_number = caller.worker_number
- self._name = caller.name
- self._results_directory = results_directory
- self._options = options
-
- # The remaining fields are initialized in start()
- self._host = None
- self._port = None
- self._batch_size = None
- self._batch_count = None
- self._filesystem = None
- self._driver = None
- self._tests_run_file = None
- self._tests_run_filename = None
-
- def __del__(self):
- self.stop()
-
- def start(self):
- """This method is called when the object is starting to be used and it is safe
- for the object to create state that does not need to be pickled (usually this means
- it is called in a child process)."""
- self._host = self._caller.host
- self._filesystem = self._host.filesystem
- self._port = self._host.port_factory.get(self._options.platform, self._options)
-
- self._batch_count = 0
- self._batch_size = self._options.batch_size or 0
- tests_run_filename = self._filesystem.join(self._results_directory, "tests_run%d.txt" % self._worker_number)
- self._tests_run_file = self._filesystem.open_text_file_for_writing(tests_run_filename)
-
- def handle(self, name, source, test_list_name, test_inputs):
- assert name == 'test_list'
- start_time = time.time()
- for test_input in test_inputs:
- self._run_test(test_input)
- elapsed_time = time.time() - start_time
- self._caller.post('finished_test_list', test_list_name, len(test_inputs), elapsed_time)
-
- def _update_test_input(self, test_input):
- if test_input.reference_files is None:
- # Lazy initialization.
- test_input.reference_files = self._port.reference_files(test_input.test_name)
- if test_input.reference_files:
- test_input.should_run_pixel_test = True
- else:
- test_input.should_run_pixel_test = self._port.should_run_as_pixel_test(test_input)
-
- def _run_test(self, test_input):
- self._update_test_input(test_input)
- test_timeout_sec = self._timeout(test_input)
- start = time.time()
- self._caller.post('started_test', test_input, test_timeout_sec)
-
- result = self._run_test_with_timeout(test_input, test_timeout_sec)
-
- elapsed_time = time.time() - start
- self._caller.post('finished_test', result, elapsed_time)
-
- self._clean_up_after_test(test_input, result)
-
- def stop(self):
- _log.debug("%s cleaning up" % self._name)
- self._kill_driver()
- if self._tests_run_file:
- self._tests_run_file.close()
- self._tests_run_file = None
-
- def _timeout(self, test_input):
- """Compute the appropriate timeout value for a test."""
- # The DumpRenderTree watchdog uses 2.5x the timeout; we want to be
- # larger than that. We also add a little more padding if we're
- # running tests in a separate thread.
- #
- # Note that we need to convert the test timeout from a
- # string value in milliseconds to a float for Python.
- driver_timeout_sec = 3.0 * float(test_input.timeout) / 1000.0
- if not self._options.run_singly:
- return driver_timeout_sec
-
- thread_padding_sec = 1.0
- thread_timeout_sec = driver_timeout_sec + thread_padding_sec
- return thread_timeout_sec
-
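Editor's note: the removed _timeout() above scales the per-test timeout so the Python side outlives DumpRenderTree's own 2.5x watchdog. Worked through with the default 6000 ms test timeout:

    timeout_ms = 6000                                        # per-test timeout from the manager, in msecs
    driver_timeout_sec = 3.0 * float(timeout_ms) / 1000.0    # 18.0 s, above the 2.5x watchdog (15 s)
    thread_timeout_sec = driver_timeout_sec + 1.0            # 19.0 s when --run-singly adds a thread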
- def _kill_driver(self):
- # Be careful about how and when we kill the driver; if driver.stop()
- # raises an exception, this routine may get re-entered via __del__.
- driver = self._driver
- self._driver = None
- if driver:
- _log.debug("%s killing driver" % self._name)
- driver.stop()
-
- def _run_test_with_timeout(self, test_input, timeout):
- if self._options.run_singly:
- return self._run_test_in_another_thread(test_input, timeout)
- return self._run_test_in_this_thread(test_input)
-
- def _clean_up_after_test(self, test_input, result):
- self._batch_count += 1
- test_name = test_input.test_name
- self._tests_run_file.write(test_name + "\n")
-
- if result.failures:
- # Check and kill DumpRenderTree if we need to.
- if any([f.driver_needs_restart() for f in result.failures]):
- self._kill_driver()
- # Reset the batch count since the shell just bounced.
- self._batch_count = 0
-
- # Print the error message(s).
- _log.debug("%s %s failed:" % (self._name, test_name))
- for f in result.failures:
- _log.debug("%s %s" % (self._name, f.message()))
- elif result.type == test_expectations.SKIP:
- _log.debug("%s %s skipped" % (self._name, test_name))
- else:
- _log.debug("%s %s passed" % (self._name, test_name))
-
- if self._batch_size > 0 and self._batch_count >= self._batch_size:
- self._kill_driver()
- self._batch_count = 0
-
- def _run_test_in_another_thread(self, test_input, thread_timeout_sec):
- """Run a test in a separate thread, enforcing a hard time limit.
-
- Since we can only detect the termination of a thread, not any internal
- state or progress, we can only run per-test timeouts when running test
- files singly.
-
- Args:
- test_input: Object containing the test filename and timeout
- thread_timeout_sec: time to wait before killing the driver process.
- Returns:
- A TestResult
- """
- worker = self
-
- driver = self._port.create_driver(self._worker_number)
-
- class SingleTestThread(threading.Thread):
- def __init__(self):
- threading.Thread.__init__(self)
- self.result = None
-
- def run(self):
- self.result = worker._run_single_test(driver, test_input)
-
- thread = SingleTestThread()
- thread.start()
- thread.join(thread_timeout_sec)
- result = thread.result
- if thread.isAlive():
- # If join() returned with the thread still running, the
- # DumpRenderTree is completely hung and there's nothing
- # more we can do with it. We have to kill all the
- # DumpRenderTrees to free it up. If we're running more than
- # one DumpRenderTree thread, we'll end up killing the other
- # DumpRenderTrees too, introducing spurious crashes. We accept
- # that tradeoff in order to avoid losing the rest of this
- # thread's results.
- _log.error('Test thread hung: killing all DumpRenderTrees')
-
- driver.stop()
-
- if not result:
- result = test_results.TestResult(test_input.test_name, failures=[], test_run_time=0)
- return result
-
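Editor's note: the removed _run_test_in_another_thread() above relies on threading.Thread.join(timeout) for a hard per-test limit: join() returns either when the target finishes or when the timeout expires, and is_alive() afterwards tells you which. A minimal standalone sketch of that pattern, unrelated to DumpRenderTree itself:

    import threading, time

    def run_with_time_limit(func, limit_sec):
        result = {}
        thread = threading.Thread(target=lambda: result.setdefault('value', func()), daemon=True)
        thread.start()
        thread.join(limit_sec)         # returns when func() finishes or limit_sec elapses
        if thread.is_alive():
            return None                # hung: the caller must kill the underlying process itself
        return result.get('value')

    print(run_with_time_limit(lambda: 42, limit_sec=1.0))           # 42
    print(run_with_time_limit(lambda: time.sleep(5) or 42, 0.1))    # None (timed out)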
- def _run_test_in_this_thread(self, test_input):
- """Run a single test file using a shared DumpRenderTree process.
-
- Args:
- test_input: Object containing the test filename, uri and timeout
-
- Returns: a TestResult object.
- """
- if self._driver and self._driver.has_crashed():
- self._kill_driver()
- if not self._driver:
- self._driver = self._port.create_driver(self._worker_number)
- return self._run_single_test(self._driver, test_input)
-
- def _run_single_test(self, driver, test_input):
- return single_test_runner.run_single_test(self._port, self._options,
- test_input, driver, self._name)
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py b/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
index b0512127f..5bb501061 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/result_summary.py
@@ -31,7 +31,7 @@ from webkitpy.layout_tests.models.test_expectations import TestExpectations, SKI
class ResultSummary(object):
- def __init__(self, expectations, test_files, iterations=1):
+ def __init__(self, expectations, test_files, iterations, expected_skips):
self.total = len(test_files) * iterations
self.remaining = self.total
self.expectations = expectations
@@ -48,8 +48,8 @@ class ResultSummary(object):
self.failures = {}
self.total_failures = 0
self.expected_skips = 0
- self.total_tests_by_expectation[SKIP] = 0
- self.tests_by_expectation[SKIP] = set()
+ self.total_tests_by_expectation[SKIP] = len(expected_skips)
+ self.tests_by_expectation[SKIP] = expected_skips
for expectation in TestExpectations.EXPECTATIONS.values():
self.tests_by_expectation[expectation] = set()
self.total_tests_by_expectation[expectation] = 0
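Editor's note: with this change ResultSummary no longer defaults the iteration count and now takes the set of tests skipped up front. A fragment mirroring the updated unit-test usage above; `expectations` and `tests` are built the same way there (a TestExpectations instance and the list of test names):

    rs = ResultSummary(expectations, tests, 1, set())
    # Per the new __init__: rs.total == len(tests) * iterations, rs.remaining == rs.total,
    # and rs.tests_by_expectation[SKIP] is exactly the expected_skips set passed in.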
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
index 9c6d478d4..bab741839 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py
@@ -96,23 +96,6 @@ def suffixes_for_expectations(expectations):
return set(suffixes)
-# FIXME: This method is no longer used here in this module. Remove remaining callsite in manager.py and this method.
-def strip_comments(line):
- """Strips comments from a line and return None if the line is empty
- or else the contents of line with leading and trailing spaces removed
- and all other whitespace collapsed"""
-
- commentIndex = line.find('//')
- if commentIndex is -1:
- commentIndex = len(line)
-
- line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
- if line == '':
- return None
- else:
- return line
-
-
class ParseError(Exception):
def __init__(self, warnings):
super(ParseError, self).__init__()
@@ -232,6 +215,10 @@ class TestExpectationParser(object):
expectation_line = TestExpectationLine()
expectation_line.original_string = test_name
expectation_line.modifiers = [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER]
+ # FIXME: It's not clear what the expectations for a skipped test should be; the expectations
+ # might be different for different entries in a Skipped file, or from the command line, or from
+ # only running parts of the tests. It's also not clear if it matters much.
+ expectation_line.modifiers.append(TestExpectationParser.WONTFIX_MODIFIER)
expectation_line.name = test_name
# FIXME: we should pass in a more descriptive string here.
expectation_line.filename = '<Skipped file>'
@@ -703,18 +690,16 @@ class TestExpectations(object):
'crash': CRASH,
'missing': MISSING}
- EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped'),
- PASS: ('pass', 'passes'),
- TEXT: ('text diff mismatch',
- 'text diff mismatch'),
- IMAGE: ('image mismatch', 'image mismatch'),
- IMAGE_PLUS_TEXT: ('image and text mismatch',
- 'image and text mismatch'),
- AUDIO: ('audio mismatch', 'audio mismatch'),
- CRASH: ('crash', 'crashes'),
- TIMEOUT: ('test timed out', 'tests timed out'),
- MISSING: ('no expected result found',
- 'no expected results found')}
+ # (aggregated by category, pass/fail/skip, type)
+ EXPECTATION_DESCRIPTIONS = {SKIP: ('skipped', 'skipped', ''),
+ PASS: ('passes', 'passed', ''),
+ TEXT: ('text failures', 'failed', ' (text diff)'),
+ IMAGE: ('image-only failures', 'failed', ' (image diff)'),
+ IMAGE_PLUS_TEXT: ('both image and text failures', 'failed', ' (both image and text diffs)'),
+ AUDIO: ('audio failures', 'failed', ' (audio diff)'),
+ CRASH: ('crashes', 'crashed', ''),
+ TIMEOUT: ('timeouts', 'timed out', ''),
+ MISSING: ('no expected results found', 'no expected result found', '')}
EXPECTATION_ORDER = (PASS, CRASH, TIMEOUT, MISSING, IMAGE_PLUS_TEXT, TEXT, IMAGE, AUDIO, SKIP)
@@ -759,7 +744,7 @@ class TestExpectations(object):
self._expectations += expectations
# FIXME: move ignore_tests into port.skipped_layout_tests()
- self._add_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
+ self.add_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
self._has_warnings = False
self._report_warnings()
@@ -867,10 +852,13 @@ class TestExpectations(object):
return TestExpectationSerializer.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)
- def remove_rebaselined_tests(self, except_these_tests):
- """Returns a copy of the expectations with the tests removed."""
+ def remove_rebaselined_tests(self, except_these_tests, filename):
+ """Returns a copy of the expectations in the file with the tests removed."""
def without_rebaseline_modifier(expectation):
- return not (not expectation.is_invalid() and expectation.name in except_these_tests and "rebaseline" in expectation.modifiers)
+ return not (not expectation.is_invalid() and
+ expectation.name in except_these_tests and
+ "rebaseline" in expectation.modifiers and
+ filename == expectation.filename)
return TestExpectationSerializer.list_to_string(filter(without_rebaseline_modifier, self._expectations))
@@ -882,7 +870,7 @@ class TestExpectations(object):
if self._is_lint_mode or self._test_config in expectation_line.matching_configurations:
self._model.add_expectation_line(expectation_line)
- def _add_skipped_tests(self, tests_to_skip):
+ def add_skipped_tests(self, tests_to_skip):
if not tests_to_skip:
return
for test in self._expectations:
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
index c780dac23..b65151d72 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_expectations_unittest.py
@@ -284,7 +284,7 @@ class SkippedTests(Base):
# Check that the expectation is for BUG_DUMMY SKIP : ... = PASS
self.assertEquals(exp.get_modifiers('failures/expected/text.html'),
- [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER])
+ [TestExpectationParser.DUMMY_BUG_MODIFIER, TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER])
self.assertEquals(exp.get_expectations('failures/expected/text.html'), set([PASS]))
def test_skipped_tests_work(self):
@@ -456,9 +456,9 @@ BUGY WIN DEBUG : failures/expected/foo.html = CRASH
class RebaseliningTest(Base):
"""Test rebaselining-specific functionality."""
- def assertRemove(self, input_expectations, tests, expected_expectations):
+ def assertRemove(self, input_expectations, tests, expected_expectations, filename):
self.parse_exp(input_expectations, is_lint_mode=False)
- actual_expectations = self._exp.remove_rebaselined_tests(tests)
+ actual_expectations = self._exp.remove_rebaselined_tests(tests, filename)
self.assertEqual(expected_expectations, actual_expectations)
def test_remove(self):
@@ -467,7 +467,19 @@ class RebaseliningTest(Base):
'BUGZ REBASELINE : failures/expected/crash.html = CRASH\n',
['failures/expected/text.html'],
'BUGY : failures/expected/image.html = IMAGE\n'
- 'BUGZ REBASELINE : failures/expected/crash.html = CRASH\n')
+ 'BUGZ REBASELINE : failures/expected/crash.html = CRASH\n',
+ 'expectations')
+
+ # test that we don't remove lines from the expectations if we're asking for the overrides
+ self.assertRemove('BUGX REBASELINE : failures/expected/text.html = TEXT\n'
+ 'BUGY : failures/expected/image.html = IMAGE\n'
+ 'BUGZ REBASELINE : failures/expected/crash.html = CRASH\n',
+ ['failures/expected/text.html'],
+ 'BUGX REBASELINE : failures/expected/text.html = TEXT\n'
+ 'BUGY : failures/expected/image.html = IMAGE\n'
+ 'BUGZ REBASELINE : failures/expected/crash.html = CRASH\n',
+ 'overrides')
+
def test_no_get_rebaselining_failures(self):
self.parse_exp(self.get_basic_expectations())
diff --git a/Tools/Scripts/webkitpy/layout_tests/models/test_input.py b/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
index 5a016f621..56f2d52bd 100644
--- a/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
+++ b/Tools/Scripts/webkitpy/layout_tests/models/test_input.py
@@ -32,21 +32,15 @@
class TestInput(object):
"""Groups information about a test for easy passing of data."""
- def __init__(self, test_name, timeout):
- """Holds the input parameters for a test.
- Args:
- test: name of test (not an absolute path!)
- timeout: Timeout in msecs the driver should use while running the test
- """
- self.test_name = test_name
- self.timeout = timeout
-
+ def __init__(self, test_name, timeout=None, requires_lock=None, reference_files=None, should_run_pixel_tests=None):
# TestInput objects are normally constructed by the manager and passed
- # to the workers, but these two fields are set lazily in the workers
- # because they require us to figure out if the test is a reftest or not
- # and we want to be able to do that in parallel.
- self.should_run_pixel_tests = None
- self.reference_files = None
+ # to the workers, but some fields are set lazily in the workers where possible
+ # because they require us to look at the filesystem and we want to be able to do that in parallel.
+ self.test_name = test_name
+ self.timeout = timeout # in msecs; should rename this for consistency
+ self.requires_lock = requires_lock
+ self.reference_files = reference_files
+ self.should_run_pixel_tests = should_run_pixel_tests
def __repr__(self):
- return "TestInput('%s', %d, %s, %s)" % (self.test_name, self.timeout, self.should_run_pixel_tests, self.reference_files)
+ return "TestInput('%s', timeout=%s, requires_lock=%s, reference_files=%s, should_run_pixel_tests=%s)" % (self.test_name, self.timeout, self.requires_lock, self.reference_files, self.should_run_pixel_tests)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base.py b/Tools/Scripts/webkitpy/layout_tests/port/base.py
index ad70f042d..cf7104c28 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/base.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base.py
@@ -142,12 +142,20 @@ class Port(object):
self._test_configuration = None
self._reftest_list = {}
self._results_directory = None
+ self._root_was_set = hasattr(options, 'root') and options.root
+
+ def additional_drt_flag(self):
+ return []
def default_pixel_tests(self):
# FIXME: Disable until they are run by default on build.webkit.org.
return False
def default_timeout_ms(self):
+ if self.get_option('webkit_test_runner'):
+ # Add some more time to WebKitTestRunner because it needs to synchronise the state
+ # with the web process and we want to detect if there is a problem with that in the driver.
+ return 50 * 1000
return 35 * 1000
def wdiff_available(self):
@@ -207,7 +215,7 @@ class Port(object):
"""This routine is used to ensure that the build is up to date
and all the needed binaries are present."""
# If we're using a pre-built copy of WebKit (--root), we assume it also includes a build of DRT.
- if not self.get_option('root') and self.get_option('build') and not self._build_driver():
+ if not self._root_was_set and self.get_option('build') and not self._build_driver():
return False
if not self._check_driver():
return False
@@ -311,7 +319,7 @@ class Port(object):
return expected_audio != actual_audio
def diff_image(self, expected_contents, actual_contents, tolerance=None):
- """Compare two images and return a tuple of an image diff, and a percentage difference (0-100).
+ """Compare two images and return a tuple of an image diff, a percentage difference (0-100), and an error string.
|tolerance| should be a percentage value (0.0 - 100.0).
If it is omitted, the port default tolerance value is used.
@@ -319,13 +327,14 @@ class Port(object):
If an error occurs (like ImageDiff isn't found, or crashes, we log an error and return True (for a diff).
"""
if not actual_contents and not expected_contents:
- return (None, 0)
+ return (None, 0, None)
if not actual_contents or not expected_contents:
- return (True, 0)
+ return (True, 0, None)
if not self._image_differ:
self._image_differ = image_diff.ImageDiffer(self)
self.set_option_default('tolerance', 0.1)
- tolerance = tolerance or self.get_option('tolerance')
+ if tolerance is None:
+ tolerance = self.get_option('tolerance')
return self._image_differ.diff_image(expected_contents, actual_contents, tolerance)
def diff_text(self, expected_text, actual_text, expected_filename, actual_filename):
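Editor's note: the tolerance change above matters because `tolerance or self.get_option('tolerance')` treats an explicit 0 the same as "not given" (0 is falsy), silently re-enabling the default 0.1% fuzz, whereas `if tolerance is None` preserves the caller's 0. A tiny standalone illustration:

    default_tolerance = 0.1

    def old_pick(tolerance):
        return tolerance or default_tolerance          # 0 falls through to the default

    def new_pick(tolerance):
        return default_tolerance if tolerance is None else tolerance

    print(old_pick(0), new_pick(0))        # 0.1 0   -- only the new form honours an exact-match request
    print(old_pick(None), new_pick(None))  # 0.1 0.1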
@@ -559,7 +568,7 @@ class Port(object):
expanded_paths.append(path)
if self.test_isdir(path) and not path.startswith('platform'):
for platform_dir in all_platform_dirs:
- if fs.isdir(fs.join(platform_dir, path)):
+ if fs.isdir(fs.join(platform_dir, path)) and platform_dir in self.baseline_search_path():
expanded_paths.append(self.relative_test_filename(fs.join(platform_dir, path)))
return expanded_paths
@@ -787,6 +796,9 @@ class Port(object):
self._results_directory = self._filesystem.abspath(option_val)
return self._results_directory
+ def perf_results_directory(self):
+ return self._build_path()
+
def default_results_directory(self):
"""Absolute path to the default place to store the test results."""
# Results are store relative to the built products to make it easy
@@ -1151,11 +1163,13 @@ class Port(object):
if not root_directory:
build_directory = self.get_option('build_directory')
if build_directory:
- root_directory = self._filesystem.join(self.get_option('configuration'))
+ root_directory = self._filesystem.join(build_directory, self.get_option('configuration'))
else:
root_directory = self._config.build_directory(self.get_option('configuration'))
# Set --root so that we can pass this to subprocesses and avoid making the
# slow call to config.build_directory() N times in each worker.
+ # FIXME: This is like @memoized, but more annoying and fragile; there should be another
+ # way to propagate values without mutating the options list.
self.set_option_default('root', root_directory)
return self._filesystem.join(self._filesystem.abspath(root_directory), *comps)
@@ -1277,21 +1291,9 @@ class Port(object):
return suite.args
return []
- def supports_switching_pixel_tests_per_test(self):
- if self.get_option('webkit_test_runner'):
- return True
- return self._supports_switching_pixel_tests_per_test()
-
- def _supports_switching_pixel_tests_per_test(self):
- # FIXME: all ports should support it.
- return False
-
def should_run_as_pixel_test(self, test_input):
if not self._options.pixel_tests:
return False
- if not self.supports_switching_pixel_tests_per_test():
- # Cannot do more filtering without this.
- return True
if self._options.pixel_test_directories:
return any(test_input.test_name.startswith(directory) for directory in self._options.pixel_test_directories)
return self._should_run_as_pixel_test(test_input)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
index 019873567..c68e441aa 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/base_unittest.py
@@ -437,6 +437,10 @@ class PortTest(unittest.TestCase):
self.assertFalse('passes/text.html' in tests)
self.assertTrue('virtual/passes/text.html' in tests)
+ def test_build_path(self):
+ port = self.make_port(options=optparse.Values({'build_directory': '/my-build-directory/'}))
+ self.assertEqual(port._build_path(), '/my-build-directory/Release')
+
if __name__ == '__main__':
unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
index b72783c5d..38ce4b198 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium.py
@@ -190,18 +190,16 @@ class ChromiumPort(Port):
override_step, logging)
def diff_image(self, expected_contents, actual_contents, tolerance=None):
- # FIXME: need unit tests for this.
-
# tolerance is not used in chromium. Make sure caller doesn't pass tolerance other than zero or None.
assert (tolerance is None) or tolerance == 0
# If only one of them exists, return that one.
if not actual_contents and not expected_contents:
- return (None, 0)
+ return (None, 0, None)
if not actual_contents:
- return (expected_contents, 0)
+ return (expected_contents, 0, None)
if not expected_contents:
- return (actual_contents, 0)
+ return (actual_contents, 0, None)
tempdir = self._filesystem.mkdtemp()
@@ -221,28 +219,22 @@ class ChromiumPort(Port):
comand = [executable, '--diff', native_actual_filename, native_expected_filename, native_diff_filename]
result = None
+ err_str = None
try:
exit_code = self._executive.run_command(comand, return_exit_code=True)
if exit_code == 0:
# The images are the same.
result = None
- elif exit_code != 1:
- _log.error("image diff returned an exit code of %s" % exit_code)
- # Returning None here causes the script to think that we
- # successfully created the diff even though we didn't.
- # FIXME: Consider raising an exception here, so that the error
- # is not accidentally overlooked while the test passes.
- result = None
- except OSError, e:
- if e.errno == errno.ENOENT or e.errno == errno.EACCES:
- _compare_available = False
+ elif exit_code == 1:
+ result = self._filesystem.read_binary_file(native_diff_filename)
else:
- raise
+ err_str = "image diff returned an exit code of %s" % exit_code
+ except OSError, e:
+ err_str = 'error running image diff: %s' % str(e)
finally:
- if exit_code == 1:
- result = self._filesystem.read_binary_file(native_diff_filename)
self._filesystem.rmtree(str(tempdir))
- return (result, 0) # FIXME: how to get % diff?
+
+ return (result, 0, err_str or None) # FIXME: how to get % diff?
def path_from_chromium_base(self, *comps):
"""Returns the full path to path made by joining the top of the
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
index fa85f10d5..7106a20f5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android.py
@@ -27,9 +27,10 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+import copy
import logging
import os
-import shlex
+import re
import threading
import time
@@ -44,25 +45,22 @@ _log = logging.getLogger(__name__)
# The root directory for test resources, which has the same structure as the
# source root directory of Chromium.
-# This path is defined in base/base_paths_android.cc and
-# webkit/support/platform_support_android.cc.
+# This path is defined in Chromium's base/test/test_support_android.cc.
DEVICE_SOURCE_ROOT_DIR = '/data/local/tmp/'
COMMAND_LINE_FILE = DEVICE_SOURCE_ROOT_DIR + 'chrome-native-tests-command-line'
# The directory to put tools and resources of DumpRenderTree.
-DEVICE_DRT_DIR = '/data/drt/'
+# If you change this, you must also change Tools/DumpRenderTree/chromium/TestShellAndroid.cpp
+# and Chromium's webkit/support/platform_support_android.cc.
+DEVICE_DRT_DIR = DEVICE_SOURCE_ROOT_DIR + 'drt/'
DEVICE_FORWARDER_PATH = DEVICE_DRT_DIR + 'forwarder'
DEVICE_DRT_STAMP_PATH = DEVICE_DRT_DIR + 'DumpRenderTree.stamp'
DRT_APP_PACKAGE = 'org.chromium.native_test'
DRT_ACTIVITY_FULL_NAME = DRT_APP_PACKAGE + '/.ChromeNativeTestActivity'
-DRT_APP_DIR = '/data/user/0/' + DRT_APP_PACKAGE + '/'
-DRT_APP_FILES_DIR = DEVICE_SOURCE_ROOT_DIR
-DRT_APP_CACHE_DIR = DRT_APP_DIR + 'cache/'
+DRT_APP_CACHE_DIR = DEVICE_DRT_DIR + 'cache/'
-# This only works for single core devices so far.
-# FIXME: Find a solution for multi-core devices.
-SCALING_GOVERNOR = "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"
+SCALING_GOVERNORS_PATTERN = "/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor"
# All the test cases are still served to DumpRenderTree through file protocol,
# but we use a file-to-http feature to bridge the file request to host's http
@@ -155,27 +153,19 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def __init__(self, host, port_name, **kwargs):
super(ChromiumAndroidPort, self).__init__(host, port_name, **kwargs)
- if not hasattr(self._options, 'additional_drt_flag'):
- self._options.additional_drt_flag = []
- self._options.additional_drt_flag.append('--encode-binary')
-
- # The Chromium port for Android always uses the hardware GPU path.
- self._options.additional_drt_flag.append('--enable-hardware-gpu')
-
- # Shard ref tests so that they run together to avoid repeatedly driver restarts.
- self._options.shard_ref_tests = True
-
self._operating_system = 'android'
self._version = 'icecreamsandwich'
- self._original_governor = None
- self._android_base_dir = None
self._host_port = factory.PortFactory(host).get('chromium', **kwargs)
- self._adb_command = ['adb']
- adb_args = self.get_option('adb_args')
- if adb_args:
- self._adb_command += shlex.split(adb_args)
+ if hasattr(self._options, 'adb_device'):
+ self._devices = self._options.adb_device
+ else:
+ self._devices = []
+
+ def additional_drt_flag(self):
+ # The Chromium port for Android always uses the hardware GPU path.
+ return ['--encode-binary', '--enable-hardware-gpu']
def default_timeout_ms(self):
# Android platform has less computing power than desktop platforms.
@@ -184,8 +174,7 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
return 10 * 1000
def default_child_processes(self):
- # Because of the nature of apk, we don't support more than one process.
- return 1
+ return len(self._get_devices())
def baseline_search_path(self):
return map(self._webkit_baseline_path, self.FALLBACK_PATHS)
@@ -217,15 +206,6 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
android_expectations_file = self.path_from_webkit_base('LayoutTests', 'platform', 'chromium', 'test_expectations_android.txt')
return super(ChromiumAndroidPort, self).expectations_files() + [android_expectations_file]
- def test_expectations(self):
- # Automatically apply all expectation rules of chromium-linux to
- # chromium-android.
- # FIXME: This is a temporary measure to reduce the manual work when
- # updating WebKit. This method should be removed when we merge
- # test_expectations_android.txt into TestExpectations.
- expectations = super(ChromiumAndroidPort, self).test_expectations()
- return expectations.replace('LINUX ', 'LINUX ANDROID ')
-
def start_http_server(self, additional_dirs=None, number_of_servers=0):
# The http server runs during the whole testing period, so ignore this call.
pass
@@ -235,39 +215,48 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
pass
def setup_test_run(self):
- self._run_adb_command(['root'])
- self._setup_performance()
- # Required by webkit_support::GetWebKitRootDirFilePath().
- # Other directories will be created automatically by adb push.
- self._run_adb_command(['shell', 'mkdir', '-p', DEVICE_SOURCE_ROOT_DIR + 'chrome'])
- # Allow the DumpRenderTree app to fully access the directory.
- # The native code needs the permission to write temporary files here.
- self._run_adb_command(['shell', 'chmod', '777', DEVICE_SOURCE_ROOT_DIR])
-
- self._push_executable()
- self._push_fonts()
- self._synchronize_datetime()
-
- # Delete the disk cache if any to ensure a clean test run.
- # This is like what's done in ChromiumPort.setup_test_run but on the device.
- self._run_adb_command(['shell', 'rm', '-r', DRT_APP_CACHE_DIR])
-
# Start the HTTP server so that the device can access the test cases.
super(ChromiumAndroidPort, self).start_http_server(additional_dirs={TEST_PATH_PREFIX: self.layout_tests_dir()})
- _log.debug('Starting forwarder')
- self._run_adb_command(['shell', '%s %s' % (DEVICE_FORWARDER_PATH, FORWARD_PORTS)])
-
def clean_up_test_run(self):
- # Leave the forwarder and tests httpd server there because they are
- # useful for debugging and do no harm to subsequent tests.
- self._teardown_performance()
+ super(ChromiumAndroidPort, self).stop_http_server()
def skipped_layout_tests(self, test_list):
+        # This method is more convenient than SKIP entries in TestExpectations for skipping
+        # whole directories, because it takes higher priority.
+        # Still use TestExpectations to skip individual tests.
return self._real_tests([
- # Canvas tests are run as virtual gpu tests.
- 'fast/canvas',
+ # Only run these tests as virtual gpu tests.
'canvas/philip',
+ 'fast/canvas',
+
+ # Skip tests of other platforms to save time.
+ 'platform/gtk',
+ 'platform/mac',
+ 'platform/mac-wk2',
+ 'platform/qt',
+ 'platform/win',
+
+ # Features not supported.
+ 'compositing/plugins',
+ 'plugins',
+ 'http/tests/plugins',
+ 'platform/chromium/compositing/plugins',
+ 'platform/chromium/plugins',
+
+ 'http/tests/inspector',
+ 'http/tests/inspector-enabled',
+ 'inspector',
+ 'platform/chromium/inspector',
+
+ 'accessibility',
+ 'platform/chromium/accessibility',
+
+ 'fast/dom/MediaStream',
+ 'fast/mediastream',
+ 'fast/notifications',
+ 'fast/speech',
+ 'webaudio',
])
def create_driver(self, worker_number, no_timeout=False):
@@ -275,6 +264,10 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
# See comments in ChromiumAndroidDriver.start().
return ChromiumAndroidDriver(self, worker_number, pixel_tests=self.get_option('pixel_tests'), no_timeout=no_timeout)
+ def driver_cmd_line(self):
+ # Override to return the actual DumpRenderTree command line.
+ return self.create_driver(0)._drt_cmd_line(self.get_option('pixel_tests'), [])
+
# Overridden private functions.
def _build_path(self, *comps):
@@ -319,35 +312,84 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def _driver_class(self):
return ChromiumAndroidDriver
- def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
- if not stdout:
- stdout = ''
- stdout += '********* Logcat:\n' + self._get_logcat()
- if not stderr:
- stderr = ''
- stderr += '********* Tombstone file:\n' + self._get_last_stacktrace()
- return super(ChromiumAndroidPort, self)._get_crash_log(name, pid, stdout, stderr, newer_than)
-
# Local private functions.
+ def _get_devices(self):
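+        # Use any devices given via --adb-device; otherwise query 'adb devices' once and cache the result.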
+ if not self._devices:
+ re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
+ result = self._executive.run_command(['adb', 'devices'], error_handler=self._executive.ignore_error)
+ self._devices = re_device.findall(result)
+ if not self._devices:
+ raise AssertionError('No devices attached. Result of "adb devices": %s' % result)
+ return self._devices
+
+ def _get_device_serial(self, worker_number):
+ devices = self._get_devices()
+ if worker_number >= len(devices):
+ raise AssertionError('Worker number exceeds available number of devices')
+ return devices[worker_number]
+
+
+class ChromiumAndroidDriver(driver.Driver):
+ def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
+ super(ChromiumAndroidDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
+ self._cmd_line = None
+ self._in_fifo_path = DEVICE_DRT_DIR + 'DumpRenderTree.in'
+ self._out_fifo_path = DEVICE_DRT_DIR + 'DumpRenderTree.out'
+ self._err_fifo_path = DEVICE_DRT_DIR + 'DumpRenderTree.err'
+ self._read_stdout_process = None
+ self._read_stderr_process = None
+ self._forwarder_process = None
+ self._has_setup = False
+ self._original_governors = {}
+ self._adb_command = ['adb', '-s', port._get_device_serial(worker_number)]
+
+ def __del__(self):
+ self._teardown_performance()
+ super(ChromiumAndroidDriver, self).__del__()
+
+ def _setup_test(self):
+ if self._has_setup:
+ return
+
+ self._has_setup = True
+ self._run_adb_command(['root'])
+ self._setup_performance()
+ # Required by webkit_support::GetWebKitRootDirFilePath().
+ # Other directories will be created automatically by adb push.
+ self._run_adb_command(['shell', 'mkdir', '-p', DEVICE_SOURCE_ROOT_DIR + 'chrome'])
+
+ # Allow the DumpRenderTree app to fully access the directory.
+ # The native code needs the permission to write temporary files and create pipes here.
+ self._run_adb_command(['shell', 'mkdir', '-p', DEVICE_DRT_DIR])
+ self._run_adb_command(['shell', 'chmod', '777', DEVICE_DRT_DIR])
+
+ self._push_executable()
+ self._push_fonts()
+ self._synchronize_datetime()
+
+ # Delete the disk cache if any to ensure a clean test run.
+ # This is like what's done in ChromiumPort.setup_test_run but on the device.
+ self._run_adb_command(['shell', 'rm', '-r', DRT_APP_CACHE_DIR])
+
def _push_executable(self):
- drt_host_path = self._path_to_driver()
- forwarder_host_path = self._path_to_forwarder()
+ drt_host_path = self._port._path_to_driver()
+ forwarder_host_path = self._port._path_to_forwarder()
host_stamp = int(float(max(os.stat(drt_host_path).st_mtime,
os.stat(forwarder_host_path).st_mtime)))
device_stamp = int(float(self._run_adb_command([
'shell', 'cat %s 2>/dev/null || echo 0' % DEVICE_DRT_STAMP_PATH])))
- if device_stamp < host_stamp:
+ if device_stamp != host_stamp:
_log.debug('Pushing executable')
self._push_to_device(forwarder_host_path, DEVICE_FORWARDER_PATH)
self._run_adb_command(['uninstall', DRT_APP_PACKAGE])
install_result = self._run_adb_command(['install', drt_host_path])
if install_result.find('Success') == -1:
raise AssertionError('Failed to install %s onto device: %s' % (drt_host_path, install_result))
- self._push_to_device(self._build_path('DumpRenderTree.pak'), DEVICE_DRT_DIR + 'DumpRenderTree.pak')
- self._push_to_device(self._build_path('DumpRenderTree_resources'), DEVICE_DRT_DIR + 'DumpRenderTree_resources')
- self._push_to_device(self._build_path('android_main_fonts.xml'), DEVICE_DRT_DIR + 'android_main_fonts.xml')
- self._push_to_device(self._build_path('android_fallback_fonts.xml'), DEVICE_DRT_DIR + 'android_fallback_fonts.xml')
+ self._push_to_device(self._port._build_path('DumpRenderTree.pak'), DEVICE_DRT_DIR + 'DumpRenderTree.pak')
+ self._push_to_device(self._port._build_path('DumpRenderTree_resources'), DEVICE_DRT_DIR + 'DumpRenderTree_resources')
+ self._push_to_device(self._port._build_path('android_main_fonts.xml'), DEVICE_DRT_DIR + 'android_main_fonts.xml')
+ self._push_to_device(self._port._build_path('android_fallback_fonts.xml'), DEVICE_DRT_DIR + 'android_fallback_fonts.xml')
# Version control of test resources is dependent on executables,
# because we will always rebuild executables when resources are
# updated.
@@ -357,7 +399,7 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def _push_fonts(self):
if not self._check_version(DEVICE_FONTS_DIR, FONT_FILES_VERSION):
_log.debug('Pushing fonts')
- path_to_ahem_font = self._build_path('AHEM____.TTF')
+ path_to_ahem_font = self._port._build_path('AHEM____.TTF')
self._push_to_device(path_to_ahem_font, DEVICE_FONTS_DIR + 'AHEM____.TTF')
for (host_dir, font_file) in HOST_FONT_FILES:
self._push_to_device(host_dir + font_file, DEVICE_FONTS_DIR + font_file)
@@ -367,14 +409,14 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def _push_test_resources(self):
_log.debug('Pushing test resources')
for resource in TEST_RESOURCES_TO_PUSH:
- self._push_to_device(self.layout_tests_dir() + '/' + resource, DEVICE_LAYOUT_TESTS_DIR + resource)
+ self._push_to_device(self._port.layout_tests_dir() + '/' + resource, DEVICE_LAYOUT_TESTS_DIR + resource)
def _synchronize_datetime(self):
# The date/time between host and device may not be synchronized.
# We need to make them synchronized, otherwise tests might fail.
try:
# Get seconds since 1970-01-01 00:00:00 UTC.
- host_datetime = self._executive.run_command(['date', '-u', '+%s'])
+ host_datetime = self._port._executive.run_command(['date', '-u', '+%s'])
except:
# Reset to 1970-01-01 00:00:00 UTC.
host_datetime = 0
@@ -394,10 +436,10 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def _run_adb_command(self, cmd, ignore_error=False):
_log.debug('Run adb command: ' + str(cmd))
if ignore_error:
- error_handler = self._executive.ignore_error
+ error_handler = self._port._executive.ignore_error
else:
error_handler = None
- result = self._executive.run_command(self._adb_command + cmd, error_handler=error_handler)
+ result = self._port._executive.run_command(self._adb_command + cmd, error_handler=error_handler)
# Limit the length to avoid too verbose output of commands like 'adb logcat' and 'cat /data/tombstones/tombstone01'
# whose outputs are normally printed in later logs.
_log.debug('Run adb result: ' + result[:80])
@@ -443,37 +485,39 @@ class ChromiumAndroidPort(chromium.ChromiumPort):
def _setup_performance(self):
# Disable CPU scaling and drop ram cache to reduce noise in tests
- if not self._original_governor:
- self._original_governor = self._run_adb_command(['shell', 'cat', SCALING_GOVERNOR], ignore_error=True)
- if self._original_governor:
- self._run_adb_command(['shell', 'echo', 'performance', '>', SCALING_GOVERNOR])
+ if not self._original_governors:
+ governor_files = self._run_adb_command(['shell', 'ls', SCALING_GOVERNORS_PATTERN])
+ if governor_files.find('No such file or directory') == -1:
+ for file in governor_files.split():
+ self._original_governors[file] = self._run_adb_command(['shell', 'cat', file]).strip()
+ self._run_adb_command(['shell', 'echo', 'performance', '>', file])
def _teardown_performance(self):
- if self._original_governor:
- self._run_adb_command(['shell', 'echo', self._original_governor, SCALING_GOVERNOR])
- self._original_governor = None
-
-
-class ChromiumAndroidDriver(driver.Driver):
- def __init__(self, port, worker_number, pixel_tests, no_timeout=False):
- super(ChromiumAndroidDriver, self).__init__(port, worker_number, pixel_tests, no_timeout)
- self._pixel_tests = pixel_tests
- self._in_fifo_path = DRT_APP_FILES_DIR + 'DumpRenderTree.in'
- self._out_fifo_path = DRT_APP_FILES_DIR + 'DumpRenderTree.out'
- self._err_fifo_path = DRT_APP_FILES_DIR + 'DumpRenderTree.err'
- self._read_stdout_process = None
- self._read_stderr_process = None
+ for file, original_content in self._original_governors.items():
+ self._run_adb_command(['shell', 'echo', original_content, '>', file])
+ self._original_governors = {}
def _command_wrapper(cls, wrapper_option):
# Ignore command wrapper which is not applicable on Android.
return []
+ def _get_crash_log(self, stdout, stderr, newer_than):
+ if not stdout:
+ stdout = ''
+ stdout += '********* Logcat:\n' + self._get_logcat()
+ if not stderr:
+ stderr = ''
+ stderr += '********* Tombstone file:\n' + self._get_last_stacktrace()
+ return super(ChromiumAndroidDriver, self)._get_crash_log(stdout, stderr, newer_than)
+
def cmd_line(self, pixel_tests, per_test_args):
- return self._port._adb_command + ['shell']
+ # The returned command line is used to start _server_process. In our case, it's an interactive 'adb shell'.
+ # The command line passed to the DRT process is returned by _drt_cmd_line() instead.
+ return self._adb_command + ['shell']
def _file_exists_on_device(self, full_file_path):
assert full_file_path.startswith('/')
- return self._port._run_adb_command(['shell', 'ls', full_file_path]).strip() == full_file_path
+ return self._run_adb_command(['shell', 'ls', full_file_path]).strip() == full_file_path
def _drt_cmd_line(self, pixel_tests, per_test_args):
return driver.Driver.cmd_line(self, pixel_tests, per_test_args) + [
@@ -496,24 +540,35 @@ class ChromiumAndroidDriver(driver.Driver):
self._file_exists_on_device(self._err_fifo_path))
def _remove_all_pipes(self):
- self._port._run_adb_command(['shell', 'rm', self._in_fifo_path, self._out_fifo_path, self._err_fifo_path])
+ self._run_adb_command(['shell', 'rm', self._in_fifo_path, self._out_fifo_path, self._err_fifo_path])
return (not self._file_exists_on_device(self._in_fifo_path) and
not self._file_exists_on_device(self._out_fifo_path) and
not self._file_exists_on_device(self._err_fifo_path))
+ def run_test(self, driver_input):
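+        # Unwrap virtual tests: run the underlying base test with the per-test arguments of its virtual suite.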
+ base = self._port.lookup_virtual_test_base(driver_input.test_name)
+ if base:
+ driver_input = copy.copy(driver_input)
+ driver_input.args = self._port.lookup_virtual_test_args(driver_input.test_name)
+ driver_input.test_name = base
+ return super(ChromiumAndroidDriver, self).run_test(driver_input)
+
def start(self, pixel_tests, per_test_args):
# Only one driver instance is allowed because of the nature of Android activity.
- # The single driver needs to switch between pixel test and no pixel test mode by itself.
- if pixel_tests != self._pixel_tests:
+ # The single driver needs to restart DumpRenderTree when the command line changes.
+ cmd_line = self._drt_cmd_line(pixel_tests, per_test_args)
+ if cmd_line != self._cmd_line:
self.stop()
- self._pixel_tests = pixel_tests
+ self._cmd_line = cmd_line
super(ChromiumAndroidDriver, self).start(pixel_tests, per_test_args)
def _start(self, pixel_tests, per_test_args):
+ self._setup_test()
+
for retries in range(3):
if self._start_once(pixel_tests, per_test_args):
return
- _log.error('Failed to start DumpRenderTree application. Retries=%d. Log:%s' % (retries, self._port._get_logcat()))
+ _log.error('Failed to start DumpRenderTree application. Retries=%d. Log:%s' % (retries, self._get_logcat()))
self.stop()
time.sleep(2)
raise AssertionError('Failed to start DumpRenderTree application multiple times. Give up.')
@@ -521,9 +576,14 @@ class ChromiumAndroidDriver(driver.Driver):
def _start_once(self, pixel_tests, per_test_args):
super(ChromiumAndroidDriver, self)._start(pixel_tests, per_test_args)
- self._port._run_adb_command(['logcat', '-c'])
- self._port._run_adb_command(['shell', 'echo'] + self._drt_cmd_line(pixel_tests, per_test_args) + ['>', COMMAND_LINE_FILE])
- start_result = self._port._run_adb_command(['shell', 'am', 'start', '-e', 'RunInSubThread', '-n', DRT_ACTIVITY_FULL_NAME])
+ _log.debug('Starting forwarder')
+ self._forwarder_process = server_process.ServerProcess(
+ self._port, 'Forwarder', self._adb_command + ['shell', '%s -D %s' % (DEVICE_FORWARDER_PATH, FORWARD_PORTS)])
+ self._forwarder_process.start()
+
+ self._run_adb_command(['logcat', '-c'])
+ self._run_adb_command(['shell', 'echo'] + self._cmd_line + ['>', COMMAND_LINE_FILE])
+ start_result = self._run_adb_command(['shell', 'am', 'start', '-e', 'RunInSubThread', '-n', DRT_ACTIVITY_FULL_NAME])
if start_result.find('Exception') != -1:
_log.error('Failed to start DumpRenderTree application. Exception:\n' + start_result)
return False
@@ -540,13 +600,13 @@ class ChromiumAndroidDriver(driver.Driver):
# Start a process to read from the stdout fifo of the DumpRenderTree app and print to stdout.
_log.debug('Redirecting stdout to ' + self._out_fifo_path)
self._read_stdout_process = server_process.ServerProcess(
- self._port, 'ReadStdout', self._port._adb_command + ['shell', 'cat', self._out_fifo_path], universal_newlines=True)
+ self._port, 'ReadStdout', self._adb_command + ['shell', 'cat', self._out_fifo_path], universal_newlines=True)
self._read_stdout_process.start()
# Start a process to read from the stderr fifo of the DumpRenderTree app and print to stdout.
_log.debug('Redirecting stderr to ' + self._err_fifo_path)
self._read_stderr_process = server_process.ServerProcess(
- self._port, 'ReadStderr', self._port._adb_command + ['shell', 'cat', self._err_fifo_path], universal_newlines=True)
+ self._port, 'ReadStderr', self._adb_command + ['shell', 'cat', self._err_fifo_path], universal_newlines=True)
self._read_stderr_process.start()
_log.debug('Redirecting stdin to ' + self._in_fifo_path)
@@ -587,7 +647,7 @@ class ChromiumAndroidDriver(driver.Driver):
return True
def stop(self):
- self._port._run_adb_command(['shell', 'am', 'force-stop', DRT_APP_PACKAGE])
+ self._run_adb_command(['shell', 'am', 'force-stop', DRT_APP_PACKAGE])
if self._read_stdout_process:
self._read_stdout_process.kill()
@@ -604,6 +664,10 @@ class ChromiumAndroidDriver(driver.Driver):
self._server_process = None
super(ChromiumAndroidDriver, self).stop()
+ if self._forwarder_process:
+ self._forwarder_process.stop(kill_directly=True)
+ self._forwarder_process = None
+
if not ChromiumAndroidDriver._loop_with_timeout(self._remove_all_pipes, DRT_START_STOP_TIMEOUT_SECS):
raise AssertionError('Failed to remove fifo files. May be locked.')
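
A note on the device handling introduced above: _get_devices() shards the run across attached hardware by parsing 'adb devices' output with a regular expression, and each worker is pinned to one serial. Below is a minimal, standalone sketch of that parsing step, run against a canned sample string instead of a live adb call; the helper names (parse_devices, device_serial_for_worker) are illustrative and not part of the patch.

import re

# Canned output in the shape printed by 'adb devices'; a real run would capture
# this from the adb binary instead.
SAMPLE_ADB_DEVICES_OUTPUT = ('List of devices attached\n'
                             '123456789ABCDEF0\tdevice\n'
                             '23456789ABCDEF01\tdevice\n')

# Same pattern as ChromiumAndroidPort._get_devices(): a serial, a tab, then the
# literal state 'device', one entry per line.
_RE_DEVICE = re.compile(r'^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)

def parse_devices(adb_devices_output):
    devices = _RE_DEVICE.findall(adb_devices_output)
    if not devices:
        raise AssertionError('No devices attached. Result of "adb devices": %s' % adb_devices_output)
    return devices

def device_serial_for_worker(devices, worker_number):
    # Mirrors _get_device_serial(): worker N drives device N.
    if worker_number >= len(devices):
        raise AssertionError('Worker number exceeds available number of devices')
    return devices[worker_number]

devices = parse_devices(SAMPLE_ADB_DEVICES_OUTPUT)
assert devices == ['123456789ABCDEF0', '23456789ABCDEF01']
assert device_serial_for_worker(devices, 1) == '23456789ABCDEF01'

This is also why default_child_processes() now returns len(self._get_devices()) and why ChromiumAndroidDriver builds its adb invocations as ['adb', '-s', serial, ...].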
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
index 65b6a2d7b..c79546bb8 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_android_unittest.py
@@ -39,12 +39,82 @@ from webkitpy.layout_tests.port import chromium_android
from webkitpy.layout_tests.port import chromium_port_testcase
from webkitpy.layout_tests.port import driver
from webkitpy.layout_tests.port import driver_unittest
+from webkitpy.tool.mocktool import MockOptions
+
+class MockRunCommand(object):
+ def __init__(self):
+ self._mock_logcat = ''
+ self._mock_devices_output = ''
+ self._mock_devices = []
+ self._mock_ls_tombstones = ''
+
+ def mock_run_command_fn(self, args):
+ if args[0] != 'adb':
+ return ''
+ if args[1] == 'devices':
+ return self._mock_devices_output
+
+ assert len(args) > 3
+ assert args[1] == '-s'
+ assert args[2] in self._mock_devices
+ if args[3] == 'shell':
+ if args[4:] == ['ls', '-n', '/data/tombstones']:
+ return self._mock_ls_tombstones
+ elif args[4] == 'cat':
+ return args[5] + '\nmock_contents\n'
+ elif args[3] == 'logcat':
+ return self._mock_logcat
+ return ''
+
+ def mock_no_device(self):
+ self._mock_devices = []
+ self._mock_devices_output = 'List of devices attached'
+
+ def mock_one_device(self):
+ self._mock_devices = ['123456789ABCDEF0']
+ self._mock_devices_output = ('List of devices attached\n'
+ '%s\tdevice\n' % self._mock_devices[0])
+
+ def mock_two_devices(self):
+ self._mock_devices = ['123456789ABCDEF0', '23456789ABCDEF01']
+ self._mock_devices_output = ('* daemon not running. starting it now on port 5037 *'
+ '* daemon started successfully *'
+ 'List of devices attached\n'
+ '%s\tdevice\n'
+ '%s\tdevice\n' % (self._mock_devices[0], self._mock_devices[1]))
+
+ def mock_no_tombstone_dir(self):
+ self._mock_ls_tombstones = '/data/tombstones: No such file or directory'
+
+ def mock_no_tombstone_file(self):
+ self._mock_ls_tombstones = ''
+
+ def mock_ten_tombstones(self):
+ self._mock_ls_tombstones = ('-rw------- 1000 1000 218643 2012-04-26 18:15 tombstone_00\n'
+ '-rw------- 1000 1000 241695 2012-04-26 18:15 tombstone_01\n'
+ '-rw------- 1000 1000 219472 2012-04-26 18:16 tombstone_02\n'
+ '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
+ '-rw------- 1000 1000 82022 2012-04-23 16:57 tombstone_04\n'
+ '-rw------- 1000 1000 82015 2012-04-23 16:57 tombstone_05\n'
+ '-rw------- 1000 1000 81974 2012-04-23 16:58 tombstone_06\n'
+ '-rw------- 1000 1000 237409 2012-04-26 17:41 tombstone_07\n'
+ '-rw------- 1000 1000 276089 2012-04-26 18:15 tombstone_08\n'
+ '-rw------- 1000 1000 219618 2012-04-26 18:15 tombstone_09\n')
+
+ def mock_logcat(self, content):
+ self._mock_logcat = content
class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
port_name = 'chromium-android'
port_maker = chromium_android.ChromiumAndroidPort
- mock_logcat = ''
+
+ def make_port(self, **kwargs):
+ port = super(ChromiumAndroidPortTest, self).make_port(**kwargs)
+ self.mock_run_command = MockRunCommand()
+ self.mock_run_command.mock_one_device()
+ port._executive = MockExecutive2(run_command_fn=self.mock_run_command.mock_run_command_fn)
+ return port
def test_attributes(self):
port = self.make_port()
@@ -59,51 +129,69 @@ class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
# using a custom expectations file.
pass
- @staticmethod
- def mock_run_command_fn(args):
- if args[1] == 'shell':
- if args[2:] == ['ls', '-n', '/data/tombstones']:
- # For 'adb shell ls -n /data/tombstones'
- return ('-rw------- 1000 1000 218643 2012-04-26 18:15 tombstone_00\n'
- '-rw------- 1000 1000 241695 2012-04-26 18:15 tombstone_01\n'
- '-rw------- 1000 1000 219472 2012-04-26 18:16 tombstone_02\n'
- '-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
- '-rw------- 1000 1000 82022 2012-04-23 16:57 tombstone_04\n'
- '-rw------- 1000 1000 82015 2012-04-23 16:57 tombstone_05\n'
- '-rw------- 1000 1000 81974 2012-04-23 16:58 tombstone_06\n'
- '-rw------- 1000 1000 237409 2012-04-26 17:41 tombstone_07\n'
- '-rw------- 1000 1000 276089 2012-04-26 18:15 tombstone_08\n'
- '-rw------- 1000 1000 219618 2012-04-26 18:15 tombstone_09\n')
- elif args[2] == 'cat':
- return args[3] + '\nmock_contents\n'
- elif args[1] == 'logcat':
- return ChromiumAndroidPortTest.mock_logcat
- else:
- return ''
+ def test_get_devices_no_device(self):
+ port = self.make_port()
+ self.mock_run_command.mock_no_device()
+ self.assertRaises(AssertionError, port._get_devices)
- def test_get_last_stacktrace(self):
+ def test_get_devices_one_device(self):
+ port = self.make_port()
+ self.mock_run_command.mock_one_device()
+ self.assertEquals(self.mock_run_command._mock_devices, port._get_devices())
+ self.assertEquals(1, port.default_child_processes())
+
+ def test_get_devices_two_devices(self):
port = self.make_port()
+ self.mock_run_command.mock_two_devices()
+ self.assertEquals(self.mock_run_command._mock_devices, port._get_devices())
+ self.assertEquals(2, port.default_child_processes())
- def mock_run_command_no_dir(args):
- return '/data/tombstones: No such file or directory'
- port._executive = MockExecutive2(run_command_fn=mock_run_command_no_dir)
- self.assertEquals(port._get_last_stacktrace(), '')
+ def test_get_device_serial_no_device(self):
+ port = self.make_port()
+ self.mock_run_command.mock_no_device()
+ self.assertRaises(AssertionError, port._get_device_serial, 0)
- def mock_run_command_no_file(args):
- return ''
- port._executive = MockExecutive2(run_command_fn=mock_run_command_no_file)
- self.assertEquals(port._get_last_stacktrace(), '')
+ def test_get_device_serial_one_device(self):
+ port = self.make_port()
+ self.mock_run_command.mock_one_device()
+ self.assertEquals(self.mock_run_command._mock_devices[0], port._get_device_serial(0))
+ self.assertRaises(AssertionError, port._get_device_serial, 1)
+
+ def test_get_device_serial_two_devices(self):
+ port = self.make_port()
+ self.mock_run_command.mock_two_devices()
+ self.assertEquals(self.mock_run_command._mock_devices[0], port._get_device_serial(0))
+ self.assertEquals(self.mock_run_command._mock_devices[1], port._get_device_serial(1))
+ self.assertRaises(AssertionError, port._get_device_serial, 2)
+
+
+class ChromiumAndroidDriverTest(unittest.TestCase):
+ def setUp(self):
+ self.mock_run_command = MockRunCommand()
+ self.mock_run_command.mock_one_device()
+ self.port = chromium_android.ChromiumAndroidPort(
+ MockSystemHost(executive=MockExecutive2(run_command_fn=self.mock_run_command.mock_run_command_fn)),
+ 'chromium-android')
+ self.driver = chromium_android.ChromiumAndroidDriver(self.port, worker_number=0, pixel_tests=True)
- port._executive = MockExecutive2(run_command_fn=ChromiumAndroidPortTest.mock_run_command_fn)
- self.assertEquals(port._get_last_stacktrace(),
+ def test_get_last_stacktrace(self):
+ self.mock_run_command.mock_no_tombstone_dir()
+ self.assertEquals(self.driver._get_last_stacktrace(), '')
+
+ self.mock_run_command.mock_no_tombstone_file()
+ self.assertEquals(self.driver._get_last_stacktrace(), '')
+
+ self.mock_run_command.mock_ten_tombstones()
+ self.assertEquals(self.driver._get_last_stacktrace(),
'-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
'/data/tombstones/tombstone_03\nmock_contents\n')
def test_get_crash_log(self):
- port = self.make_port()
- port._executive = MockExecutive2(run_command_fn=ChromiumAndroidPortTest.mock_run_command_fn)
- ChromiumAndroidPortTest.mock_logcat = 'logcat contents\n'
- self.assertEquals(port._get_crash_log('foo', 1234, 'out bar\nout baz\n', 'err bar\nerr baz\n', newer_than=None),
+ self.mock_run_command.mock_logcat('logcat contents\n')
+ self.mock_run_command.mock_ten_tombstones()
+ self.driver._crashed_process_name = 'foo'
+ self.driver._crashed_pid = 1234
+ self.assertEquals(self.driver._get_crash_log('out bar\nout baz\n', 'err bar\nerr baz\n', newer_than=None),
('err bar\n'
'err baz\n'
'********* Tombstone file:\n'
@@ -121,7 +209,10 @@ class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
u'STDERR: -rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
u'STDERR: /data/tombstones/tombstone_03\n'
u'STDERR: mock_contents\n'))
- self.assertEquals(port._get_crash_log(None, None, None, None, newer_than=None),
+
+ self.driver._crashed_process_name = None
+ self.driver._crashed_pid = None
+ self.assertEquals(self.driver._get_crash_log(None, None, newer_than=None),
('********* Tombstone file:\n'
'-rw------- 1000 1000 45316 2012-04-27 16:33 tombstone_03\n'
'/data/tombstones/tombstone_03\n'
@@ -134,26 +225,16 @@ class ChromiumAndroidPortTest(chromium_port_testcase.ChromiumPortTestCase):
u'STDERR: /data/tombstones/tombstone_03\n'
u'STDERR: mock_contents\n'))
- def test_driver_cmd_line(self):
- # Overriding PortTestCase.test_cmd_line(). Use ChromiumAndroidDriverTest.test_cmd_line() instead.
- return
-
-
-class ChromiumAndroidDriverTest(unittest.TestCase):
- def setUp(self):
- mock_port = chromium_android.ChromiumAndroidPort(MockSystemHost(), 'chromium-android')
- self.driver = chromium_android.ChromiumAndroidDriver(mock_port, worker_number=0, pixel_tests=True)
-
def test_cmd_line(self):
cmd_line = self.driver.cmd_line(True, ['anything'])
- self.assertEquals(['adb', 'shell'], cmd_line)
+ self.assertEquals(['adb', '-s', self.mock_run_command._mock_devices[0], 'shell'], cmd_line)
def test_drt_cmd_line(self):
cmd_line = self.driver._drt_cmd_line(True, ['--a'])
self.assertTrue('--a' in cmd_line)
- self.assertTrue('--in-fifo=' + chromium_android.DRT_APP_FILES_DIR + 'DumpRenderTree.in' in cmd_line)
- self.assertTrue('--out-fifo=' + chromium_android.DRT_APP_FILES_DIR + 'DumpRenderTree.out' in cmd_line)
- self.assertTrue('--err-fifo=' + chromium_android.DRT_APP_FILES_DIR + 'DumpRenderTree.err' in cmd_line)
+ self.assertTrue('--in-fifo=' + chromium_android.DEVICE_DRT_DIR + 'DumpRenderTree.in' in cmd_line)
+ self.assertTrue('--out-fifo=' + chromium_android.DEVICE_DRT_DIR + 'DumpRenderTree.out' in cmd_line)
+ self.assertTrue('--err-fifo=' + chromium_android.DEVICE_DRT_DIR + 'DumpRenderTree.err' in cmd_line)
def test_read_prompt(self):
self.driver._server_process = driver_unittest.MockServerProcess(lines=['root@android:/ # '])
@@ -163,13 +244,46 @@ class ChromiumAndroidDriverTest(unittest.TestCase):
def test_command_from_driver_input(self):
driver_input = driver.DriverInput('foo/bar/test.html', 10, 'checksum', True)
- expected_command = "/data/local/tmp/third_party/WebKit/LayoutTests/foo/bar/test.html'checksum\n"
+ expected_command = "/data/local/tmp/third_party/WebKit/LayoutTests/foo/bar/test.html'--pixel-test'checksum\n"
self.assertEquals(self.driver._command_from_driver_input(driver_input), expected_command)
driver_input = driver.DriverInput('http/tests/foo/bar/test.html', 10, 'checksum', True)
- expected_command = "http://127.0.0.1:8000/foo/bar/test.html'checksum\n"
+ expected_command = "http://127.0.0.1:8000/foo/bar/test.html'--pixel-test'checksum\n"
self.assertEquals(self.driver._command_from_driver_input(driver_input), expected_command)
+class ChromiumAndroidDriverTwoDriversTest(unittest.TestCase):
+ def test_two_drivers(self):
+ mock_run_command = MockRunCommand()
+ mock_run_command.mock_two_devices()
+ port = chromium_android.ChromiumAndroidPort(
+ MockSystemHost(executive=MockExecutive2(run_command_fn=mock_run_command.mock_run_command_fn)),
+ 'chromium-android')
+ driver0 = chromium_android.ChromiumAndroidDriver(port, worker_number=0, pixel_tests=True)
+ driver1 = chromium_android.ChromiumAndroidDriver(port, worker_number=1, pixel_tests=True)
+
+ cmd_line0 = driver0.cmd_line(True, ['anything'])
+ self.assertEquals(['adb', '-s', mock_run_command._mock_devices[0], 'shell'], cmd_line0)
+
+ cmd_line1 = driver1.cmd_line(True, ['anything'])
+ self.assertEquals(['adb', '-s', mock_run_command._mock_devices[1], 'shell'], cmd_line1)
+
+
+class ChromiumAndroidTwoPortsTest(unittest.TestCase):
+ def test_options_with_two_ports(self):
+ options = MockOptions(additional_drt_flag=['--foo=bar', '--foo=baz'])
+ mock_run_command = MockRunCommand()
+ mock_run_command.mock_two_devices()
+ port0 = chromium_android.ChromiumAndroidPort(
+ MockSystemHost(executive=MockExecutive2(run_command_fn=mock_run_command.mock_run_command_fn)),
+ 'chromium-android', options=options)
+ port1 = chromium_android.ChromiumAndroidPort(
+ MockSystemHost(executive=MockExecutive2(run_command_fn=mock_run_command.mock_run_command_fn)),
+ 'chromium-android', options=options)
+ cmd_line = port1.driver_cmd_line()
+ self.assertEquals(cmd_line.count('--encode-binary'), 1)
+ self.assertEquals(cmd_line.count('--enable-hardware-gpu'), 1)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
index 7d4c235f3..fb90d1b9b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/chromium_port_testcase.py
@@ -158,6 +158,11 @@ class ChromiumPortTestCase(port_testcase.PortTestCase):
exception_raised = True
self.assertFalse(exception_raised)
+ def test_diff_image_crashed(self):
+ port = ChromiumPortTestCase.TestLinuxPort()
+ port._executive = MockExecutive2(exit_code=2)
+ self.assertEquals(port.diff_image("EXPECTED", "ACTUAL"), (None, 0, 'image diff returned an exit code of 2'))
+
def test_expectations_files(self):
port = self.make_port()
port.port_name = 'chromium'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver.py b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
index 781823b8d..85049970b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver.py
@@ -166,8 +166,7 @@ class Driver(object):
crash_log = None
if self.has_crashed():
- self.error_from_test, crash_log = self._port._get_crash_log(self._crashed_process_name,
- self._crashed_pid, text, self.error_from_test, newer_than=start_time)
+ self.error_from_test, crash_log = self._get_crash_log(text, self.error_from_test, newer_than=start_time)
# If we don't find a crash log use a placeholder error message instead.
if not crash_log:
@@ -188,6 +187,9 @@ class Driver(object):
crashed_process_name=self._crashed_process_name,
crashed_pid=self._crashed_pid, crash_log=crash_log)
+ def _get_crash_log(self, stdout, stderr, newer_than):
+ return self._port._get_crash_log(self._crashed_process_name, self._crashed_pid, stdout, stderr, newer_than)
+
# FIXME: Seems this could just be inlined into callers.
@classmethod
def _command_wrapper(cls, wrapper_option):
@@ -292,9 +294,8 @@ class Driver(object):
# FIXME: We need to pass --timeout=SECONDS to WebKitTestRunner for WebKit2.
cmd.extend(self._port.get_option('additional_drt_flag', []))
+ cmd.extend(self._port.additional_drt_flag())
- if pixel_tests and not self._port.supports_switching_pixel_tests_per_test():
- cmd.append('--pixel-tests')
cmd.extend(per_test_args)
cmd.append('-')
@@ -319,6 +320,8 @@ class Driver(object):
_log.debug('WebProcess crash, pid = %s, error_line = %s' % (str(pid), error_line))
if error_line.startswith("#PROCESS UNRESPONSIVE - WebProcess"):
self._subprocess_was_unresponsive = True
+            # We want to show this since it's not a regular crash and we probably don't have a crash log.
+ self.error_from_test += error_line
return True
return self.has_crashed()
@@ -335,11 +338,9 @@ class Driver(object):
assert not driver_input.image_hash or driver_input.should_run_pixel_test
+ # ' is the separator between arguments.
if driver_input.should_run_pixel_test:
- if self._port.supports_switching_pixel_tests_per_test():
- # We did not start the driver with --pixel-tests, instead we specify it per test.
- # "'" is the separator of command fields.
- command += "'" + '--pixel-test'
+ command += "'--pixel-test"
if driver_input.image_hash:
command += "'" + driver_input.image_hash
return command + "\n"
@@ -439,7 +440,7 @@ class ContentBlock(object):
self.decoded_content = None
def decode_content(self):
- if self.encoding == 'base64':
+ if self.encoding == 'base64' and self.content is not None:
self.decoded_content = base64.b64decode(self.content)
else:
self.decoded_content = self.content
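
As context for the command-building change above: with per-test pixel-test switching now the only mode, every line written to DumpRenderTree is the test URI plus optional fields separated by a literal apostrophe. The following is a small sketch of that string format with a hypothetical make_command() helper; the real _command_from_driver_input() also maps test names to URIs via the port.

def make_command(test_uri, should_run_pixel_test, image_hash=None):
    assert not image_hash or should_run_pixel_test
    # ' is the separator between fields on the line sent to DumpRenderTree.
    command = test_uri
    if should_run_pixel_test:
        command += "'--pixel-test"
    if image_hash:
        command += "'" + image_hash
    return command + '\n'

# Matches the expectation in ChromiumAndroidDriverTest.test_command_from_driver_input.
assert (make_command('http://127.0.0.1:8000/foo/bar/test.html', True, 'checksum') ==
        "http://127.0.0.1:8000/foo/bar/test.html'--pixel-test'checksum\n")
assert make_command('/tmp/test.html', False) == '/tmp/test.html\n'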
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
index 705c1bb7b..5e2019b1b 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/driver_unittest.py
@@ -163,7 +163,7 @@ class DriverTest(unittest.TestCase):
def test_no_timeout(self):
port = TestWebKitPort()
driver = Driver(port, 0, pixel_tests=True, no_timeout=True)
- self.assertEquals(driver.cmd_line(True, []), ['/mock-build/DumpRenderTree', '--no-timeout', '--pixel-tests', '-'])
+ self.assertEquals(driver.cmd_line(True, []), ['/mock-build/DumpRenderTree', '--no-timeout', '-'])
def test_check_for_driver_crash(self):
port = TestWebKitPort()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/efl.py b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
index 25a81d2da..e5635744d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/efl.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/efl.py
@@ -27,6 +27,8 @@
"""WebKit Efl implementation of the Port interface."""
+import os
+
from webkitpy.layout_tests.models.test_configuration import TestConfiguration
from webkitpy.layout_tests.port.base import Port
from webkitpy.layout_tests.port.pulseaudio_sanitizer import PulseAudioSanitizer
@@ -51,6 +53,10 @@ class EflPort(Port, PulseAudioSanitizer):
def setup_environ_for_server(self, server_name=None):
env = super(EflPort, self).setup_environ_for_server(server_name)
+        # If the DISPLAY environment variable is unset on the system (e.g. on a build bot),
+        # remove it from the environment dictionary.
+ if not 'DISPLAY' in os.environ:
+ del env['DISPLAY']
env['TEST_RUNNER_INJECTED_BUNDLE_FILENAME'] = self._build_path('lib', 'libTestRunnerInjectedBundle.so')
env['TEST_RUNNER_PLUGIN_PATH'] = self._build_path('lib')
if self.webprocess_cmd_prefix:
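
The EFL change above drops DISPLAY from the server environment when the host itself has none, so a headless build bot does not hand the test server a stale display. A tiny sketch of that pattern follows; note that an extra membership check is added here purely so the standalone example cannot raise KeyError, which the patch itself does not need.

import os

def scrub_display(env):
    # Remove DISPLAY from the server environment when the host has no X display
    # (e.g. a headless build bot); guarded so the sketch is safe to run anywhere.
    if 'DISPLAY' not in os.environ and 'DISPLAY' in env:
        del env['DISPLAY']
    return env

server_env = {'DISPLAY': ':0', 'TEST_RUNNER_PLUGIN_PATH': '/mock-build/lib'}
scrub_display(server_env)
# DISPLAY survives only if the current process also has one.
assert ('DISPLAY' in server_env) == ('DISPLAY' in os.environ)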
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
index 29ab861ce..efebf2db0 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/gtk.py
@@ -82,9 +82,6 @@ class GtkPort(Port, PulseAudioSanitizer):
def _path_to_image_diff(self):
return self._build_path('Programs', 'ImageDiff')
- def check_build(self, needs_http):
- return self._check_driver()
-
def _path_to_apache(self):
if self._is_redhat_based():
return '/usr/sbin/httpd'
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py b/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py
index 2cccc1f5e..08202ace5 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/image_diff.py
@@ -62,8 +62,7 @@ class ImageDiffer(object):
len(expected_contents), expected_contents))
return self._read()
except IOError as exception:
- _log.error("Failed to compute an image diff: %s" % str(exception))
- return (True, 0)
+ return (None, 0, "Failed to compute an image diff: %s" % str(exception))
def _start(self, tolerance):
command = [self._port._path_to_image_diff(), '--tolerance', str(tolerance)]
@@ -77,7 +76,7 @@ class ImageDiffer(object):
output = None
output_image = ""
- while True:
+ while not self._process.timed_out and not self._process.has_crashed():
output = self._process.read_stdout_line(deadline)
if self._process.timed_out or self._process.has_crashed() or not output:
break
@@ -93,12 +92,14 @@ class ImageDiffer(object):
break
stderr = self._process.pop_all_buffered_stderr()
+ err_str = ''
if stderr:
- _log.warn("ImageDiff produced stderr output:\n" + stderr)
+ err_str += "ImageDiff produced stderr output:\n" + stderr
if self._process.timed_out:
- _log.error("ImageDiff timed out")
+ err_str += "ImageDiff timed out\n"
if self._process.has_crashed():
- _log.error("ImageDiff crashed")
+ err_str += "ImageDiff crashed\n"
+
# FIXME: There is no need to shut down the ImageDiff server after every diff.
self._process.stop()
@@ -106,10 +107,10 @@ class ImageDiffer(object):
if output and output.startswith('diff'):
m = re.match('diff: (.+)% (passed|failed)', output)
if m.group(2) == 'passed':
- return [None, 0]
+ return (None, 0, None)
diff_percent = float(m.group(1))
- return (output_image, diff_percent)
+ return (output_image, diff_percent, err_str or None)
def stop(self):
if self._process:
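
For the ImageDiff change above: diff results now come back as a three-tuple (diff_image, diff_percent, error_string) instead of a pair, and error text is returned rather than logged. Here is a standalone sketch of just the output parsing, fed canned 'diff: N% passed|failed' lines instead of a live ImageDiff process; the no-output fallback message is the sketch's own placeholder, not taken from the patch.

import re

def parse_image_diff_output(output, output_image='', err_str=None):
    # ImageDiff reports e.g. 'diff: 100% failed' or 'diff: 0% passed'.
    if output and output.startswith('diff'):
        m = re.match('diff: (.+)% (passed|failed)', output)
        if m.group(2) == 'passed':
            return (None, 0, None)
        return (output_image, float(m.group(1)), err_str or None)
    # Placeholder for the sketch; the real _read() folds timeouts/crashes into err_str.
    return (None, 0, err_str or 'no ImageDiff output')

assert parse_image_diff_output('diff: 0% passed') == (None, 0, None)
assert parse_image_diff_output('diff: 100% failed', output_image='') == ('', 100.0, None)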
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py
index b06756c35..46cc98a11 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/image_diff_unittest.py
@@ -46,7 +46,12 @@ class FakePort(object):
class TestImageDiffer(unittest.TestCase):
- def test_diff_image(self):
+ def test_diff_image_failed(self):
port = FakePort(['diff: 100% failed\n'])
image_differ = ImageDiffer(port)
- self.assertEquals(image_differ.diff_image('foo', 'bar', 0.1), ('', 100.0))
+ self.assertEquals(image_differ.diff_image('foo', 'bar', 0.1), ('', 100.0, None))
+
+ def test_diff_image_passed(self):
+ port = FakePort(['diff: 0% passed\n'])
+ image_differ = ImageDiffer(port)
+ self.assertEquals(image_differ.diff_image('foo', 'bar', 0.1), (None, 0, None))
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mac.py b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
index 756bd2abe..d0caa5b22 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/mac.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mac.py
@@ -235,7 +235,7 @@ class MacPort(ApplePort):
for (test_name, process_name, pid) in crashed_processes:
# Passing None for output. This is a second pass after the test finished so
            # if the output had any logging we would have already collected it.
- crash_log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)
+ crash_log = self._get_crash_log(process_name, pid, None, None, start_time, wait_for_log=False)[1]
if not crash_log:
continue
crash_logs[test_name] = crash_log
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
index 964ef07ef..56ae5a532 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/mock_drt_unittest.py
@@ -60,6 +60,9 @@ class MockDRTPortTest(port_testcase.PortTestCase):
def test_diff_image(self):
pass
+ def test_diff_image_crashed(self):
+ pass
+
def test_uses_apache(self):
pass
@@ -87,6 +90,10 @@ class MockDRTPortTest(port_testcase.PortTestCase):
def test_get_crash_log(self):
pass
+ def test_check_build(self):
+ pass
+
+
class MockDRTTest(unittest.TestCase):
def input_line(self, port, test_name, checksum=None):
url = port.create_driver(0).test_to_uri(test_name)
@@ -259,7 +266,3 @@ class MockTestShellTest(MockDRTTest):
self.assertTrue(options.test_shell)
self.assertTrue(options.pixel_tests)
self.assertEquals(options.pixel_path, '/tmp/png_result0.png')
-
-
-if __name__ == '__main__':
- port_testcase.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
index a366e8951..44bf16768 100755
--- a/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/port_testcase.py
@@ -254,17 +254,37 @@ class PortTestCase(unittest.TestCase):
self.proc = None
def make_proc(port, nm, cmd, env):
- self.proc = MockServerProcess(port, nm, cmd, env, lines=['diff: 100% failed\n'])
+ self.proc = MockServerProcess(port, nm, cmd, env, lines=['diff: 100% failed\n', 'diff: 100% failed\n'])
return self.proc
port._server_process_constructor = make_proc
port.setup_test_run()
- self.assertEquals(port.diff_image('foo', 'bar'), ('', 100.0))
+ self.assertEquals(port.diff_image('foo', 'bar'), ('', 100.0, None))
self.assertEquals(self.proc.cmd[1:3], ["--tolerance", "0.1"])
+
+ self.assertEquals(port.diff_image('foo', 'bar', None), ('', 100.0, None))
+ self.assertEquals(self.proc.cmd[1:3], ["--tolerance", "0.1"])
+
+ self.assertEquals(port.diff_image('foo', 'bar', 0), ('', 100.0, None))
+ self.assertEquals(self.proc.cmd[1:3], ["--tolerance", "0"])
+
port.clean_up_test_run()
self.assertTrue(self.proc.stopped)
self.assertEquals(port._image_differ, None)
+ def test_diff_image_crashed(self):
+ port = self.make_port()
+ self.proc = None
+
+ def make_proc(port, nm, cmd, env):
+ self.proc = MockServerProcess(port, nm, cmd, env, crashed=True)
+ return self.proc
+
+ port._server_process_constructor = make_proc
+ port.setup_test_run()
+ self.assertEquals(port.diff_image('foo', 'bar'), ('', 0, 'ImageDiff crashed\n'))
+ port.clean_up_test_run()
+
def test_check_wdiff(self):
port = self.make_port()
port.check_wdiff()
@@ -563,33 +583,26 @@ class PortTestCase(unittest.TestCase):
port._apache_config_file_name_for_platform = lambda platform: 'httpd.conf'
self.assertEquals(port._path_to_apache_config_file(), '/mock-checkout/LayoutTests/http/conf/httpd.conf')
-
-# FIXME: This class and main() should be merged into test-webkitpy.
-class EnhancedTestLoader(unittest.TestLoader):
- integration_tests = False
- unit_tests = True
-
- def getTestCaseNames(self, testCaseClass):
- def isTestMethod(attrname, testCaseClass=testCaseClass):
- if not hasattr(getattr(testCaseClass, attrname), '__call__'):
- return False
- return ((self.unit_tests and attrname.startswith('test_')) or
- (self.integration_tests and attrname.startswith('integration_test_')))
- testFnNames = filter(isTestMethod, dir(testCaseClass))
- testFnNames.sort()
- return testFnNames
-
-
-def main(argv=None):
- if argv is None:
- argv = sys.argv
-
- test_loader = EnhancedTestLoader()
- if '-i' in argv:
- test_loader.integration_tests = True
- argv.remove('-i')
- if '--no-unit-tests' in argv:
- test_loader.unit_tests = False
- argv.remove('--no-unit-tests')
-
- unittest.main(argv=argv, testLoader=test_loader)
+ def test_check_build(self):
+ port = self.make_port(options=MockOptions(build=True))
+ self.build_called = False
+
+ def build_driver_called():
+ self.build_called = True
+ return True
+
+ port._build_driver = build_driver_called
+ port.check_build(False)
+ self.assertTrue(self.build_called)
+
+ port = self.make_port(options=MockOptions(root='/tmp', build=True))
+ self.build_called = False
+ port._build_driver = build_driver_called
+ port.check_build(False)
+ self.assertFalse(self.build_called, None)
+
+ port = self.make_port(options=MockOptions(build=False))
+ self.build_called = False
+ port._build_driver = build_driver_called
+ port.check_build(False)
+ self.assertFalse(self.build_called, None)
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/qt.py b/Tools/Scripts/webkitpy/layout_tests/port/qt.py
index 392ab028f..6e7b0988d 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/qt.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/qt.py
@@ -184,13 +184,3 @@ class QtPort(Port):
return False
return result
- def _supports_switching_pixel_tests_per_test(self):
- return True
-
- def _should_run_as_pixel_test(self, test_input):
- return any(test_input.test_name.startswith(directory)
- for directory in QtPort._default_pixel_test_directories())
-
- @staticmethod
- def _default_pixel_test_directories():
- return ['compositing']
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py b/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
index ae48523eb..d234ebdc3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/server_process_mock.py
@@ -28,10 +28,10 @@
class MockServerProcess(object):
- def __init__(self, port_obj=None, name=None, cmd=None, env=None, universal_newlines=False, lines=None):
+ def __init__(self, port_obj=None, name=None, cmd=None, env=None, universal_newlines=False, lines=None, crashed=False):
self.timed_out = False
self.lines = lines or []
- self.crashed = False
+ self.crashed = crashed
self.writes = []
self.cmd = cmd
self.env = env
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/test.py b/Tools/Scripts/webkitpy/layout_tests/port/test.py
index 5714661fd..6302120d2 100644
--- a/Tools/Scripts/webkitpy/layout_tests/port/test.py
+++ b/Tools/Scripts/webkitpy/layout_tests/port/test.py
@@ -219,6 +219,10 @@ layer at (0,0) size 800x34
tests.add('websocket/tests/passes/text.html')
+    # For testing that tests are properly included from platform directories.
+ tests.add('platform/test-mac-leopard/http/test.html')
+ tests.add('platform/test-win-win7/http/test.html')
+
# For --no-http tests, test that platform specific HTTP tests are properly skipped.
tests.add('platform/test-snow-leopard/http/test.html')
tests.add('platform/test-snow-leopard/websocket/test.html')
@@ -402,8 +406,8 @@ class TestPort(Port):
def diff_image(self, expected_contents, actual_contents, tolerance=None):
diffed = actual_contents != expected_contents
if diffed:
- return ["< %s\n---\n> %s\n" % (expected_contents, actual_contents), 1]
- return (None, 0)
+ return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), 1, None)
+ return (None, 0, None)
def layout_tests_dir(self):
return LAYOUT_TEST_DIR
@@ -513,10 +517,6 @@ class TestPort(Port):
VirtualTestSuite('virtual/skipped', 'failures/expected', ['--virtual-arg2']),
]
- def supports_switching_pixel_tests_per_test(self):
- # Let it true so we can test the --pixel-test-directory option.
- return True
-
class TestDriver(Driver):
"""Test/Dummy implementation of the DumpRenderTree interface."""
diff --git a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py b/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
deleted file mode 100755
index 058787c71..000000000
--- a/Tools/Scripts/webkitpy/layout_tests/port/webkit.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-# Copyright (C) 2010 Google Inc. All rights reserved.
-# Copyright (C) 2010 Gabor Rapcsanyi <rgabor@inf.u-szeged.hu>, University of Szeged
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-# * Neither the Google name nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# FIXME: this is a stub file needed to ensure that chrome still compiles
-# until we can remove this from the browser_tests.isolate file downstream.
-# See https://bugs.webkit.org/show_bug.cgi?id=92549
-pass
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
index 95a07f59e..06b3032ff 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py
@@ -97,11 +97,6 @@ def run(port, options, args, regular_output=sys.stderr, buildbot_output=sys.stdo
for warning in warnings:
_log.warning(warning)
- if options.help_printing:
- printer.help_printing()
- printer.cleanup()
- return 0
-
if options.lint_test_files:
return lint(port, options)
@@ -182,6 +177,9 @@ def _set_up_derived_options(port, options):
options.pixel_test_directories = list(varified_dirs)
+ if options.run_singly:
+ options.verbose = True
+
return warnings
@@ -246,8 +244,9 @@ def parse_args(args=None):
optparse.make_option("--per-tile-painting",
action="store_true",
help="Use per-tile painting of composited pages"),
- optparse.make_option("--adb-args", type="string",
- help="Arguments parsed to Android adb, to select device, etc."),
+ optparse.make_option("--adb-device",
+ action="append", default=[],
+ help="Run Android layout tests on these devices."),
]))
option_group_definitions.append(("EFL-specific Options", [
@@ -368,7 +367,6 @@ def parse_args(args=None):
optparse.make_option("-n", "--dry-run", action="store_true",
default=False,
help="Do everything but actually run the tests or upload results."),
- # old-run-webkit-tests has --valgrind instead of wrapper.
optparse.make_option("--wrapper",
help="wrapper command to insert before invocations of "
"DumpRenderTree; option is split on whitespace before "
@@ -383,7 +381,6 @@ def parse_args(args=None):
help="Run all tests, even those marked SKIP in the test list (same as --skipped=ignore)"),
optparse.make_option("--time-out-ms",
help="Set the timeout for each test"),
- # old-run-webkit-tests calls --randomize-order --random:
optparse.make_option("--randomize-order", action="store_true",
default=False, help=("Run tests in random order (useful "
"for tracking down corruption)")),
@@ -392,15 +389,11 @@ def parse_args(args=None):
"of the layout tests")),
optparse.make_option("--run-part", help=("Run a specified part (n:m), "
"the nth of m parts, of the layout tests")),
- # old-run-webkit-tests calls --batch-size: --nthly n
- # Restart DumpRenderTree every n tests (default: 1000)
optparse.make_option("--batch-size",
help=("Run a the tests in batches (n), after every n tests, "
"DumpRenderTree is relaunched."), type="int", default=None),
- # old-run-webkit-tests calls --run-singly: -1|--singly
- # Isolate each test case run (implies --nthly 1 --verbose)
optparse.make_option("--run-singly", action="store_true",
- default=False, help="run a separate DumpRenderTree for each test"),
+ default=False, help="run a separate DumpRenderTree for each test (implies --verbose)"),
optparse.make_option("--child-processes",
help="Number of DumpRenderTrees to run in parallel."),
# FIXME: Display default number of child processes that will run.
@@ -412,21 +405,16 @@ def parse_args(args=None):
optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int",
default=None, help="Exit after the first N crashes instead of "
"running all tests"),
- optparse.make_option("--iterations", type="int", help="Number of times to run the set of tests (e.g. ABCABCABC)"),
- optparse.make_option("--repeat-each", type="int", help="Number of times to run each test (e.g. AAABBBCCC)"),
+ optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"),
+ optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"),
optparse.make_option("--retry-failures", action="store_true",
default=True,
help="Re-try any tests that produce unexpected results (default)"),
optparse.make_option("--no-retry-failures", action="store_false",
dest="retry_failures",
help="Don't re-try any tests that produce unexpected results."),
- optparse.make_option("--max-locked-shards", type="int",
+ optparse.make_option("--max-locked-shards", type="int", default=1,
help="Set the maximum number of locked shards"),
- # For chromium-android to reduce the cost of restarting the driver.
- # FIXME: Remove the option once per-test arg is supported:
- # https://bugs.webkit.org/show_bug.cgi?id=91539.
- optparse.make_option("--shard-ref-tests", action="store_true",
- help="Run ref tests in dedicated shard(s). Enabled on Android by default."),
optparse.make_option("--additional-env-var", type="string", action="append", default=[],
help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"),
]))
@@ -484,7 +472,7 @@ def main(argv=None):
traceback.print_exc(file=sys.stderr)
raise
- logging.getLogger().setLevel(logging.DEBUG if options.verbose else logging.INFO)
+ logging.getLogger().setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
return run(port, options, args)
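
On the option change above: the free-form --adb-args string is replaced by a repeatable --adb-device flag, which ChromiumAndroidPort reads straight into its device list. A minimal optparse sketch of the repeatable-option pattern is shown below; only the option definition mirrors the patch, the parser around it is illustrative.

import optparse

parser = optparse.OptionParser()
parser.add_option('--adb-device', action='append', default=[],
                  help='Run Android layout tests on these devices.')

options, _ = parser.parse_args(['--adb-device', '123456789ABCDEF0',
                                '--adb-device', '23456789ABCDEF01'])
# Each occurrence appends, so listing two devices yields a two-entry list.
assert options.adb_device == ['123456789ABCDEF0', '23456789ABCDEF01']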
diff --git a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
index 6e85977b2..ecb58b89d 100755
--- a/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_integrationtest.py
@@ -60,10 +60,7 @@ from webkitpy.tool.mocktool import MockOptions
def parse_args(extra_args=None, record_results=False, tests_included=False, new_results=False, print_nothing=True):
extra_args = extra_args or []
- if print_nothing:
- args = ['--print', 'nothing']
- else:
- args = []
+ args = []
if not '--platform' in extra_args:
args.extend(['--platform', 'test'])
if not record_results:
@@ -95,7 +92,7 @@ def passing_run(extra_args=None, port_obj=None, record_results=False, tests_incl
buildbot_output = StringIO.StringIO()
regular_output = StringIO.StringIO()
res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
- return res == 0 and not regular_output.getvalue() and not buildbot_output.getvalue()
+ return res == 0
def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, host=None, new_results=False, shared_port=True):
@@ -186,9 +183,6 @@ class StreamTestingMixin(object):
def assertContains(self, stream, string):
self.assertTrue(string in stream.getvalue())
- def assertContainsLine(self, stream, string):
- self.assertTrue(string in stream.buflist)
-
def assertEmpty(self, stream):
self.assertFalse(stream.getvalue())
@@ -310,13 +304,13 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
def test_child_processes_2(self):
if self.should_test_processes:
_, _, regular_output, _ = logging_run(
- ['--print', 'config', '--child-processes', '2'], shared_port=False)
+ ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))
def test_child_processes_min(self):
if self.should_test_processes:
_, _, regular_output, _ = logging_run(
- ['--print', 'config', '--child-processes', '2', 'passes'],
+ ['--debug-rwt-logging', '--child-processes', '2', 'passes'],
tests_included=True, shared_port=False)
self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))
@@ -349,12 +343,6 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
res, out, err, user = logging_run(['--full-results-html'])
self.assertEqual(res, 0)
- def test_help_printing(self):
- res, out, err, user = logging_run(['--help-printing'])
- self.assertEqual(res, 0)
- self.assertEmpty(out)
- self.assertNotEmpty(err)
-
def test_hung_thread(self):
res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
'failures/expected/hang.html'],
@@ -378,13 +366,13 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
res, out, err, user = logging_run(['resources'], tests_included=True)
self.assertEqual(res, -1)
self.assertEmpty(out)
- self.assertContainsLine(err, 'No tests to run.\n')
+ self.assertContains(err, 'No tests to run.\n')
def test_no_tests_found_2(self):
res, out, err, user = logging_run(['foo'], tests_included=True)
self.assertEqual(res, -1)
self.assertEmpty(out)
- self.assertContainsLine(err, 'No tests to run.\n')
+ self.assertContains(err, 'No tests to run.\n')
def test_randomize_order(self):
# FIXME: verify order was shuffled
@@ -439,11 +427,11 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
host = MockHost()
res, out, err, _ = logging_run(['--iterations', '2',
'--repeat-each', '4',
- '--print', 'everything',
+ '--debug-rwt-logging',
'passes/text.html', 'failures/expected/text.html'],
tests_included=True, host=host, record_results=True)
- self.assertContainsLine(out, "=> Results: 8/16 tests passed (50.0%)\n")
- self.assertContainsLine(err, "All 16 tests ran as expected.\n")
+ self.assertContains(out, "=> Results: 8/16 tests passed (50.0%)\n")
+ self.assertContains(err, "All 16 tests ran as expected.\n")
def test_run_chunk(self):
# Test that we actually select the right chunk
@@ -761,7 +749,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
def test_retrying_and_flaky_tests(self):
host = MockHost()
- res, out, err, _ = logging_run(['failures/flaky'], tests_included=True, host=host)
+ res, out, err, _ = logging_run(['--debug-rwt-logging', 'failures/flaky'], tests_included=True, host=host)
self.assertEquals(res, 0)
self.assertTrue('Retrying' in err.getvalue())
self.assertTrue('Unexpected flakiness' in out.getvalue())
@@ -776,7 +764,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertEquals(res, 1)
self.assertTrue('Clobbering old results' in err.getvalue())
self.assertTrue('flaky/text.html' in err.getvalue())
- self.assertTrue('Unexpected text diff' in out.getvalue())
+ self.assertTrue('Unexpected text failures' in out.getvalue())
self.assertFalse('Unexpected flakiness' in out.getvalue())
self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
self.assertFalse(host.filesystem.exists('retries'))
@@ -795,7 +783,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
class ImageDiffTestPort(TestPort):
def diff_image(self, expected_contents, actual_contents, tolerance=None):
self.tolerance_used_for_diff_image = self._options.tolerance
- return (True, 1)
+ return (True, 1, None)
def get_port_for_run(args):
options, parsed_args = run_webkit_tests.parse_args(args)
@@ -863,7 +851,7 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
res, buildbot_output, regular_output, user = logging_run(['--additional-platform-directory', 'foo'])
- self.assertContainsLine(regular_output, '--additional-platform-directory=foo is ignored since it is not absolute\n')
+ self.assertContains(regular_output, '--additional-platform-directory=foo is ignored since it is not absolute\n')
def test_additional_expectations(self):
host = MockHost()
@@ -894,8 +882,9 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'websocket'))
def test_platform_tests_are_found(self):
- tests_run = get_tests_run(['http'], tests_included=True, flatten_batches=True)
- self.assertTrue('platform/test-snow-leopard/http/test.html' in tests_run)
+ tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'], tests_included=True, flatten_batches=True)
+ self.assertTrue('platform/test-mac-leopard/http/test.html' in tests_run)
+ self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)
def test_output_diffs(self):
# Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
@@ -928,6 +917,21 @@ class MainTest(unittest.TestCase, StreamTestingMixin):
# This is empty because we don't even get a chance to configure the logger before failing.
self.assertEquals(logs, '')
+ def test_verbose_in_child_processes(self):
+ # When we actually run multiple processes, we may have to reconfigure logging in the
+ # child process (e.g., on win32) and we need to make sure that works and we still
+ # see the verbose log output. However, we can't use logging_run() because using
+        # outputcapture to capture stdout and stderr later results in a nonpicklable host.
+ options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
+ host = MockHost()
+ port_obj = host.port_factory.get(port_name=options.platform, options=options)
+ buildbot_output = StringIO.StringIO()
+ regular_output = StringIO.StringIO()
+ res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
+ self.assertTrue('text.html passed' in regular_output.getvalue())
+ self.assertTrue('image.html passed' in regular_output.getvalue())
+
+
class EndToEndTest(unittest.TestCase):
def parse_full_results(self, full_results_text):
json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
@@ -974,7 +978,7 @@ class RebaselineTest(unittest.TestCase, StreamTestingMixin):
baseline = file + "-expected" + ext
baseline_msg = 'Writing new expected result "%s"\n' % baseline
self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
- self.assertContainsLine(err, baseline_msg)
+ self.assertContains(err, baseline_msg)
# FIXME: Add tests to ensure that we're *not* writing baselines when we're not
# supposed to be.
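
The unit-test changes above also replace the exact-line assertion (assertContainsLine over stream.buflist) with a substring check over the whole captured stream. A minimal Python 2 sketch of the check the tests now rely on, with an assumed standalone test class (in the patch the helper lives on StreamTestingMixin):

    import StringIO
    import unittest

    class StreamAssertionSketch(unittest.TestCase):
        def assertContains(self, stream, string):
            # Substring match anywhere in the captured output, so a message no
            # longer needs to arrive as one exact entry in stream.buflist.
            self.assertTrue(string in stream.getvalue())

        def test_substring_match(self):
            stream = StringIO.StringIO()
            stream.write('DEBUG: No tests to run.\n')
            self.assertContains(stream, 'No tests to run.\n')

    if __name__ == '__main__':
        unittest.main()
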
diff --git a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py
index a02e4c5b8..237d689ce 100755
--- a/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/servers/http_server_integrationtest.py
@@ -38,8 +38,6 @@ import sys
import tempfile
import unittest
-from webkitpy.layout_tests.port import port_testcase
-
class BaseTest(unittest.TestCase):
"""Basic framework for script tests."""
@@ -145,7 +143,3 @@ class WebsocketserverTest(BaseTest):
# FIXME: test TLS at some point?
PORTS = (8880, )
SCRIPT_NAME = 'new-run-webkit-websocketserver'
-
-
-if __name__ == '__main__':
- port_testcase.main()
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing.py b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
index 1c2fecd7b..c23fd7d40 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/printing.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing.py
@@ -38,115 +38,21 @@ from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations
from webkitpy.layout_tests.views.metered_stream import MeteredStream
-NUM_SLOW_TESTS_TO_LOG = 10
-PRINT_DEFAULT = "misc,one-line-progress,one-line-summary,unexpected,unexpected-results,updates"
-PRINT_EVERYTHING = "actual,config,expected,misc,one-line-progress,one-line-summary,slowest,timing,unexpected,unexpected-results,updates"
-
-HELP_PRINTING = """
-Output for run-webkit-tests is controlled by a comma-separated list of
-values passed to --print. Values either influence the overall output, or
-the output at the beginning of the run, during the run, or at the end:
-
-Overall options:
- nothing don't print anything. This overrides every other option
- default include the default options. This is useful for logging
- the default options plus additional settings.
- everything print (almost) everything (except the trace-* options,
- see below for the full list )
- misc print miscellaneous things like blank lines
-
-At the beginning of the run:
- config print the test run configuration
- expected print a summary of what is expected to happen
- (# passes, # failures, etc.)
-
-During the run:
- one-line-progress print a one-line progress message or bar
- unexpected print any unexpected results as they occur
- updates print updates on which stage is executing
- trace-everything print detailed info on every test's results
- (baselines, expectation, time it took to run). If
- this is specified it will override the '*-progress'
- options, the 'trace-unexpected' option, and the
- 'unexpected' option.
- trace-unexpected like 'trace-everything', but only for tests with
- unexpected results. If this option is specified,
- it will override the 'unexpected' option.
-
-At the end of the run:
- actual print a summary of the actual results
- slowest print %(slowest)d slowest tests and the time they took
- timing print timing statistics
- unexpected-results print a list of the tests with unexpected results
- one-line-summary print a one-line summary of the run
-
-Notes:
- - If 'nothing' is specified, it overrides all of the other options.
- - Specifying --verbose is equivalent to --print everything plus it
- changes the format of the log messages to add timestamps and other
- information. If you specify --verbose and --print X, then X overrides
- the --print everything implied by --verbose.
-
---print 'everything' is equivalent to --print '%(everything)s'.
-
-The default (--print default) is equivalent to --print '%(default)s'.
-""" % {'slowest': NUM_SLOW_TESTS_TO_LOG, 'everything': PRINT_EVERYTHING,
- 'default': PRINT_DEFAULT}
+NUM_SLOW_TESTS_TO_LOG = 10
def print_options():
return [
- # Note: We use print_options rather than just 'print' because print
- # is a reserved word.
- # Note: Also, we don't specify a default value so we can detect when
- # no flag is specified on the command line and use different defaults
- # based on whether or not --verbose is specified (since --print
- # overrides --verbose).
- optparse.make_option("--print", dest="print_options",
- help=("controls print output of test run. "
- "Use --help-printing for more.")),
- optparse.make_option("--help-printing", action="store_true",
- help="show detailed help on controlling print output"),
- optparse.make_option("-v", "--verbose", action="store_true",
- default=False, help="include debug-level logging"),
- ]
-
-
-def parse_print_options(print_options, verbose):
- """Parse the options provided to --print and dedup and rank them.
-
- Returns
- a set() of switches that govern how logging is done
-
- """
- if print_options:
- switches = set(print_options.split(','))
- elif verbose:
- switches = set(PRINT_EVERYTHING.split(','))
- else:
- switches = set(PRINT_DEFAULT.split(','))
-
- if 'nothing' in switches:
- return set()
-
- if 'everything' in switches:
- switches.discard('everything')
- switches.update(set(PRINT_EVERYTHING.split(',')))
-
- if 'default' in switches:
- switches.discard('default')
- switches.update(set(PRINT_DEFAULT.split(',')))
-
- if 'trace-everything' in switches:
- switches.discard('one-line-progress')
- switches.discard('trace-unexpected')
- switches.discard('unexpected')
-
- if 'trace-unexpected' in switches:
- switches.discard('unexpected')
-
- return switches
+ optparse.make_option('-q', '--quiet', action='store_true', default=False,
+ help='run quietly (errors, warnings, and progress only)'),
+ optparse.make_option('-v', '--verbose', action='store_true', default=False,
+ help='print a summarized result for every test (one line per test)'),
+ optparse.make_option('--details', action='store_true', default=False,
+ help='print detailed results for every test'),
+ optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
+ help='print timestamps and debug information for run-webkit-tests itself'),
+ ]
class Printer(object):
@@ -162,21 +68,14 @@ class Printer(object):
By default the buildbot-parsed code gets logged to stdout, and regular
output gets logged to stderr."""
def __init__(self, port, options, regular_output, buildbot_output, logger=None):
- """
- Args
- port interface to port-specific routines
- options OptionParser object with command line settings
- regular_output stream to which output intended only for humans
- should be written
- buildbot_output stream to which output intended to be read by
- the buildbots (and humans) should be written
- logger optional logger to integrate into the stream.
- """
+ self.num_completed = 0
+ self.num_tests = 0
self._port = port
self._options = options
self._buildbot_stream = buildbot_output
- self._meter = MeteredStream(regular_output, options.verbose, logger=logger)
- self.switches = parse_print_options(options.print_options, options.verbose)
+ self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger)
+ self._running_tests = []
+ self._completed_tests = []
def cleanup(self):
self._meter.cleanup()
@@ -184,77 +83,54 @@ class Printer(object):
def __del__(self):
self.cleanup()
- # These two routines just hide the implementation of the switches.
- def disabled(self, option):
- return not option in self.switches
-
- def enabled(self, option):
- return option in self.switches
-
- def help_printing(self):
- self._write(HELP_PRINTING)
-
def print_config(self):
- """Prints the configuration for the test run."""
- self._print_config("Using port '%s'" % self._port.name())
- self._print_config("Test configuration: %s" % self._port.test_configuration())
- self._print_config("Placing test results in %s" % self._options.results_directory)
+ self._print_default("Using port '%s'" % self._port.name())
+ self._print_default("Test configuration: %s" % self._port.test_configuration())
+ self._print_default("Placing test results in %s" % self._options.results_directory)
# FIXME: should these options be in printing_options?
if self._options.new_baseline:
- self._print_config("Placing new baselines in %s" % self._port.baseline_path())
+ self._print_default("Placing new baselines in %s" % self._port.baseline_path())
fs = self._port.host.filesystem
fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
- self._print_config("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
+ self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))
- self._print_config("Using %s build" % self._options.configuration)
+ self._print_default("Using %s build" % self._options.configuration)
if self._options.pixel_tests:
- self._print_config("Pixel tests enabled")
+ self._print_default("Pixel tests enabled")
else:
- self._print_config("Pixel tests disabled")
+ self._print_default("Pixel tests disabled")
+
+ self._print_default("Regular timeout: %s, slow test timeout: %s" %
+ (self._options.time_out_ms, self._options.slow_time_out_ms))
- self._print_config("Regular timeout: %s, slow test timeout: %s" %
- (self._options.time_out_ms, self._options.slow_time_out_ms))
+ self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
+ self._print_default('')
- self._print_config('Command line: ' + ' '.join(self._port.driver_cmd_line()))
- self._print_config('')
+ def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
+ found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
+ if repeat_each * iterations > 1:
+ found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
+ found_str += ', skipping %d' % (num_all_test_files - num_to_run)
+ self._print_default(found_str + '.')
- def print_expected(self, num_all_test_files, result_summary, tests_with_result_type_callback):
- self._print_expected('Found %s.' % grammar.pluralize('test', num_all_test_files))
+ def print_expected(self, result_summary, tests_with_result_type_callback):
self._print_expected_results_of_type(result_summary, test_expectations.PASS, "passes", tests_with_result_type_callback)
self._print_expected_results_of_type(result_summary, test_expectations.FAIL, "failures", tests_with_result_type_callback)
self._print_expected_results_of_type(result_summary, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
- self._print_expected_results_of_type(result_summary, test_expectations.SKIP, "skipped", tests_with_result_type_callback)
- self._print_expected('')
-
- if self._options.repeat_each > 1:
- self._print_expected('Running each test %d times.' % self._options.repeat_each)
- if self._options.iterations > 1:
- self._print_expected('Running %d iterations of the tests.' % self._options.iterations)
- if self._options.iterations > 1 or self._options.repeat_each > 1:
- self._print_expected('')
+ self._print_debug('')
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
driver_name = self._port.driver_name()
if num_workers == 1:
- self._print_config("Running 1 %s over %s." %
- (driver_name, grammar.pluralize('shard', num_shards)))
+ self._print_default("Running 1 %s over %s." % (driver_name, grammar.pluralize('shard', num_shards)))
else:
- self._print_config("Running %d %ss in parallel over %d shards (%d locked)." %
+ self._print_default("Running %d %ss in parallel over %d shards (%d locked)." %
(num_workers, driver_name, num_shards, num_locked_shards))
- self._print_config('')
-
- def _print_expected_results_of_type(self, result_summary,
- result_type, result_type_str, tests_with_result_type_callback):
- """Print the number of the tests in a given result class.
-
- Args:
- result_summary - the object containing all the results to report on
- result_type - the particular result type to report in the summary.
- result_type_str - a string description of the result_type.
- expectations - populated TestExpectations object for stats
- """
+ self._print_default('')
+
+ def _print_expected_results_of_type(self, result_summary, result_type, result_type_str, tests_with_result_type_callback):
tests = tests_with_result_type_callback(result_type)
now = result_summary.tests_by_timeline[test_expectations.NOW]
wontfix = result_summary.tests_by_timeline[test_expectations.WONTFIX]
@@ -263,12 +139,9 @@ class Printer(object):
# nicely-aligned table.
fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
% (self._num_digits(now), self._num_digits(wontfix)))
- self._print_expected(fmtstr %
- (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
+ self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))
def _num_digits(self, num):
- """Returns the number of digits needed to represent the length of a
- sequence."""
ndigits = 1
if len(num):
ndigits = int(math.log10(len(num))) + 1
@@ -277,61 +150,36 @@ class Printer(object):
def print_results(self, run_time, thread_timings, test_timings, individual_test_timings, result_summary, unexpected_results):
self._print_timing_statistics(run_time, thread_timings, test_timings, individual_test_timings, result_summary)
self._print_result_summary(result_summary)
-
- self.print_one_line_summary(result_summary.total - result_summary.expected_skips, result_summary.expected - result_summary.expected_skips, result_summary.unexpected)
-
- self.print_unexpected_results(unexpected_results)
+ self._print_one_line_summary(result_summary.total - result_summary.expected_skips,
+ result_summary.expected - result_summary.expected_skips,
+ result_summary.unexpected)
+ self._print_unexpected_results(unexpected_results)
def _print_timing_statistics(self, total_time, thread_timings,
directory_test_timings, individual_test_timings,
result_summary):
- """Record timing-specific information for the test run.
-
- Args:
- total_time: total elapsed time (in seconds) for the test run
- thread_timings: wall clock time each thread ran for
- directory_test_timings: timing by directory
- individual_test_timings: timing by file
- result_summary: summary object for the test run
- """
- self.print_timing("Test timing:")
- self.print_timing(" %6.2f total testing time" % total_time)
- self.print_timing("")
- self.print_timing("Thread timing:")
+ self._print_debug("Test timing:")
+ self._print_debug(" %6.2f total testing time" % total_time)
+ self._print_debug("")
+ self._print_debug("Thread timing:")
cuml_time = 0
for t in thread_timings:
- self.print_timing(" %10s: %5d tests, %6.2f secs" %
- (t['name'], t['num_tests'], t['total_time']))
+ self._print_debug(" %10s: %5d tests, %6.2f secs" % (t['name'], t['num_tests'], t['total_time']))
cuml_time += t['total_time']
- self.print_timing(" %6.2f cumulative, %6.2f optimal" %
- (cuml_time, cuml_time / int(self._options.child_processes)))
- self.print_timing("")
+ self._print_debug(" %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / int(self._options.child_processes)))
+ self._print_debug("")
self._print_aggregate_test_statistics(individual_test_timings)
- self._print_individual_test_times(individual_test_timings,
- result_summary)
+ self._print_individual_test_times(individual_test_timings, result_summary)
self._print_directory_timings(directory_test_timings)
def _print_aggregate_test_statistics(self, individual_test_timings):
- """Prints aggregate statistics (e.g. median, mean, etc.) for all tests.
- Args:
- individual_test_timings: List of TestResults for all tests.
- """
times_for_dump_render_tree = [test_stats.test_run_time for test_stats in individual_test_timings]
- self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):",
- times_for_dump_render_tree)
-
- def _print_individual_test_times(self, individual_test_timings,
- result_summary):
- """Prints the run times for slow, timeout and crash tests.
- Args:
- individual_test_timings: List of TestStats for all tests.
- result_summary: summary object for test run
- """
- # Reverse-sort by the time spent in DumpRenderTree.
- individual_test_timings.sort(lambda a, b:
- cmp(b.test_run_time, a.test_run_time))
+ self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)
+ def _print_individual_test_times(self, individual_test_timings, result_summary):
+ # Reverse-sort by the time spent in DumpRenderTree.
+ individual_test_timings.sort(lambda a, b: cmp(b.test_run_time, a.test_run_time))
num_printed = 0
slow_tests = []
timeout_or_crash_tests = []
@@ -354,63 +202,37 @@ class Printer(object):
num_printed = num_printed + 1
unexpected_slow_tests.append(test_tuple)
- self.print_timing("")
- self._print_test_list_timing("%s slowest tests that are not "
- "marked as SLOW and did not timeout/crash:" % NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
- self.print_timing("")
+ self._print_debug("")
+ self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
+ NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
+ self._print_debug("")
self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
- self.print_timing("")
- self._print_test_list_timing("Tests that timed out or crashed:",
- timeout_or_crash_tests)
- self.print_timing("")
+ self._print_debug("")
+ self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
+ self._print_debug("")
def _print_test_list_timing(self, title, test_list):
- """Print timing info for each test.
-
- Args:
- title: section heading
- test_list: tests that fall in this section
- """
- if self.disabled('slowest'):
- return
-
- self.print_timing(title)
+ self._print_debug(title)
for test_tuple in test_list:
test_run_time = round(test_tuple.test_run_time, 1)
- self.print_timing(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
+ self._print_debug(" %s took %s seconds" % (test_tuple.test_name, test_run_time))
def _print_directory_timings(self, directory_test_timings):
- """Print timing info by directory for any directories that
- take > 10 seconds to run.
-
- Args:
- directory_test_timing: time info for each directory
- """
timings = []
for directory in directory_test_timings:
num_tests, time_for_directory = directory_test_timings[directory]
- timings.append((round(time_for_directory, 1), directory,
- num_tests))
+ timings.append((round(time_for_directory, 1), directory, num_tests))
timings.sort()
- self.print_timing("Time to process slowest subdirectories:")
+ self._print_debug("Time to process slowest subdirectories:")
min_seconds_to_print = 10
for timing in timings:
if timing[0] > min_seconds_to_print:
- self.print_timing(
- " %s took %s seconds to run %s tests." % (timing[1],
- timing[0], timing[2]))
- self.print_timing("")
+ self._print_debug(" %s took %s seconds to run %s tests." % (timing[1], timing[0], timing[2]))
+ self._print_debug("")
def _print_statistics_for_test_timings(self, title, timings):
- """Prints the median, mean and standard deviation of the values in
- timings.
-
- Args:
- title: Title for these timings.
- timings: A list of floats representing times.
- """
- self.print_timing(title)
+ self._print_debug(title)
timings.sort()
num_tests = len(timings)
@@ -432,158 +254,131 @@ class Printer(object):
sum_of_deviations = math.pow(timing - mean, 2)
std_deviation = math.sqrt(sum_of_deviations / num_tests)
- self.print_timing(" Median: %6.3f" % median)
- self.print_timing(" Mean: %6.3f" % mean)
- self.print_timing(" 90th percentile: %6.3f" % percentile90)
- self.print_timing(" 99th percentile: %6.3f" % percentile99)
- self.print_timing(" Standard dev: %6.3f" % std_deviation)
- self.print_timing("")
+ self._print_debug(" Median: %6.3f" % median)
+ self._print_debug(" Mean: %6.3f" % mean)
+ self._print_debug(" 90th percentile: %6.3f" % percentile90)
+ self._print_debug(" 99th percentile: %6.3f" % percentile99)
+ self._print_debug(" Standard dev: %6.3f" % std_deviation)
+ self._print_debug("")
def _print_result_summary(self, result_summary):
- """Print a short summary about how many tests passed.
+ if not self._options.debug_rwt_logging:
+ return
- Args:
- result_summary: information to log
- """
failed = result_summary.total_failures
total = result_summary.total - result_summary.expected_skips
- passed = total - failed
+ passed = total - failed - result_summary.remaining
pct_passed = 0.0
if total > 0:
pct_passed = float(passed) * 100 / total
- self.print_actual("")
- self.print_actual("=> Results: %d/%d tests passed (%.1f%%)" %
- (passed, total, pct_passed))
- self.print_actual("")
- self._print_result_summary_entry(result_summary,
- test_expectations.NOW, "Tests to be fixed")
+ self._print_for_bot("=> Results: %d/%d tests passed (%.1f%%)" % (passed, total, pct_passed))
+ self._print_for_bot("")
+ self._print_result_summary_entry(result_summary, test_expectations.NOW, "Tests to be fixed")
- self.print_actual("")
- self._print_result_summary_entry(result_summary,
- test_expectations.WONTFIX,
+ self._print_for_bot("")
+ # FIXME: We should be skipping anything marked WONTFIX, so we shouldn't bother logging these stats.
+ self._print_result_summary_entry(result_summary, test_expectations.WONTFIX,
"Tests that will only be fixed if they crash (WONTFIX)")
- self.print_actual("")
+ self._print_for_bot("")
- def _print_result_summary_entry(self, result_summary, timeline,
- heading):
- """Print a summary block of results for a particular timeline of test.
-
- Args:
- result_summary: summary to print results for
- timeline: the timeline to print results for (NOT, WONTFIX, etc.)
- heading: a textual description of the timeline
- """
+ def _print_result_summary_entry(self, result_summary, timeline, heading):
total = len(result_summary.tests_by_timeline[timeline])
not_passing = (total -
len(result_summary.tests_by_expectation[test_expectations.PASS] &
result_summary.tests_by_timeline[timeline]))
- self.print_actual("=> %s (%d):" % (heading, not_passing))
+ self._print_for_bot("=> %s (%d):" % (heading, not_passing))
for result in TestExpectations.EXPECTATION_ORDER:
- if result == test_expectations.PASS:
+ if result in (test_expectations.PASS, test_expectations.SKIP):
continue
results = (result_summary.tests_by_expectation[result] &
result_summary.tests_by_timeline[timeline])
desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result]
if not_passing and len(results):
pct = len(results) * 100.0 / not_passing
- self.print_actual(" %5d %-24s (%4.1f%%)" %
- (len(results), desc[len(results) != 1], pct))
-
-
- def print_actual(self, msg):
- if self.disabled('actual'):
- return
- self._buildbot_stream.write("%s\n" % msg)
-
- def _print_config(self, msg):
- self.write(msg, 'config')
-
- def _print_expected(self, msg):
- self.write(msg, 'expected')
-
- def print_timing(self, msg):
- self.write(msg, 'timing')
-
- def print_one_line_summary(self, total, expected, unexpected):
- """Print a one-line summary of the test run to stdout.
-
- Args:
- total: total number of tests run
- expected: number of expected results
- unexpected: number of unexpected results
- """
- if self.disabled('one-line-summary'):
- return
+ self._print_for_bot(" %5d %-24s (%4.1f%%)" % (len(results), desc[0], pct))
+ def _print_one_line_summary(self, total, expected, unexpected):
incomplete = total - expected - unexpected
incomplete_str = ''
if incomplete:
- self._write("")
+ self._print_default("")
incomplete_str = " (%d didn't run)" % incomplete
+ if self._options.verbose or self._options.debug_rwt_logging or unexpected:
+ self.writeln("")
+
+ summary = ''
if unexpected == 0:
if expected == total:
if expected > 1:
- self._write("All %d tests ran as expected." % expected)
+ summary = "All %d tests ran as expected." % expected
else:
- self._write("The test ran as expected.")
+ summary = "The test ran as expected."
else:
- self._write("%s ran as expected%s." % (grammar.pluralize('test', expected), incomplete_str))
+ summary = "%s ran as expected%s." % (grammar.pluralize('test', expected), incomplete_str)
else:
- self._write("%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str))
- self._write("")
-
- def print_finished_test(self, result, expected, exp_str, got_str, result_summary, retrying, test_files_list):
- self.print_test_result(result, expected, exp_str, got_str)
- self.print_progress(result_summary, retrying, test_files_list)
-
- def print_test_result(self, result, expected, exp_str, got_str):
- """Print the result of the test as determined by --print.
-
- This routine is used to print the details of each test as it completes.
-
- Args:
- result - The actual TestResult object
- expected - Whether the result we got was an expected result
- exp_str - What we expected to get (used for tracing)
- got_str - What we actually got (used for tracing)
-
- Note that we need all of these arguments even though they seem
- somewhat redundant, in order to keep this routine from having to
- known anything about the set of expectations.
- """
- if (self.enabled('trace-everything') or
- self.enabled('trace-unexpected') and not expected):
+ summary = "%s ran as expected, %d didn't%s:" % (grammar.pluralize('test', expected), unexpected, incomplete_str)
+
+ self._print_quiet(summary)
+ self._print_quiet("")
+
+ def print_started_test(self, test_name):
+ self._running_tests.append(test_name)
+ if len(self._running_tests) > 1:
+ suffix = ' (+%d)' % (len(self._running_tests) - 1)
+ else:
+ suffix = ''
+ if self._options.verbose:
+ write = self._meter.write_update
+ else:
+ write = self._meter.write_throttled_update
+ write('[%d/%d] %s%s' % (self.num_completed, self.num_tests, test_name, suffix))
+
+ def print_finished_test(self, result, expected, exp_str, got_str):
+ self.num_completed += 1
+ test_name = result.test_name
+ if self._options.details:
self._print_test_trace(result, exp_str, got_str)
- elif not expected and self.enabled('unexpected'):
- self._print_unexpected_test_result(result)
+ elif (self._options.verbose and not self._options.debug_rwt_logging) or not expected:
+ desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type]
+ suffix = ' ' + desc[1]
+ if not expected:
+ suffix += ' unexpectedly' + desc[2]
+ self.writeln("[%d/%d] %s%s" % (self.num_completed, self.num_tests, test_name, suffix))
+ elif self.num_completed == self.num_tests:
+ self._meter.write_update('')
+ else:
+ desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type]
+ suffix = ' ' + desc[1]
+ if test_name == self._running_tests[0]:
+ self._completed_tests.insert(0, [test_name, suffix])
+ else:
+ self._completed_tests.append([test_name, suffix])
+
+ for test_name, suffix in self._completed_tests:
+ self._meter.write_throttled_update('[%d/%d] %s%s' % (self.num_completed, self.num_tests, test_name, suffix))
+ self._completed_tests = []
+ self._running_tests.remove(test_name)
def _print_test_trace(self, result, exp_str, got_str):
- """Print detailed results of a test (triggered by --print trace-*).
- For each test, print:
- - location of the expected baselines
- - expected results
- - actual result
- - timing info
- """
test_name = result.test_name
- self._write('trace: %s' % test_name)
+ self._print_default('[%d/%d] %s' % (self.num_completed, self.num_tests, test_name))
base = self._port.lookup_virtual_test_base(test_name)
if base:
args = ' '.join(self._port.lookup_virtual_test_args(test_name))
- self._write(' base: %s' % base)
- self._write(' args: %s' % args)
+ self._print_default(' base: %s' % base)
+ self._print_default(' args: %s' % args)
for extension in ('.txt', '.png', '.wav', '.webarchive'):
self._print_baseline(test_name, extension)
- self._write(' exp: %s' % exp_str)
- self._write(' got: %s' % got_str)
- self._write(' took: %-.3f' % result.test_run_time)
- self._write('')
+ self._print_default(' exp: %s' % exp_str)
+ self._print_default(' got: %s' % got_str)
+ self._print_default(' took: %-.3f' % result.test_run_time)
+ self._print_default('')
def _print_baseline(self, test_name, extension):
baseline = self._port.expected_filename(test_name, extension)
@@ -591,37 +386,12 @@ class Printer(object):
relpath = self._port.relative_test_filename(baseline)
else:
relpath = '<none>'
- self._write(' %s: %s' % (extension[1:], relpath))
+ self._print_default(' %s: %s' % (extension[1:], relpath))
- def _print_unexpected_test_result(self, result):
- """Prints one unexpected test result line."""
- desc = TestExpectations.EXPECTATION_DESCRIPTIONS[result.type][0]
- self.write(" %s -> unexpected %s" % (result.test_name, desc), "unexpected")
-
- def print_progress(self, result_summary, retrying, test_list):
+ def _print_progress(self, result_summary, retrying, test_list):
"""Print progress through the tests as determined by --print."""
- if self.disabled('one-line-progress'):
- return
-
- if result_summary.remaining == 0:
- self._meter.write_update('')
- return
-
- percent_complete = 100 * (result_summary.expected +
- result_summary.unexpected) / result_summary.total
- action = "Testing"
- if retrying:
- action = "Retrying"
-
- self._meter.write_throttled_update("%s (%d%%): %d ran as expected, %d didn't, %d left" %
- (action, percent_complete, result_summary.expected,
- result_summary.unexpected, result_summary.remaining))
-
- def print_unexpected_results(self, unexpected_results):
- """Prints a list of the unexpected results to the buildbot stream."""
- if self.disabled('unexpected-results'):
- return
-
+ def _print_unexpected_results(self, unexpected_results):
+ # Prints to the buildbot stream
passes = {}
flaky = {}
regressions = {}
@@ -634,17 +404,11 @@ class Printer(object):
expected = results['expected'].split(" ")
if actual == ['PASS']:
if 'CRASH' in expected:
- add_to_dict_of_lists(passes,
- 'Expected to crash, but passed',
- test)
+ add_to_dict_of_lists(passes, 'Expected to crash, but passed', test)
elif 'TIMEOUT' in expected:
- add_to_dict_of_lists(passes,
- 'Expected to timeout, but passed',
- test)
+ add_to_dict_of_lists(passes, 'Expected to timeout, but passed', test)
else:
- add_to_dict_of_lists(passes,
- 'Expected to fail, but passed',
- test)
+ add_to_dict_of_lists(passes, 'Expected to fail, but passed', test)
elif len(actual) > 1:
# We group flaky tests by the first actual result we got.
add_to_dict_of_lists(flaky, actual[0], test)
@@ -654,23 +418,21 @@ class Printer(object):
resultsjsonparser.for_each_test(unexpected_results['tests'], add_result)
if len(passes) or len(flaky) or len(regressions):
- self._buildbot_stream.write("\n")
-
+ self._print_for_bot("")
if len(passes):
for key, tests in passes.iteritems():
- self._buildbot_stream.write("%s: (%d)\n" % (key, len(tests)))
+ self._print_for_bot("%s: (%d)" % (key, len(tests)))
tests.sort()
for test in tests:
- self._buildbot_stream.write(" %s\n" % test)
- self._buildbot_stream.write("\n")
- self._buildbot_stream.write("\n")
+ self._print_for_bot(" %s" % test)
+ self._print_for_bot("")
+ self._print_for_bot("")
if len(flaky):
descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
for key, tests in flaky.iteritems():
result = TestExpectations.EXPECTATIONS[key.lower()]
- self._buildbot_stream.write("Unexpected flakiness: %s (%d)\n"
- % (descriptions[result][1], len(tests)))
+ self._print_for_bot("Unexpected flakiness: %s (%d)" % (descriptions[result][0], len(tests)))
tests.sort()
for test in tests:
@@ -679,41 +441,41 @@ class Printer(object):
expected = result['expected'].split(" ")
result = TestExpectations.EXPECTATIONS[key.lower()]
new_expectations_list = list(set(actual) | set(expected))
- self._buildbot_stream.write(" %s = %s\n" %
- (test, " ".join(new_expectations_list)))
- self._buildbot_stream.write("\n")
- self._buildbot_stream.write("\n")
+ self._print_for_bot(" %s = %s" % (test, " ".join(new_expectations_list)))
+ self._print_for_bot("")
+ self._print_for_bot("")
if len(regressions):
descriptions = TestExpectations.EXPECTATION_DESCRIPTIONS
for key, tests in regressions.iteritems():
result = TestExpectations.EXPECTATIONS[key.lower()]
- self._buildbot_stream.write(
- "Regressions: Unexpected %s : (%d)\n" % (
- descriptions[result][1], len(tests)))
+ self._print_for_bot("Regressions: Unexpected %s : (%d)" % (descriptions[result][0], len(tests)))
tests.sort()
for test in tests:
- self._buildbot_stream.write(" %s = %s\n" % (test, key))
- self._buildbot_stream.write("\n")
- self._buildbot_stream.write("\n")
+ self._print_for_bot(" %s = %s" % (test, key))
+ self._print_for_bot("")
- if len(unexpected_results['tests']) and self._options.verbose:
- self._buildbot_stream.write("%s\n" % ("-" * 78))
+ if len(unexpected_results['tests']) and self._options.debug_rwt_logging:
+ self._print_for_bot("%s" % ("-" * 78))
- def write_update(self, msg):
- if self.disabled('updates'):
- return
- self._meter.write_update(msg)
+ def _print_quiet(self, msg):
+ self.writeln(msg)
- def write(self, msg, option="misc"):
- if self.disabled(option):
- return
- self._write(msg)
+ def _print_default(self, msg):
+ if not self._options.quiet:
+ self.writeln(msg)
+
+ def _print_debug(self, msg):
+ if self._options.debug_rwt_logging:
+ self.writeln(msg)
- def writeln(self, *args, **kwargs):
- self._meter.writeln(*args, **kwargs)
+ def _print_for_bot(self, msg):
+ self._buildbot_stream.write(msg + "\n")
+
+ def write_update(self, msg):
+ self._meter.write_update(msg)
- def _write(self, msg):
+ def writeln(self, msg):
self._meter.writeln(msg)
def flush(self):
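
Taken together, the printing.py rewrite replaces the comma-separated --print switch sets with four flags (-q/--quiet, -v/--verbose, --details, --debug-rwt-logging) and three human-facing output levels. A rough standalone sketch of the gating, using assumed free-function names (the real methods are Printer._print_quiet, _print_default and _print_debug, and they write through MeteredStream rather than print):

    def make_print_functions(options):
        # Always printed: errors, warnings, and the one-line summary.
        def print_quiet(msg):
            print msg

        # Normal output; silenced by -q/--quiet.
        def print_default(msg):
            if not options.quiet:
                print msg

        # Configuration and timing details; shown only with --debug-rwt-logging.
        def print_debug(msg):
            if options.debug_rwt_logging:
                print msg

        return print_quiet, print_default, print_debug

Buildbot-facing output is handled separately by _print_for_bot, which writes to the buildbot stream unconditionally.
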
diff --git a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
index f8dd61db7..17fc4b9a3 100644
--- a/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
+++ b/Tools/Scripts/webkitpy/layout_tests/views/printing_unittest.py
@@ -57,36 +57,6 @@ class TestUtilityFunctions(unittest.TestCase):
options, args = get_options([])
self.assertTrue(options is not None)
- def test_parse_print_options(self):
- def test_switches(args, expected_switches_str, verbose=False):
- options, args = get_options(args)
- if expected_switches_str:
- expected_switches = set(expected_switches_str.split(','))
- else:
- expected_switches = set()
- switches = printing.parse_print_options(options.print_options,
- verbose)
- self.assertEqual(expected_switches, switches)
-
- # test that we default to the default set of switches
- test_switches([], printing.PRINT_DEFAULT)
-
- # test that verbose defaults to everything
- test_switches([], printing.PRINT_EVERYTHING, verbose=True)
-
- # test that --print default does what it's supposed to
- test_switches(['--print', 'default'], printing.PRINT_DEFAULT)
-
- # test that --print nothing does what it's supposed to
- test_switches(['--print', 'nothing'], None)
-
- # test that --print everything does what it's supposed to
- test_switches(['--print', 'everything'], printing.PRINT_EVERYTHING)
-
- # this tests that '--print X' overrides '--verbose'
- test_switches(['--print', 'actual'], 'actual', verbose=True)
-
-
class Testprinter(unittest.TestCase):
def assertEmpty(self, stream):
@@ -102,7 +72,7 @@ class Testprinter(unittest.TestCase):
stream.buflist = []
stream.buf = ''
- def get_printer(self, args=None, tty=False):
+ def get_printer(self, args=None):
args = args or []
printing_options = printing.print_options()
option_parser = optparse.OptionParser(option_list=printing_options)
@@ -112,7 +82,6 @@ class Testprinter(unittest.TestCase):
nproc = 2
regular_output = StringIO.StringIO()
- regular_output.isatty = lambda: tty
buildbot_output = StringIO.StringIO()
printer = printing.Printer(self._port, options, regular_output, buildbot_output)
return printer, regular_output, buildbot_output
@@ -130,62 +99,19 @@ class Testprinter(unittest.TestCase):
port.test_expectations_overrides = lambda: None
expectations = test_expectations.TestExpectations(self._port, test_names)
- rs = result_summary.ResultSummary(expectations, test_names)
+ rs = result_summary.ResultSummary(expectations, test_names, 1, set())
return test_names, rs, expectations
- def test_help_printer(self):
- # Here and below we'll call the "regular" printer err and the
- # buildbot printer out; this corresponds to how things run on the
- # bots with stderr and stdout.
- printer, err, out = self.get_printer()
-
- # This routine should print something to stdout. testing what it is
- # is kind of pointless.
- printer.help_printing()
- self.assertNotEmpty(err)
- self.assertEmpty(out)
-
- def do_switch_tests(self, method_name, switch, to_buildbot,
- message='hello', exp_err=None, exp_bot=None):
- def do_helper(method_name, switch, message, exp_err, exp_bot):
- printer, err, bot = self.get_printer(['--print', switch], tty=True)
- getattr(printer, method_name)(message)
- self.assertEqual(err.buflist, exp_err)
- self.assertEqual(bot.buflist, exp_bot)
-
- if to_buildbot:
- if exp_err is None:
- exp_err = []
- if exp_bot is None:
- exp_bot = [message + "\n"]
- else:
- if exp_err is None:
- exp_err = [message + "\n"]
- if exp_bot is None:
- exp_bot = []
- do_helper(method_name, 'nothing', 'hello', [], [])
- do_helper(method_name, switch, 'hello', exp_err, exp_bot)
- do_helper(method_name, 'everything', 'hello', exp_err, exp_bot)
-
def test_configure_and_cleanup(self):
# This test verifies that calling cleanup repeatedly and deleting
# the object is safe.
- printer, err, out = self.get_printer(['--print', 'everything'])
+ printer, err, out = self.get_printer()
printer.cleanup()
printer.cleanup()
printer = None
- def test_print_actual(self):
- # Actual results need to be logged to the buildbot's stream.
- self.do_switch_tests('print_actual', 'actual', to_buildbot=True)
-
- def test_print_actual_buildbot(self):
- # FIXME: Test that the format of the actual results matches what the
- # buildbot is expecting.
- pass
-
- def test_fallback_path_in_config(self):
- printer, err, out = self.get_printer(['--print', 'everything'])
+ def test_print_config(self):
+ printer, err, out = self.get_printer()
# FIXME: it's lame that i have to set these options directly.
printer._options.results_directory = '/tmp'
printer._options.pixel_tests = True
@@ -195,201 +121,32 @@ class Testprinter(unittest.TestCase):
printer.print_config()
self.assertTrue('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in err.getvalue())
- def test_print_config(self):
- self.do_switch_tests('_print_config', 'config', to_buildbot=False)
-
- def test_print_expected(self):
- self.do_switch_tests('_print_expected', 'expected', to_buildbot=False)
-
- def test_print_timing(self):
- self.do_switch_tests('print_timing', 'timing', to_buildbot=False)
-
- def test_write_update(self):
- # Note that there shouldn't be a carriage return here; updates()
- # are meant to be overwritten.
- self.do_switch_tests('write_update', 'updates', to_buildbot=False,
- message='hello', exp_err=['hello'])
+ self.reset(err)
+ printer._options.quiet = True
+ printer.print_config()
+ self.assertFalse('Baseline search path: test-mac-leopard -> test-mac-snowleopard -> generic' in err.getvalue())
def test_print_one_line_summary(self):
- printer, err, out = self.get_printer(['--print', 'nothing'])
- printer.print_one_line_summary(1, 1, 0)
- self.assertEmpty(err)
-
- printer, err, out = self.get_printer(['--print', 'one-line-summary'])
- printer.print_one_line_summary(1, 1, 0)
+ printer, err, out = self.get_printer()
+ printer._print_one_line_summary(1, 1, 0)
self.assertWritten(err, ["The test ran as expected.\n", "\n"])
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.print_one_line_summary(1, 1, 0)
+ printer, err, out = self.get_printer()
+ printer._print_one_line_summary(1, 1, 0)
self.assertWritten(err, ["The test ran as expected.\n", "\n"])
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.print_one_line_summary(2, 1, 1)
- self.assertWritten(err, ["1 test ran as expected, 1 didn't:\n", "\n"])
+ printer, err, out = self.get_printer()
+ printer._print_one_line_summary(2, 1, 1)
+ self.assertWritten(err, ["\n", "1 test ran as expected, 1 didn't:\n", "\n"])
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.print_one_line_summary(3, 2, 1)
- self.assertWritten(err, ["2 tests ran as expected, 1 didn't:\n", "\n"])
+ printer, err, out = self.get_printer()
+ printer._print_one_line_summary(3, 2, 1)
+ self.assertWritten(err, ["\n", "2 tests ran as expected, 1 didn't:\n", "\n"])
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.print_one_line_summary(3, 2, 0)
+ printer, err, out = self.get_printer()
+ printer._print_one_line_summary(3, 2, 0)
self.assertWritten(err, ['\n', "2 tests ran as expected (1 didn't run).\n", '\n'])
-
- def test_print_test_result(self):
- # Note here that we don't use meaningful exp_str and got_str values;
- # the actual contents of the string are treated opaquely by
- # print_test_result() when tracing, and usually we don't want
- # to test what exactly is printed, just that something
- # was printed (or that nothing was printed).
- #
- # FIXME: this is actually some goofy layering; it would be nice
- # we could refactor it so that the args weren't redundant. Maybe
- # the TestResult should contain what was expected, and the
- # strings could be derived from the TestResult?
- printer, err, out = self.get_printer(['--print', 'nothing'])
- result = self.get_result('passes/image.html')
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertEmpty(err)
-
- printer, err, out = self.get_printer(['--print', 'unexpected'])
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- self.assertEmpty(err)
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertWritten(err, [' passes/image.html -> unexpected pass\n'])
-
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- self.assertEmpty(err)
-
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertWritten(err, [' passes/image.html -> unexpected pass\n'])
-
- printer, err, out = self.get_printer(['--print', 'nothing'])
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertEmpty(err)
-
- printer, err, out = self.get_printer(['--print',
- 'trace-unexpected'])
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- self.assertEmpty(err)
-
- printer, err, out = self.get_printer(['--print',
- 'trace-unexpected'])
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertNotEmpty(err)
-
- printer, err, out = self.get_printer(['--print',
- 'trace-unexpected'])
- result = self.get_result("passes/text.html")
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertNotEmpty(err)
-
- printer, err, out = self.get_printer(['--print',
- 'trace-unexpected'])
- result = self.get_result("passes/text.html")
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
- self.assertNotEmpty(err)
-
- printer, err, out = self.get_printer(['--print', 'trace-everything'])
- result = self.get_result('passes/image.html')
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- result = self.get_result('failures/expected/missing_text.html')
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- result = self.get_result('failures/expected/missing_check.html')
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- result = self.get_result('failures/expected/missing_image.html')
- printer.print_test_result(result, expected=True, exp_str='',
- got_str='')
- self.assertNotEmpty(err)
-
- printer, err, out = self.get_printer(['--print', 'trace-everything'])
- result = self.get_result('passes/image.html')
- printer.print_test_result(result, expected=False, exp_str='',
- got_str='')
-
- def test_print_progress(self):
- expectations = ''
-
- printer, err, out = self.get_printer(['--print', 'nothing'])
- tests = ['passes/text.html', 'failures/expected/timeout.html',
- 'failures/expected/crash.html']
- paths, rs, exp = self.get_result_summary(tests, expectations)
-
- # First, test that we print nothing when we shouldn't print anything.
- printer.print_progress(rs, False, paths)
- self.assertEmpty(out)
- self.assertEmpty(err)
-
- printer.print_progress(rs, True, paths)
- self.assertEmpty(out)
- self.assertEmpty(err)
-
- # Now test that we do print things.
- printer, err, out = self.get_printer(['--print', 'one-line-progress'])
- printer.print_progress(rs, False, paths)
- self.assertEmpty(out)
- self.assertNotEmpty(err)
-
- printer, err, out = self.get_printer(['--print', 'one-line-progress'])
- printer.print_progress(rs, True, paths)
- self.assertEmpty(out)
- self.assertNotEmpty(err)
-
- printer, err, out = self.get_printer(['--print', 'one-line-progress'])
- rs.remaining = 0
- printer.print_progress(rs, False, paths)
- self.assertEmpty(out)
- self.assertNotEmpty(err)
-
- printer.print_progress(rs, True, paths)
- self.assertEmpty(out)
- self.assertNotEmpty(err)
-
-
-
- def test_write_nothing(self):
- printer, err, out = self.get_printer(['--print', 'nothing'])
- printer.write("foo")
- self.assertEmpty(err)
-
- def test_write_misc(self):
- printer, err, out = self.get_printer(['--print', 'misc'])
- printer.write("foo")
- self.assertNotEmpty(err)
-
- printer, err, out = self.get_printer(['--print', 'misc'])
- printer.write("foo", "config")
- self.assertEmpty(err)
-
- def test_write_everything(self):
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.write("foo")
- self.assertNotEmpty(err)
-
- printer, err, out = self.get_printer(['--print', 'everything'])
- printer.write("foo", "config")
- self.assertNotEmpty(err)
-
- def test_write_verbose(self):
- printer, err, out = self.get_printer(['--verbose'])
- printer.write("foo")
- self.assertTrue("foo" in err.buflist[0])
- self.assertEmpty(out)
-
def test_print_unexpected_results(self):
# This routine is the only one that prints stuff that the bots
# care about.
@@ -442,37 +199,31 @@ class Testprinter(unittest.TestCase):
tests = ['passes/text.html', 'failures/expected/timeout.html', 'failures/expected/crash.html']
expectations = ''
- printer, err, out = self.get_printer(['--print', 'nothing'])
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
- self.assertEmpty(err)
- self.assertEmpty(out)
-
- printer, err, out = self.get_printer(['--print', 'unexpected-results'])
+ printer, err, out = self.get_printer()
# test everything running as expected
ur = get_unexpected_results(expected=True, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
+ printer._print_unexpected_results(ur)
self.assertEmpty(err)
self.assertEmpty(out)
# test failures
- printer, err, out = self.get_printer(['--print', 'unexpected-results'])
+ printer, err, out = self.get_printer()
ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
+ printer._print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
# test unexpected flaky
- printer, err, out = self.get_printer(['--print', 'unexpected-results'])
+ printer, err, out = self.get_printer()
ur = get_unexpected_results(expected=False, passing=False, flaky=True)
- printer.print_unexpected_results(ur)
+ printer._print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
- printer, err, out = self.get_printer(['--print', 'everything'])
+ printer, err, out = self.get_printer()
ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
+ printer._print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
@@ -480,30 +231,30 @@ class Testprinter(unittest.TestCase):
BUGX : failures/expected/crash.html = CRASH
BUGX : failures/expected/timeout.html = TIMEOUT
"""
- printer, err, out = self.get_printer(['--print', 'unexpected-results'])
+ printer, err, out = self.get_printer()
ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
+ printer._print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
- printer, err, out = self.get_printer(['--print', 'unexpected-results'])
+ printer, err, out = self.get_printer()
ur = get_unexpected_results(expected=False, passing=True, flaky=False)
- printer.print_unexpected_results(ur)
+ printer._print_unexpected_results(ur)
self.assertEmpty(err)
self.assertNotEmpty(out)
- # Test handling of --verbose as well.
- printer, err, out = self.get_printer(['--verbose'])
- ur = get_unexpected_results(expected=False, passing=False, flaky=False)
- printer.print_unexpected_results(ur)
- # FIXME: debug output from the port and scm objects may or may not go
- # to stderr, so there's no point in testing its contents here.
- self.assertNotEmpty(out)
-
def test_print_unexpected_results_buildbot(self):
# FIXME: Test that print_unexpected_results() produces the printer the
# buildbot is expecting.
pass
+ def test_details(self):
+ printer, err, _ = self.get_printer(['--details'])
+ result = self.get_result('passes/image.html')
+ printer.print_started_test('passes/image.html')
+ printer.print_finished_test(result, expected=False, exp_str='', got_str='')
+ self.assertNotEmpty(err)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest.py b/Tools/Scripts/webkitpy/performance_tests/perftest.py
index 8aac78f3d..b111c9b4b 100644
--- a/Tools/Scripts/webkitpy/performance_tests/perftest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest.py
@@ -103,8 +103,6 @@ class PerfTest(object):
# Following is for html5.html
re.compile(re.escape("""Blocked access to external URL http://www.whatwg.org/specs/web-apps/current-work/"""))]
- _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max']
-
def _should_ignore_line_in_parser_test_result(self, line):
if not line:
return True
@@ -113,48 +111,65 @@ class PerfTest(object):
return True
return False
+ _description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
+ _result_classes = ['Time', 'JS Heap', 'FastMalloc']
+ _result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):')
+ _statistics_keys = ['avg', 'median', 'stdev', 'min', 'max', 'unit']
+ _score_regex = re.compile(r'^(?P<key>' + r'|'.join(_statistics_keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
+
def parse_output(self, output):
- got_a_result = False
test_failed = False
results = {}
- score_regex = re.compile(r'^(?P<key>' + r'|'.join(self._statistics_keys) + r')\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')
- description_regex = re.compile(r'^Description: (?P<description>.*)$', re.IGNORECASE)
+ ordered_results_keys = []
+ test_name = re.sub(r'\.\w+$', '', self._test_name)
description_string = ""
- unit = "ms"
-
+ result_class = ""
for line in re.split('\n', output.text):
- description = description_regex.match(line)
+ description = self._description_regex.match(line)
if description:
description_string = description.group('description')
continue
- score = score_regex.match(line)
+ result_class_match = self._result_class_regex.match(line)
+ if result_class_match:
+ result_class = result_class_match.group('resultclass')
+ continue
+
+ score = self._score_regex.match(line)
if score:
- results[score.group('key')] = float(score.group('value'))
- if score.group('unit'):
- unit = score.group('unit')
+ key = score.group('key')
+ value = float(score.group('value'))
+ unit = score.group('unit')
+ name = test_name
+ if result_class != 'Time':
+ name += ':' + result_class.replace(' ', '')
+ if name not in ordered_results_keys:
+ ordered_results_keys.append(name)
+ results.setdefault(name, {})
+ results[name]['unit'] = unit
+ results[name][key] = value
continue
if not self._should_ignore_line_in_parser_test_result(line):
test_failed = True
_log.error(line)
- if test_failed or set(self._statistics_keys) != set(results.keys()):
+ if test_failed or set(self._statistics_keys) != set(results[test_name].keys()):
return None
- results['unit'] = unit
-
- test_name = re.sub(r'\.\w+$', '', self._test_name)
- self.output_statistics(test_name, results, description_string)
-
- return {test_name: results}
+ for result_name in ordered_results_keys:
+ if result_name == test_name:
+ self.output_statistics(result_name, results[result_name], description_string)
+ else:
+ self.output_statistics(result_name, results[result_name])
+ return results
- def output_statistics(self, test_name, results, description_string):
+ def output_statistics(self, test_name, results, description_string=None):
unit = results['unit']
if description_string:
_log.info('DESCRIPTION: %s' % description_string)
- _log.info('RESULT %s= %s %s' % (test_name.replace('/', ': '), results['avg'], unit))
- _log.info(', '.join(['%s= %s %s' % (key, results[key], unit) for key in self._statistics_keys[1:]]))
+ _log.info('RESULT %s= %s %s' % (test_name.replace(':', ': ').replace('/', ': '), results['avg'], unit))
+ _log.info(', '.join(['%s= %s %s' % (key, results[key], unit) for key in self._statistics_keys[1:5]]))
class ChromiumStylePerfTest(PerfTest):
@@ -165,7 +180,6 @@ class ChromiumStylePerfTest(PerfTest):
def parse_output(self, output):
test_failed = False
- got_a_result = False
results = {}
for line in re.split('\n', output.text):
resultLine = ChromiumStylePerfTest._chromium_style_result_regex.match(line)
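
The reworked parse_output() above groups each block of statistics under the result class that precedes it ('Time', 'JS Heap', 'FastMalloc') and keys non-Time classes as 'test:Class'. Below is a minimal standalone sketch of that grouping logic reusing the same regular expressions; parse() and the sample string are placeholders for illustration, not the patched method itself.

    import re

    _result_classes = ['Time', 'JS Heap', 'FastMalloc']
    _result_class_regex = re.compile(r'^(?P<resultclass>' + r'|'.join(_result_classes) + '):')
    _score_regex = re.compile(r'^(?P<key>avg|median|stdev|min|max)\s+(?P<value>[0-9\.]+)\s*(?P<unit>.*)')

    def parse(test_name, text):
        results = {}
        result_class = ''
        for line in text.splitlines():
            class_match = _result_class_regex.match(line)
            if class_match:
                result_class = class_match.group('resultclass')
                continue
            score = _score_regex.match(line)
            if not score:
                continue
            name = test_name
            if result_class != 'Time':
                # e.g. 'Parser/memory-test:JSHeap' for the 'JS Heap' block
                name += ':' + result_class.replace(' ', '')
            entry = results.setdefault(name, {})
            entry[score.group('key')] = float(score.group('value'))
            entry['unit'] = score.group('unit')
        return results

    sample = 'Time:\navg 1100 ms\nJS Heap:\navg 832000 bytes\n'
    # parse('Parser/memory-test', sample) ==
    #     {'Parser/memory-test': {'avg': 1100.0, 'unit': 'ms'},
    #      'Parser/memory-test:JSHeap': {'avg': 832000.0, 'unit': 'bytes'}}
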
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
index 4fca894da..47fe6231c 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftest_unittest.py
@@ -49,11 +49,12 @@ class MainTest(unittest.TestCase):
'Running 20 times',
'Ignoring warm-up run (1115)',
'',
- 'avg 1100',
- 'median 1101',
- 'stdev 11',
- 'min 1080',
- 'max 1120']), image=None, image_hash=None, audio=None)
+ 'Time:',
+ 'avg 1100 ms',
+ 'median 1101 ms',
+ 'stdev 11 ms',
+ 'min 1080 ms',
+ 'max 1120 ms']), image=None, image_hash=None, audio=None)
output_capture = OutputCapture()
output_capture.capture_output()
try:
@@ -61,6 +62,7 @@ class MainTest(unittest.TestCase):
self.assertEqual(test.parse_output(output),
{'some-test': {'avg': 1100.0, 'median': 1101.0, 'min': 1080.0, 'max': 1120.0, 'stdev': 11.0, 'unit': 'ms'}})
finally:
+ pass
actual_stdout, actual_stderr, actual_logs = output_capture.restore_output()
self.assertEqual(actual_stdout, '')
self.assertEqual(actual_stderr, '')
@@ -73,11 +75,12 @@ class MainTest(unittest.TestCase):
'',
'some-unrecognizable-line',
'',
- 'avg 1100',
- 'median 1101',
- 'stdev 11',
- 'min 1080',
- 'max 1120']), image=None, image_hash=None, audio=None)
+ 'Time:',
+ 'avg 1100 ms',
+ 'median 1101 ms',
+ 'stdev 11 ms',
+ 'min 1080 ms',
+ 'max 1120 ms']), image=None, image_hash=None, audio=None)
output_capture = OutputCapture()
output_capture.capture_output()
try:
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
index cda3a6b59..b8a2ee4b3 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py
@@ -49,10 +49,13 @@ _log = logging.getLogger(__name__)
class PerfTestsRunner(object):
_default_branch = 'webkit-trunk'
- _EXIT_CODE_BAD_BUILD = -1
- _EXIT_CODE_BAD_JSON = -2
- _EXIT_CODE_FAILED_UPLOADING = -3
- _EXIT_CODE_BAD_PREPARATION = -4
+ EXIT_CODE_BAD_BUILD = -1
+ EXIT_CODE_BAD_SOURCE_JSON = -2
+ EXIT_CODE_BAD_MERGE = -3
+ EXIT_CODE_FAILED_UPLOADING = -4
+ EXIT_CODE_BAD_PREPARATION = -5
+
+ _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'
def __init__(self, args=None, port=None):
self._options, self._args = PerfTestsRunner._parse_args(args)
@@ -93,10 +96,16 @@ class PerfTestsRunner(object):
help="Set the timeout for each test"),
optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
help="Pause before running the tests to let user attach a performance monitor."),
+ optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
+ help="Do not generate results JSON and results page."),
optparse.make_option("--output-json-path",
- help="Filename of the JSON file that summaries the results."),
+ help="Path to generate a JSON file at; may contain previous results if it already exists."),
optparse.make_option("--source-json-path",
- help="Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
+ help="Only used on bots. Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
+ optparse.make_option("--description",
+ help="Add a description to the output JSON file if one is generated"),
+ optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
+ help="Don't launch a browser with results after the tests are done"),
optparse.make_option("--test-results-server",
help="Upload the generated JSON file to the specified server when --output-json-path is present."),
optparse.make_option("--webkit-test-runner", "-2", action="store_true",
@@ -142,86 +151,104 @@ class PerfTestsRunner(object):
def run(self):
if not self._port.check_build(needs_http=False):
_log.error("Build not up to date for %s" % self._port._path_to_driver())
- return self._EXIT_CODE_BAD_BUILD
+ return self.EXIT_CODE_BAD_BUILD
tests = self._collect_tests()
_log.info("Running %d tests" % len(tests))
for test in tests:
if not test.prepare(self._options.time_out_ms):
- return self._EXIT_CODE_BAD_PREPARATION
+ return self.EXIT_CODE_BAD_PREPARATION
unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
-
- options = self._options
- if self._options.output_json_path:
- # FIXME: Add --branch or auto-detect the branch we're in
- test_results_server = options.test_results_server
- branch = self._default_branch if test_results_server else None
- build_number = int(options.build_number) if options.build_number else None
-
- if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
- not test_results_server,
- branch, options.platform, options.builder_name, build_number) and not unexpected:
- return self._EXIT_CODE_BAD_JSON
-
- if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
- return self._EXIT_CODE_FAILED_UPLOADING
+ if self._options.generate_results:
+ exit_code = self._generate_and_show_results()
+ if exit_code:
+ return exit_code
return unexpected
- def _generate_json(self, timestamp, output_json_path, source_json_path, should_generate_results_page,
- branch, platform, builder_name, build_number):
+ def _output_json_path(self):
+ output_json_path = self._options.output_json_path
+ if output_json_path:
+ return output_json_path
+ return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)
- contents = {'timestamp': int(timestamp), 'results': self._results}
+ def _generate_and_show_results(self):
+ options = self._options
+ output_json_path = self._output_json_path()
+ output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)
+
+ if options.source_json_path:
+ output = self._merge_source_json(options.source_json_path, output)
+ if not output:
+ return self.EXIT_CODE_BAD_SOURCE_JSON
+
+ test_results_server = options.test_results_server
+ results_page_path = None
+ if not test_results_server:
+ output = self._merge_outputs(output_json_path, output)
+ if not output:
+ return self.EXIT_CODE_BAD_MERGE
+ results_page_path = self._host.filesystem.splitext(output_json_path)[0] + '.html'
+
+ self._generate_output_files(output_json_path, results_page_path, output)
+
+ if test_results_server:
+ if not self._upload_json(test_results_server, output_json_path):
+ return self.EXIT_CODE_FAILED_UPLOADING
+ elif options.show_results:
+ self._port.show_results_html_file(results_page_path)
+
+ def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
+ contents = {'results': self._results}
+ if description:
+ contents['description'] = description
for (name, path) in self._port.repository_paths():
contents[name + '-revision'] = self._host.scm().svn_revision(path)
- for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
+ # FIXME: Add --branch or auto-detect the branch we're in
+ for key, value in {'timestamp': int(timestamp), 'branch': self._default_branch, 'platform': platform,
+ 'builder-name': builder_name, 'build-number': int(build_number) if build_number else None}.items():
if value:
contents[key] = value
- filesystem = self._host.filesystem
- succeeded = False
- if source_json_path:
- try:
- source_json_file = filesystem.open_text_file_for_reading(source_json_path)
- source_json = json.load(source_json_file)
- contents = dict(source_json.items() + contents.items())
- succeeded = True
- except IOError, error:
- _log.error("Failed to read %s: %s" % (source_json_path, error))
- except ValueError, error:
- _log.error("Failed to parse %s: %s" % (source_json_path, error))
- except TypeError, error:
- _log.error("Failed to merge JSON files: %s" % error)
- if not succeeded:
- return False
-
- if should_generate_results_page:
- if filesystem.isfile(output_json_path):
- existing_contents = json.loads(filesystem.read_text_file(output_json_path))
- existing_contents.append(contents)
- contents = existing_contents
- else:
- contents = [contents]
+ return contents
- serialized_contents = json.dumps(contents)
- filesystem.write_text_file(output_json_path, serialized_contents)
+ def _merge_source_json(self, source_json_path, output):
+ try:
+ source_json_file = self._host.filesystem.open_text_file_for_reading(source_json_path)
+ source_json = json.load(source_json_file)
+ return dict(source_json.items() + output.items())
+ except Exception, error:
+ _log.error("Failed to merge source JSON file %s: %s" % (source_json_path, error))
+ return None
+
+ def _merge_outputs(self, output_json_path, output):
+ if not self._host.filesystem.isfile(output_json_path):
+ return [output]
+ try:
+ existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
+ return existing_outputs + [output]
+ except Exception, error:
+ _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
+ return None
- if should_generate_results_page:
- jquery_path = filesystem.join(self._port.perf_tests_dir(), 'Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js')
- jquery = filesystem.read_text_file(jquery_path)
+ def _generate_output_files(self, output_json_path, results_page_path, output):
+ filesystem = self._host.filesystem
+
+ json_output = json.dumps(output)
+ filesystem.write_text_file(output_json_path, json_output)
+ if results_page_path:
template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
template = filesystem.read_text_file(template_path)
- results_page = template.replace('<?WebKitPerfTestRunnerInsertionPoint?>',
- '<script>%s</script><script id="json">%s</script>' % (jquery, serialized_contents))
+ absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
+ results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
+ results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)
- filesystem.write_text_file(filesystem.splitext(output_json_path)[0] + '.html', results_page)
-
- return True
+ filesystem.write_text_file(results_page_path, results_page)
def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
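
When no test results server is given, the runner now appends the current run to whatever is already stored at the output JSON path, so the generated results page accumulates every run. Here is a rough sketch of that merge step, assuming a plain local file instead of webkitpy's filesystem wrapper; merge_outputs() below is a hypothetical stand-in for the private method above.

    import json
    import os

    def merge_outputs(output_json_path, output):
        if not os.path.isfile(output_json_path):
            return [output]
        try:
            with open(output_json_path) as existing_file:
                existing_outputs = json.load(existing_file)
            # A valid file holds a JSON array of earlier runs; append this one.
            return existing_outputs + [output]
        except (ValueError, TypeError):
            # Unparsable or non-list content maps to EXIT_CODE_BAD_MERGE in the runner.
            return None
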
diff --git a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
index 389201521..ef459cd69 100755
--- a/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
+++ b/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_unittest.py
@@ -87,21 +87,48 @@ Ignoring warm-up run (1502)
1475
1471
-avg 1489.05
-median 1487
-stdev 14.46
-min 1471
-max 1510
+Time:
+avg 1489.05 ms
+median 1487 ms
+stdev 14.46 ms
+min 1471 ms
+max 1510 ms
"""
elif driver_input.test_name.endswith('some-parser.html'):
text = """Running 20 times
Ignoring warm-up run (1115)
-avg 1100
-median 1101
-stdev 11
-min 1080
-max 1120
+Time:
+avg 1100 ms
+median 1101 ms
+stdev 11 ms
+min 1080 ms
+max 1120 ms
+"""
+ elif driver_input.test_name.endswith('memory-test.html'):
+ text = """Running 20 times
+Ignoring warm-up run (1115)
+
+Time:
+avg 1100 ms
+median 1101 ms
+stdev 11 ms
+min 1080 ms
+max 1120 ms
+
+JS Heap:
+avg 832000 bytes
+median 829000 bytes
+stdev 15000 bytes
+min 811000 bytes
+max 848000 bytes
+
+FastMalloc:
+avg 532000 bytes
+median 529000 bytes
+stdev 13000 bytes
+min 511000 bytes
+max 548000 bytes
"""
return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
@@ -120,6 +147,9 @@ max 1120
runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
+
+ filesystem = runner._host.filesystem
+ runner.load_output_json = lambda: json.loads(filesystem.read_text_file(runner._output_json_path()))
return runner, test_port
def run_test(self, test_name):
@@ -225,6 +255,33 @@ max 1120
'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
'', '']))
+ def test_run_memory_test(self):
+ runner, port = self.create_runner_and_setup_results_template()
+ runner._timestamp = 123456789
+ port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
+
+ output = OutputCapture()
+ output.capture_output()
+ try:
+ unexpected_result_count = runner.run()
+ finally:
+ stdout, stderr, log = output.restore_output()
+ self.assertEqual(unexpected_result_count, 0)
+ self.assertEqual(log, '\n'.join([
+ 'Running 1 tests',
+ 'Running Parser/memory-test.html (1 of 1)',
+ 'RESULT Parser: memory-test= 1100.0 ms',
+ 'median= 1101.0 ms, stdev= 11.0 ms, min= 1080.0 ms, max= 1120.0 ms',
+ 'RESULT Parser: memory-test: JSHeap= 832000.0 bytes',
+ 'median= 829000.0 bytes, stdev= 15000.0 bytes, min= 811000.0 bytes, max= 848000.0 bytes',
+ 'RESULT Parser: memory-test: FastMalloc= 532000.0 bytes',
+ 'median= 529000.0 bytes, stdev= 13000.0 bytes, min= 511000.0 bytes, max= 548000.0 bytes',
+ '', '']))
+ results = runner.load_output_json()[0]['results']
+ self.assertEqual(results['Parser/memory-test'], {'min': 1080.0, 'max': 1120.0, 'median': 1101.0, 'stdev': 11.0, 'avg': 1100.0, 'unit': 'ms'})
+ self.assertEqual(results['Parser/memory-test:JSHeap'], {'min': 811000.0, 'max': 848000.0, 'median': 829000.0, 'stdev': 15000.0, 'avg': 832000.0, 'unit': 'bytes'})
+ self.assertEqual(results['Parser/memory-test:FastMalloc'], {'min': 511000.0, 'max': 548000.0, 'median': 529000.0, 'stdev': 13000.0, 'avg': 532000.0, 'unit': 'bytes'})
+
def _test_run_with_json_output(self, runner, filesystem, upload_suceeds=True, expected_exit_code=0):
filesystem.write_text_file(runner._base_path + '/inspector/pass.html', 'some content')
filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
@@ -246,78 +303,148 @@ max 1120
finally:
stdout, stderr, logs = output_capture.restore_output()
- self.assertEqual(logs, '\n'.join([
- 'Running 2 tests',
- 'Running Bindings/event-target-wrapper.html (1 of 2)',
- 'RESULT Bindings: event-target-wrapper= 1489.05 ms',
- 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
- '',
- 'Running inspector/pass.html (2 of 2)',
- 'RESULT group_name: test_name= 42 ms',
- '',
- '']))
+ if not expected_exit_code:
+ self.assertEqual(logs, '\n'.join([
+ 'Running 2 tests',
+ 'Running Bindings/event-target-wrapper.html (1 of 2)',
+ 'RESULT Bindings: event-target-wrapper= 1489.05 ms',
+ 'median= 1487.0 ms, stdev= 14.46 ms, min= 1471.0 ms, max= 1510.0 ms',
+ '',
+ 'Running inspector/pass.html (2 of 2)',
+ 'RESULT group_name: test_name= 42 ms',
+ '',
+ '']))
return uploaded[0]
+ _event_target_wrapper_and_inspector_results = {
+ "Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
+ "inspector/pass.html:group_name:test_name": 42}
+
def test_run_with_json_output(self):
runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host'])
self._test_run_with_json_output(runner, port.host.filesystem)
- self.assertEqual(json.loads(port.host.filesystem.read_text_file('/mock-checkout/output.json')), {
- "timestamp": 123456789, "results":
- {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
- "inspector/pass.html:group_name:test_name": 42},
- "webkit-revision": 5678, "branch": "webkit-trunk"})
-
- def test_run_generates_results_page(self):
- runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json'])
+ self.assertEqual(runner.load_output_json(), {
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+ "webkit-revision": "5678", "branch": "webkit-trunk"})
+
+ def test_run_with_description(self):
+ runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host', '--description', 'some description'])
+ self._test_run_with_json_output(runner, port.host.filesystem)
+ self.assertEqual(runner.load_output_json(), {
+ "timestamp": 123456789, "description": "some description",
+ "results": self._event_target_wrapper_and_inspector_results,
+ "webkit-revision": "5678", "branch": "webkit-trunk"})
+
+ def create_runner_and_setup_results_template(self, args=[]):
+ runner, port = self.create_runner(args)
filesystem = port.host.filesystem
- print runner._base_path + '/resources/results-template.html'
filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
- 'BEGIN<?WebKitPerfTestRunnerInsertionPoint?>END')
- filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js',
- 'jquery content')
+ 'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
+ '<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
+ filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
+ return runner, port
+ def test_run_respects_no_results(self):
+ runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+ '--test-results-server=some.host', '--no-results'])
+ self.assertFalse(self._test_run_with_json_output(runner, port.host.filesystem))
+ self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
+
+ def test_run_generates_json_by_default(self):
+ runner, port = self.create_runner_and_setup_results_template()
+ filesystem = port.host.filesystem
+ output_json_path = filesystem.join(port.perf_results_directory(), runner._DEFAULT_JSON_FILENAME)
+ results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
+
+ self.assertFalse(filesystem.isfile(output_json_path))
+ self.assertFalse(filesystem.isfile(results_page_path))
+
+ self._test_run_with_json_output(runner, port.host.filesystem)
+
+ self.assertEqual(json.loads(port.host.filesystem.read_text_file(output_json_path)), [{
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+ "webkit-revision": "5678", "branch": "webkit-trunk"}])
+
+ self.assertTrue(filesystem.isfile(output_json_path))
+ self.assertTrue(filesystem.isfile(results_page_path))
+
+ def test_run_generates_and_show_results_page(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+ page_shown = []
+ port.show_results_html_file = lambda path: page_shown.append(path)
+ filesystem = port.host.filesystem
self._test_run_with_json_output(runner, filesystem)
- expected_entry = {"timestamp": 123456789, "results": {"Bindings/event-target-wrapper":
- {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
- "inspector/pass.html:group_name:test_name": 42}, "webkit-revision": 5678}
+ expected_entry = {"timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+ "webkit-revision": "5678", "branch": "webkit-trunk"}
self.maxDiff = None
json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
self.assertEqual(json.loads(json_output), [expected_entry])
self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
- 'BEGIN<script>jquery content</script><script id="json">' + json_output + '</script>END')
+ 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+ '<script>%s</script>END' % json_output)
+ self.assertEqual(page_shown[0], '/mock-checkout/output.html')
self._test_run_with_json_output(runner, filesystem)
json_output = port.host.filesystem.read_text_file('/mock-checkout/output.json')
self.assertEqual(json.loads(json_output), [expected_entry, expected_entry])
self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
- 'BEGIN<script>jquery content</script><script id="json">' + json_output + '</script>END')
+ 'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
+ '<script>%s</script>END' % json_output)
+
+ def test_run_respects_no_show_results(self):
+ show_results_html_file = lambda path: page_shown.append(path)
+
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+ page_shown = []
+ port.show_results_html_file = show_results_html_file
+ self._test_run_with_json_output(runner, port.host.filesystem)
+ self.assertEqual(page_shown[0], '/mock-checkout/output.html')
+
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
+ '--no-show-results'])
+ page_shown = []
+ port.show_results_html_file = show_results_html_file
+ self._test_run_with_json_output(runner, port.host.filesystem)
+ self.assertEqual(page_shown, [])
+
+ def test_run_with_bad_output_json(self):
+ runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
+ port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
+ port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
def test_run_with_json_source(self):
runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
'--source-json-path=/mock-checkout/source.json', '--test-results-server=some.host'])
port.host.filesystem.write_text_file('/mock-checkout/source.json', '{"key": "value"}')
self._test_run_with_json_output(runner, port.host.filesystem)
- self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
- "timestamp": 123456789, "results":
- {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
- "inspector/pass.html:group_name:test_name": 42},
- "webkit-revision": 5678, "branch": "webkit-trunk",
- "key": "value"})
+ self.assertEqual(runner.load_output_json(), {
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+ "webkit-revision": "5678", "branch": "webkit-trunk", "key": "value"})
+
+ def test_run_with_bad_json_source(self):
+ runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
+ '--source-json-path=/mock-checkout/source.json', '--test-results-server=some.host'])
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+ port.host.filesystem.write_text_file('/mock-checkout/source.json', 'bad json')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
+ port.host.filesystem.write_text_file('/mock-checkout/source.json', '["another bad json"]')
+ self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
def test_run_with_multiple_repositories(self):
runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host'])
port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
self._test_run_with_json_output(runner, port.host.filesystem)
- self.assertEqual(json.loads(port.host.filesystem.files['/mock-checkout/output.json']), {
- "timestamp": 123456789, "results":
- {"Bindings/event-target-wrapper": {"max": 1510, "avg": 1489.05, "median": 1487, "min": 1471, "stdev": 14.46, "unit": "ms"},
- "inspector/pass.html:group_name:test_name": 42.0},
- "webkit-revision": 5678, "some-revision": 5678, "branch": "webkit-trunk"})
+ self.assertEqual(runner.load_output_json(), {
+ "timestamp": 123456789, "results": self._event_target_wrapper_and_inspector_results,
+ "webkit-revision": "5678", "some-revision": "5678", "branch": "webkit-trunk"})
def test_run_with_upload_json(self):
runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
@@ -329,7 +456,7 @@ max 1120
self.assertEqual(generated_json['builder-name'], 'builder1')
self.assertEqual(generated_json['build-number'], 123)
- self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=-3)
+ self._test_run_with_json_output(runner, port.host.filesystem, upload_suceeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
def test_upload_json(self):
runner, port = self.create_runner()
diff --git a/Tools/Scripts/webkitpy/test/finder.py b/Tools/Scripts/webkitpy/test/finder.py
index fcbb0e9cf..21eceac06 100644
--- a/Tools/Scripts/webkitpy/test/finder.py
+++ b/Tools/Scripts/webkitpy/test/finder.py
@@ -25,7 +25,6 @@
import logging
import re
-import sys
_log = logging.getLogger(__name__)
@@ -77,10 +76,14 @@ class Finder(object):
def __init__(self, filesystem):
self.filesystem = filesystem
self.trees = []
+ self._names_to_skip = []
def add_tree(self, top_directory, starting_subdirectory=None):
self.trees.append(_DirectoryTree(self.filesystem, top_directory, starting_subdirectory))
+ def skip(self, names, reason, bugid):
+ self._names_to_skip.append(tuple([names, reason, bugid]))
+
def additional_paths(self, paths):
return [tree.top_directory for tree in self.trees if tree.top_directory not in paths]
@@ -101,18 +104,15 @@ class Finder(object):
return tree.to_module(path)
return None
- def find_names(self, args, skip_integrationtests, find_all, skip_if_parallel=True):
- suffixes = ['_unittest.py']
- if not skip_integrationtests:
- suffixes.append('_integrationtest.py')
-
+ def find_names(self, args, find_all):
+ suffixes = ['_unittest.py', '_integrationtest.py']
if args:
names = []
for arg in args:
names.extend(self._find_names_for_arg(arg, suffixes))
return names
- return self._default_names(suffixes, find_all, skip_if_parallel)
+ return self._default_names(suffixes, find_all)
def _find_names_for_arg(self, arg, suffixes):
realpath = self.filesystem.realpath(arg)
@@ -145,7 +145,7 @@ class Finder(object):
return tree.find_modules(suffixes, path)
return []
- def _default_names(self, suffixes, find_all, skip_if_parallel):
+ def _default_names(self, suffixes, find_all):
modules = []
for tree in self.trees:
modules.extend(tree.find_modules(suffixes))
@@ -154,16 +154,9 @@ class Finder(object):
for module in modules:
_log.debug("Found: %s" % module)
- # FIXME: Figure out how to move this to test-webkitpy in order to to make this file more generic.
if not find_all:
- slow_tests = ('webkitpy.common.checkout.scm.scm_unittest',)
- self._exclude(modules, slow_tests, 'are really, really slow', 31818)
-
- if sys.platform == 'win32':
- win32_blacklist = ('webkitpy.common.checkout',
- 'webkitpy.common.config',
- 'webkitpy.tool')
- self._exclude(modules, win32_blacklist, 'fail horribly on win32', 54526)
+ for (names, reason, bugid) in self._names_to_skip:
+ self._exclude(modules, names, reason, bugid)
return modules
diff --git a/Tools/Scripts/webkitpy/test/finder_unittest.py b/Tools/Scripts/webkitpy/test/finder_unittest.py
index 386c579c7..5c808a17e 100644
--- a/Tools/Scripts/webkitpy/test/finder_unittest.py
+++ b/Tools/Scripts/webkitpy/test/finder_unittest.py
@@ -79,17 +79,15 @@ class FinderTest(unittest.TestCase):
self.finder.clean_trees()
self.assertFalse(self.fs.exists('/foo2/bar2/missing.pyc'))
- def check_names(self, names, expected_names, skip_integrationtests=False, find_all=False):
- self.assertEquals(self.finder.find_names(names, skip_integrationtests, find_all),
- expected_names)
+ def check_names(self, names, expected_names, find_all=True):
+ self.assertEquals(self.finder.find_names(names, find_all), expected_names)
def test_default_names(self):
- self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'])
- self.check_names([], ['bar.baz_unittest'], skip_integrationtests=True, find_all=True)
- self.check_names([], ['bar.baz_unittest'], skip_integrationtests=True, find_all=False)
+ self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=True)
+ self.check_names([], ['bar.baz_unittest', 'bar2.baz2_integrationtest'], find_all=False)
# Should return the names given it, even if they don't exist.
- self.check_names(['foobar'], ['foobar'], skip_integrationtests=True, find_all=False)
+ self.check_names(['foobar'], ['foobar'], find_all=False)
def test_paths(self):
self.fs.chdir('/foo/bar')
diff --git a/Tools/Scripts/webkitpy/test/main.py b/Tools/Scripts/webkitpy/test/main.py
index 986af56b8..28de8a6e0 100644
--- a/Tools/Scripts/webkitpy/test/main.py
+++ b/Tools/Scripts/webkitpy/test/main.py
@@ -29,13 +29,14 @@ import optparse
import os
import StringIO
import sys
+import time
import traceback
import unittest
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.test.finder import Finder
from webkitpy.test.printer import Printer
-from webkitpy.test.runner import Runner
+from webkitpy.test.runner import Runner, unit_test_name
_log = logging.getLogger(__name__)
@@ -48,7 +49,11 @@ def main():
tester.add_tree(os.path.join(webkit_root, 'Tools', 'Scripts'), 'webkitpy')
tester.add_tree(os.path.join(webkit_root, 'Source', 'WebKit2', 'Scripts'), 'webkit2')
- # FIXME: Do we need to be able to test QueueStatusServer on Windows as well?
+ tester.skip(('webkitpy.common.checkout.scm.scm_unittest',), 'are really, really slow', 31818)
+ if sys.platform == 'win32':
+ tester.skip(('webkitpy.common.checkout', 'webkitpy.common.config', 'webkitpy.tool'), 'fail horribly on win32', 54526)
+
+ # This only needs to run on Unix, so don't worry about win32 for now.
appengine_sdk_path = '/usr/local/google_appengine'
if os.path.exists(appengine_sdk_path):
if not appengine_sdk_path in sys.path:
@@ -73,24 +78,27 @@ class Tester(object):
def add_tree(self, top_directory, starting_subdirectory=None):
self.finder.add_tree(top_directory, starting_subdirectory)
+ def skip(self, names, reason, bugid):
+ self.finder.skip(names, reason, bugid)
+
def _parse_args(self):
parser = optparse.OptionParser(usage='usage: %prog [options] [args...]')
parser.add_option('-a', '--all', action='store_true', default=False,
help='run all the tests')
parser.add_option('-c', '--coverage', action='store_true', default=False,
help='generate code coverage info (requires http://pypi.python.org/pypi/coverage)')
+ parser.add_option('-i', '--integration-tests', action='store_true', default=False,
+ help='run integration tests as well as unit tests'),
+ parser.add_option('-j', '--child-processes', action='store', type='int', default=(1 if sys.platform == 'win32' else multiprocessing.cpu_count()),
+ help='number of tests to run in parallel (default=%default)')
+ parser.add_option('-p', '--pass-through', action='store_true', default=False,
+ help='be debugger friendly by passing captured output through to the system')
parser.add_option('-q', '--quiet', action='store_true', default=False,
help='run quietly (errors, warnings, and progress only)')
parser.add_option('-t', '--timing', action='store_true', default=False,
help='display per-test execution time (implies --verbose)')
parser.add_option('-v', '--verbose', action='count', default=0,
help='verbose output (specify once for individual test results, twice for debug messages)')
- parser.add_option('--skip-integrationtests', action='store_true', default=False,
- help='do not run the integration tests')
- parser.add_option('-p', '--pass-through', action='store_true', default=False,
- help='be debugger friendly by passing captured output through to the system')
- parser.add_option('-j', '--child-processes', action='store', type='int', default=(1 if sys.platform == 'win32' else multiprocessing.cpu_count()),
- help='number of tests to run in parallel (default=%default)')
parser.epilog = ('[args...] is an optional list of modules, test_classes, or individual tests. '
'If no args are given, all the tests will be run.')
@@ -103,7 +111,7 @@ class Tester(object):
self.finder.clean_trees()
- names = self.finder.find_names(args, self._options.skip_integrationtests, self._options.all, self._options.child_processes != 1)
+ names = self.finder.find_names(args, self._options.all)
if not names:
_log.error('No tests to run')
return False
@@ -111,22 +119,51 @@ class Tester(object):
return self._run_tests(names)
def _run_tests(self, names):
+ # Make sure PYTHONPATH is set up properly.
+ sys.path = self.finder.additional_paths(sys.path) + sys.path
+
+ # We autoinstall everything up front so that we can run tests concurrently
+ # and not have to worry about autoinstalling packages concurrently.
+ self.printer.write_update("Checking autoinstalled packages ...")
+ from webkitpy.thirdparty import autoinstall_everything
+ installed_something = autoinstall_everything()
+
+ # FIXME: There appears to be a bug in Python 2.6.1 that is causing multiprocessing
+ # to hang after we install the packages in a clean checkout.
+ if installed_something:
+ _log.warning("We installed new packages, so running things serially at first")
+ self._options.child_processes = 1
+
if self._options.coverage:
- try:
- import webkitpy.thirdparty.autoinstalled.coverage as coverage
- except ImportError:
- _log.error("Failed to import 'coverage'; can't generate coverage numbers.")
- return False
+ import webkitpy.thirdparty.autoinstalled.coverage as coverage
cov = coverage.coverage()
cov.start()
- # Make sure PYTHONPATH is set up properly.
- sys.path = self.finder.additional_paths(sys.path) + sys.path
+ self.printer.write_update("Checking imports ...")
+ if not self._check_imports(names):
+ return False
+
+ self.printer.write_update("Finding the individual test methods ...")
+ loader = _Loader()
+ parallel_tests, serial_tests = self._test_names(loader, names)
- _log.debug("Loading the tests...")
+ self.printer.write_update("Running the tests ...")
+ self.printer.num_tests = len(parallel_tests) + len(serial_tests)
+ start = time.time()
+ test_runner = Runner(self.printer, loader)
+ test_runner.run(parallel_tests, self._options.child_processes)
+ test_runner.run(serial_tests, 1)
- loader = unittest.defaultTestLoader
- suites = []
+ self.printer.print_result(time.time() - start)
+
+ if self._options.coverage:
+ cov.stop()
+ cov.save()
+ cov.report(show_missing=False)
+
+ return not self.printer.num_errors and not self.printer.num_failures
+
+ def _check_imports(self, names):
for name in names:
if self.finder.is_module(name):
# if we failed to load a name and it looks like a module,
@@ -138,19 +175,33 @@ class Tester(object):
_log.fatal('Failed to import %s:' % name)
self._log_exception()
return False
+ return True
- suites.append(loader.loadTestsFromName(name, None))
+ def _test_names(self, loader, names):
+ if self._options.integration_tests:
+ loader.test_method_prefixes.append('integration_test_')
- test_suite = unittest.TestSuite(suites)
- test_runner = Runner(self.printer, self._options, loader)
+ parallel_tests = []
+ if self._options.child_processes > 1:
+ for name in names:
+ parallel_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
+ loader.test_method_prefixes = []
- _log.debug("Running the tests.")
- result = test_runner.run(test_suite)
- if self._options.coverage:
- cov.stop()
- cov.save()
- cov.report(show_missing=False)
- return result.wasSuccessful()
+ serial_tests = []
+ loader.test_method_prefixes.extend(['serial_test_', 'serial_integration_test_'])
+ for name in names:
+ serial_tests.extend(self._all_test_names(loader.loadTestsFromName(name, None)))
+
+ return (parallel_tests, serial_tests)
+
+ def _all_test_names(self, suite):
+ names = []
+ if hasattr(suite, '_tests'):
+ for t in suite._tests:
+ names.extend(self._all_test_names(t))
+ else:
+ names.append(unit_test_name(suite))
+ return names
def _log_exception(self):
s = StringIO.StringIO()
@@ -158,5 +209,19 @@ class Tester(object):
for l in s.buflist:
_log.error(' ' + l.rstrip())
+
+class _Loader(unittest.TestLoader):
+ test_method_prefixes = ['test_']
+
+ def getTestCaseNames(self, testCaseClass):
+ def isTestMethod(attrname, testCaseClass=testCaseClass):
+ if not hasattr(getattr(testCaseClass, attrname), '__call__'):
+ return False
+ return (any(attrname.startswith(prefix) for prefix in self.test_method_prefixes))
+ testFnNames = filter(isTestMethod, dir(testCaseClass))
+ testFnNames.sort()
+ return testFnNames
+
+
if __name__ == '__main__':
sys.exit(main())
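
The custom _Loader above selects test methods purely by name prefix, which is how integration tests ('integration_test_') are opted into and how serial tests ('serial_test_', 'serial_integration_test_') are split off from the parallel pass. A small sketch of that prefix filter with plain unittest; PrefixLoader and FakeTests are made-up names for illustration.

    import unittest

    class PrefixLoader(unittest.TestLoader):
        test_method_prefixes = ['test_']

        def getTestCaseNames(self, testCaseClass):
            def is_test_method(attrname):
                return (callable(getattr(testCaseClass, attrname)) and
                        any(attrname.startswith(prefix) for prefix in self.test_method_prefixes))
            return sorted(filter(is_test_method, dir(testCaseClass)))

    class FakeTests(unittest.TestCase):
        def test_fast(self):
            pass

        def serial_test_slow(self):
            pass

    loader = PrefixLoader()
    assert loader.getTestCaseNames(FakeTests) == ['test_fast']
    loader.test_method_prefixes = ['serial_test_', 'serial_integration_test_']
    assert loader.getTestCaseNames(FakeTests) == ['serial_test_slow']
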
diff --git a/Tools/Scripts/webkitpy/test/main_unittest.py b/Tools/Scripts/webkitpy/test/main_unittest.py
index 61e49a7b9..ca7ebba0e 100644
--- a/Tools/Scripts/webkitpy/test/main_unittest.py
+++ b/Tools/Scripts/webkitpy/test/main_unittest.py
@@ -41,13 +41,13 @@ class TesterTest(unittest.TestCase):
root_logger.handlers = []
tester.printer.stream = errors
- tester.finder.find_names = lambda args, skip_integration, run_all, skip_if_parallel: []
+ tester.finder.find_names = lambda args, run_all: []
oc = OutputCapture()
try:
oc.capture_output()
self.assertFalse(tester.run())
finally:
- out, err, logs = oc.restore_output()
+ _, _, logs = oc.restore_output()
root_logger.handlers = root_handlers
self.assertTrue('No tests to run' in errors.getvalue())
diff --git a/Tools/Scripts/webkitpy/test/printer.py b/Tools/Scripts/webkitpy/test/printer.py
index 042fba13c..0ec3035b3 100644
--- a/Tools/Scripts/webkitpy/test/printer.py
+++ b/Tools/Scripts/webkitpy/test/printer.py
@@ -37,6 +37,8 @@ class Printer(object):
self.options = options
self.num_tests = 0
self.num_completed = 0
+ self.num_errors = 0
+ self.num_failures = 0
self.running_tests = []
self.completed_tests = []
if options:
@@ -102,6 +104,9 @@ class Printer(object):
if self.options.pass_through:
outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream
+ def write_update(self, msg):
+ self.meter.write_update(msg)
+
def print_started_test(self, source, test_name):
self.running_tests.append(test_name)
if len(self.running_tests) > 1:
@@ -116,14 +121,16 @@ class Printer(object):
write(self._test_line(self.running_tests[0], suffix))
- def print_finished_test(self, result, test_name, test_time, failure, err):
+ def print_finished_test(self, source, test_name, test_time, failures, errors):
write = self.meter.writeln
- if failure:
- lines = failure[0][1].splitlines() + ['']
+ if failures:
+ lines = failures[0].splitlines() + ['']
suffix = ' failed:'
- elif err:
- lines = err[0][1].splitlines() + ['']
+ self.num_failures += 1
+ elif errors:
+ lines = errors[0].splitlines() + ['']
suffix = ' erred:'
+ self.num_errors += 1
else:
suffix = ' passed'
lines = []
@@ -154,13 +161,13 @@ class Printer(object):
def _test_line(self, test_name, suffix):
return '[%d/%d] %s%s' % (self.num_completed, self.num_tests, test_name, suffix)
- def print_result(self, result, run_time):
+ def print_result(self, run_time):
write = self.meter.writeln
- write('Ran %d test%s in %.3fs' % (result.testsRun, result.testsRun != 1 and "s" or "", run_time))
- if result.wasSuccessful():
- write('\nOK\n')
+ write('Ran %d test%s in %.3fs' % (self.num_completed, self.num_completed != 1 and "s" or "", run_time))
+ if self.num_failures or self.num_errors:
+ write('FAILED (failures=%d, errors=%d)\n' % (self.num_failures, self.num_errors))
else:
- write('FAILED (failures=%d, errors=%d)\n' % (len(result.failures), len(result.errors)))
+ write('\nOK\n')
class _CaptureAndPassThroughStream(object):
diff --git a/Tools/Scripts/webkitpy/test/runner.py b/Tools/Scripts/webkitpy/test/runner.py
index fd8af6fe0..d3f57643c 100644
--- a/Tools/Scripts/webkitpy/test/runner.py
+++ b/Tools/Scripts/webkitpy/test/runner.py
@@ -22,61 +22,47 @@
"""code to actually run a list of python tests."""
-import logging
import re
import time
import unittest
from webkitpy.common import message_pool
-_log = logging.getLogger(__name__)
-
-
_test_description = re.compile("(\w+) \(([\w.]+)\)")
-def _test_name(test):
+def unit_test_name(test):
m = _test_description.match(str(test))
return "%s.%s" % (m.group(2), m.group(1))
class Runner(object):
- def __init__(self, printer, options, loader):
- self.options = options
+ def __init__(self, printer, loader):
self.printer = printer
self.loader = loader
- self.result = unittest.TestResult()
+ self.tests_run = 0
+ self.errors = []
+ self.failures = []
self.worker_factory = lambda caller: _Worker(caller, self.loader)
- def all_test_names(self, suite):
- names = []
- if hasattr(suite, '_tests'):
- for t in suite._tests:
- names.extend(self.all_test_names(t))
- else:
- names.append(_test_name(suite))
- return names
-
- def run(self, suite):
- run_start_time = time.time()
- all_test_names = self.all_test_names(suite)
- self.printer.num_tests = len(all_test_names)
-
- with message_pool.get(self, self.worker_factory, int(self.options.child_processes)) as pool:
- pool.run(('test', test_name) for test_name in all_test_names)
-
- self.printer.print_result(self.result, time.time() - run_start_time)
- return self.result
+ def run(self, test_names, num_workers):
+ if not test_names:
+ return
+ num_workers = min(num_workers, len(test_names))
+ with message_pool.get(self, self.worker_factory, num_workers) as pool:
+ pool.run(('test', test_name) for test_name in test_names)
- def handle(self, message_name, source, test_name, delay=None, result=None):
+ def handle(self, message_name, source, test_name, delay=None, failures=None, errors=None):
if message_name == 'started_test':
self.printer.print_started_test(source, test_name)
return
- self.result.testsRun += 1
- self.result.errors.extend(result.errors)
- self.result.failures.extend(result.failures)
- self.printer.print_finished_test(source, test_name, delay, result.failures, result.errors)
+ self.tests_run += 1
+ if failures:
+ self.failures.append((test_name, failures))
+ if errors:
+ self.errors.append((test_name, errors))
+ self.printer.print_finished_test(source, test_name, delay, failures, errors)
class _Worker(object):
@@ -89,13 +75,8 @@ class _Worker(object):
result = unittest.TestResult()
start = time.time()
self._caller.post('started_test', test_name)
- self._loader.loadTestsFromName(test_name, None).run(result)
- # The tests in the TestResult contain file objects and other unpicklable things; we only
- # care about the test name, so we rewrite the result to replace the test with the test name.
- # FIXME: We need an automated test for this, but I don't know how to write an automated
- # test that will fail in this case that doesn't get picked up by test-webkitpy normally :(.
- result.failures = [(_test_name(failure[0]), failure[1]) for failure in result.failures]
- result.errors = [(_test_name(error[0]), error[1]) for error in result.errors]
-
- self._caller.post('finished_test', test_name, time.time() - start, result)
+ # We will need to rework this if a test_name results in multiple tests.
+ self._loader.loadTestsFromName(test_name, None).run(result)
+ self._caller.post('finished_test', test_name, time.time() - start,
+ [failure[1] for failure in result.failures], [error[1] for error in result.errors])
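
Only plain strings now cross the worker boundary: unit_test_name() turns unittest's default description, "method_name (package.module.Class)", into a dotted name that loadTestsFromName() can resolve again on the worker side. A quick sketch of that conversion; the function here takes the already-stringified description rather than a test object, purely for brevity.

    import re

    _test_description = re.compile(r"(\w+) \(([\w.]+)\)")

    def unit_test_name(test_description):
        m = _test_description.match(test_description)
        return "%s.%s" % (m.group(2), m.group(1))

    assert unit_test_name("test_run (webkitpy.test.runner_unittest.RunnerTest)") == \
        "webkitpy.test.runner_unittest.RunnerTest.test_run"
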
diff --git a/Tools/Scripts/webkitpy/test/runner_unittest.py b/Tools/Scripts/webkitpy/test/runner_unittest.py
index 07c5c31ea..8fe1b0633 100644
--- a/Tools/Scripts/webkitpy/test/runner_unittest.py
+++ b/Tools/Scripts/webkitpy/test/runner_unittest.py
@@ -27,7 +27,7 @@ import unittest
from webkitpy.tool.mocktool import MockOptions
from webkitpy.test.printer import Printer
-from webkitpy.test.runner import Runner, _Worker
+from webkitpy.test.runner import Runner
class FakeModuleSuite(object):
@@ -65,7 +65,7 @@ class FakeLoader(object):
def top_suite(self):
return FakeTopSuite(self._tests)
- def loadTestsFromName(self, name, dummy):
+ def loadTestsFromName(self, name, _):
return FakeModuleSuite(*self._results[name])
@@ -84,28 +84,17 @@ class RunnerTest(unittest.TestCase):
for handler in self.log_handlers:
handler.level = self.log_levels.pop(0)
- def assert_run(self, verbose=0, timing=False, child_processes=1, quiet=False):
+ def test_run(self, verbose=0, timing=False, child_processes=1, quiet=False):
options = MockOptions(verbose=verbose, timing=timing, child_processes=child_processes, quiet=quiet, pass_through=False)
stream = StringIO.StringIO()
loader = FakeLoader(('test1 (Foo)', '.', ''),
('test2 (Foo)', 'F', 'test2\nfailed'),
('test3 (Foo)', 'E', 'test3\nerred'))
- runner = Runner(Printer(stream, options), options, loader)
- result = runner.run(loader.top_suite())
- self.assertFalse(result.wasSuccessful())
- self.assertEquals(result.testsRun, 3)
- self.assertEquals(len(result.failures), 1)
- self.assertEquals(len(result.errors), 1)
- # FIXME: check the output from the test
-
- def test_regular(self):
- self.assert_run()
-
- def test_verbose(self):
- self.assert_run(verbose=1)
-
- def test_timing(self):
- self.assert_run(timing=True)
+ runner = Runner(Printer(stream, options), loader)
+ runner.run(['Foo.test1', 'Foo.test2', 'Foo.test3'], 1)
+ self.assertEquals(runner.tests_run, 3)
+ self.assertEquals(len(runner.failures), 1)
+ self.assertEquals(len(runner.errors), 1)
if __name__ == '__main__':
diff --git a/Tools/Scripts/webkitpy/thirdparty/__init__.py b/Tools/Scripts/webkitpy/thirdparty/__init__.py
index b1edd4d0b..17ae62a07 100644
--- a/Tools/Scripts/webkitpy/thirdparty/__init__.py
+++ b/Tools/Scripts/webkitpy/thirdparty/__init__.py
@@ -70,6 +70,8 @@ class AutoinstallImportHook(object):
if '.autoinstalled' not in fullname:
return
+ # Note: all of the methods must follow the "_install_XXX" convention in
+ # order for autoinstall_everything(), below, to work properly.
if '.mechanize' in fullname:
self._install_mechanize()
elif '.pep8' in fullname:
@@ -88,18 +90,19 @@ class AutoinstallImportHook(object):
self._install_webpagereplay()
def _install_mechanize(self):
- self._install("http://pypi.python.org/packages/source/m/mechanize/mechanize-0.2.5.tar.gz",
- "mechanize-0.2.5/mechanize")
+ return self._install("http://pypi.python.org/packages/source/m/mechanize/mechanize-0.2.5.tar.gz",
+ "mechanize-0.2.5/mechanize")
def _install_pep8(self):
- self._install("http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b",
- "pep8-0.5.0/pep8.py")
+ return self._install("http://pypi.python.org/packages/source/p/pep8/pep8-0.5.0.tar.gz#md5=512a818af9979290cd619cce8e9c2e2b",
+ "pep8-0.5.0/pep8.py")
def _install_pylint(self):
+ installed_something = False
if not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "pylint")):
- self._install('http://pypi.python.org/packages/source/p/pylint/pylint-0.25.1.tar.gz#md5=728bbc2b339bc3749af013709a7f87a5', 'pylint-0.25.1')
+ installed_something = self._install('http://pypi.python.org/packages/source/p/pylint/pylint-0.25.1.tar.gz#md5=728bbc2b339bc3749af013709a7f87a5', 'pylint-0.25.1')
self._fs.move(self._fs.join(_AUTOINSTALLED_DIR, "pylint-0.25.1"), self._fs.join(_AUTOINSTALLED_DIR, "pylint"))
-
+ return installed_something
# autoinstalled.buildbot is used by BuildSlaveSupport/build.webkit.org-config/mastercfg_unittest.py
# and should ideally match the version of BuildBot used at build.webkit.org.
@@ -111,24 +114,24 @@ class AutoinstallImportHook(object):
# without including other modules as a side effect.
jinja_dir = self._fs.join(_AUTOINSTALLED_DIR, "jinja2")
installer = AutoInstaller(append_to_search_path=True, target_dir=jinja_dir)
- installer.install(url="http://pypi.python.org/packages/source/J/Jinja2/Jinja2-2.6.tar.gz#md5=1c49a8825c993bfdcf55bb36897d28a2",
- url_subpath="Jinja2-2.6/jinja2")
+ installed_something = installer.install(url="http://pypi.python.org/packages/source/J/Jinja2/Jinja2-2.6.tar.gz#md5=1c49a8825c993bfdcf55bb36897d28a2",
+ url_subpath="Jinja2-2.6/jinja2")
SQLAlchemy_dir = self._fs.join(_AUTOINSTALLED_DIR, "sqlalchemy")
installer = AutoInstaller(append_to_search_path=True, target_dir=SQLAlchemy_dir)
- installer.install(url="http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.7.7.tar.gz#md5=ddf6df7e014cea318fa981364f3f93b9",
- url_subpath="SQLAlchemy-0.7.7/lib/sqlalchemy")
+ installed_something |= installer.install(url="http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.7.7.tar.gz#md5=ddf6df7e014cea318fa981364f3f93b9",
+ url_subpath="SQLAlchemy-0.7.7/lib/sqlalchemy")
- self._install("http://pypi.python.org/packages/source/b/buildbot/buildbot-0.8.6p1.tar.gz#md5=b6727d2810c692062c657492bcbeac6a", "buildbot-0.8.6p1/buildbot")
+ installed_something |= self._install("http://pypi.python.org/packages/source/b/buildbot/buildbot-0.8.6p1.tar.gz#md5=b6727d2810c692062c657492bcbeac6a", "buildbot-0.8.6p1/buildbot")
+ return installed_something
def _install_coverage(self):
installer = AutoInstaller(target_dir=_AUTOINSTALLED_DIR)
- installer.install(url="http://pypi.python.org/packages/source/c/coverage/coverage-3.5.1.tar.gz#md5=410d4c8155a4dab222f2bc51212d4a24", url_subpath="coverage-3.5.1/coverage")
+ return installer.install(url="http://pypi.python.org/packages/source/c/coverage/coverage-3.5.1.tar.gz#md5=410d4c8155a4dab222f2bc51212d4a24", url_subpath="coverage-3.5.1/coverage")
def _install_eliza(self):
installer = AutoInstaller(target_dir=_AUTOINSTALLED_DIR)
- installer.install(url="http://www.adambarth.com/webkit/eliza",
- target_name="eliza.py")
+ return installer.install(url="http://www.adambarth.com/webkit/eliza", target_name="eliza.py")
def _install_irc(self):
# Since irclib and ircbot are two top-level packages, we need to import
@@ -136,23 +139,35 @@ class AutoinstallImportHook(object):
# organization purposes.
irc_dir = self._fs.join(_AUTOINSTALLED_DIR, "irc")
installer = AutoInstaller(target_dir=irc_dir)
- installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip",
- url_subpath="irclib.py")
- installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip",
+ installed_something = installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip",
+ url_subpath="irclib.py")
+ installed_something |= installer.install(url="http://downloads.sourceforge.net/project/python-irclib/python-irclib/0.4.8/python-irclib-0.4.8.zip",
url_subpath="ircbot.py")
+ return installed_something
def _install_webpagereplay(self):
+ installed_something = False
if not self._fs.exists(self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay")):
- self._install("http://web-page-replay.googlecode.com/files/webpagereplay-1.1.2.tar.gz", "webpagereplay-1.1.2")
+ installed_something = self._install("http://web-page-replay.googlecode.com/files/webpagereplay-1.1.2.tar.gz", "webpagereplay-1.1.2")
self._fs.move(self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay-1.1.2"), self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay"))
init_path = self._fs.join(_AUTOINSTALLED_DIR, "webpagereplay", "__init__.py")
if not self._fs.exists(init_path):
self._fs.write_text_file(init_path, "")
+ return installed_something
def _install(self, url, url_subpath):
installer = AutoInstaller(target_dir=_AUTOINSTALLED_DIR)
- installer.install(url=url, url_subpath=url_subpath)
+ return installer.install(url=url, url_subpath=url_subpath)
+
+
+_hook = AutoinstallImportHook()
+sys.meta_path.append(_hook)
-sys.meta_path.append(AutoinstallImportHook())
+def autoinstall_everything():
+ install_methods = [method for method in dir(_hook.__class__) if method.startswith('_install_')]
+ installed_something = False
+ for method in install_methods:
+ installed_something |= getattr(_hook, method)()
+ return installed_something
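
autoinstall_everything() finds the installers by reflection, which is why the hunk above insists on the "_install_XXX" naming convention and why each installer now reports whether it actually fetched anything. A reduced sketch of that pattern with a dummy hook; _FakeHook and its methods are invented for the example, and the real function operates on the module-level hook rather than taking a parameter.

    class _FakeHook(object):
        def _install_coverage(self):
            return True     # pretend this package had to be downloaded

        def _install_mechanize(self):
            return False    # pretend this one was already present

    def autoinstall_everything(hook):
        install_methods = [name for name in dir(hook.__class__) if name.startswith('_install_')]
        installed_something = False
        for name in install_methods:
            installed_something |= getattr(hook, name)()
        return installed_something

    assert autoinstall_everything(_FakeHook()) is True
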
diff --git a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
index 1150d1dd4..7c1487d7e 100644
--- a/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/flakytestreporter_unittest.py
@@ -125,7 +125,7 @@ If you would like to track this test fix with another bug, please close this bug
component: Tools / Tests
cc: abarth@webkit.org
blocked: 50856
-MOCK add_attachment_to_bug: bug_id=60001, description=Failure diff from mock-bot-id filename=failure.diff
+MOCK add_attachment_to_bug: bug_id=60001, description=Failure diff from mock-bot-id filename=failure.diff mimetype=None
MOCK bug comment: bug_id=50000, cc=None
--- Begin comment ---
The dummy-queue encountered the following flaky tests while processing attachment 10000:
diff --git a/Tools/Scripts/webkitpy/tool/bot/irc_command.py b/Tools/Scripts/webkitpy/tool/bot/irc_command.py
index 7d3a6fd5e..1c061a8db 100644
--- a/Tools/Scripts/webkitpy/tool/bot/irc_command.py
+++ b/Tools/Scripts/webkitpy/tool/bot/irc_command.py
@@ -210,18 +210,6 @@ class Whois(IRCCommand):
return "%s: I'm not sure who you mean? %s could be '%s'." % (nick, contributors_string, search_string)
-class Eliza(IRCCommand):
- therapist = None
-
- def __init__(self):
- if not self.therapist:
- import webkitpy.thirdparty.autoinstalled.eliza as eliza
- Eliza.therapist = eliza.eliza()
-
- def execute(self, nick, args, tool, sheriff):
- return "%s: %s" % (nick, self.therapist.respond(" ".join(args)))
-
-
class CreateBug(IRCCommand):
def execute(self, nick, args, tool, sheriff):
if not args:
diff --git a/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py b/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py
index 371e308ec..4dec669fd 100644
--- a/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/irc_command_unittest.py
@@ -35,11 +35,6 @@ from webkitpy.common.system.executive_mock import MockExecutive
class IRCCommandTest(unittest.TestCase):
- def test_eliza(self):
- eliza = Eliza()
- eliza.execute("tom", "hi", None, None)
- eliza.execute("tom", "bye", None, None)
-
def test_whois(self):
whois = Whois()
self.assertEquals("tom: Usage: whois SEARCH_STRING",
diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriffircbot.py b/Tools/Scripts/webkitpy/tool/bot/ircbot.py
index 7269c2ec5..0c45b97bf 100644
--- a/Tools/Scripts/webkitpy/tool/bot/sheriffircbot.py
+++ b/Tools/Scripts/webkitpy/tool/bot/ircbot.py
@@ -26,14 +26,15 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from webkitpy.tool.bot import irc_command
from webkitpy.tool.bot.queueengine import TerminateQueue
+from webkitpy.tool.bot.irc_command import IRCCommand
from webkitpy.common.net.irc.ircbot import IRCBotDelegate
from webkitpy.common.thread.threadedmessagequeue import ThreadedMessageQueue
class _IRCThreadTearoff(IRCBotDelegate):
- def __init__(self, password, message_queue, wakeup_event):
+ def __init__(self, name, password, message_queue, wakeup_event):
+ self._name = name
self._password = password
self._message_queue = message_queue
self._wakeup_event = wakeup_event
@@ -45,37 +46,50 @@ class _IRCThreadTearoff(IRCBotDelegate):
self._wakeup_event.set()
def irc_nickname(self):
- return "sheriffbot"
+ return self._name
def irc_password(self):
return self._password
-class SheriffIRCBot(object):
- def __init__(self, tool, sheriff):
+class Eliza(IRCCommand):
+ therapist = None
+
+ def __init__(self):
+ if not self.therapist:
+ import webkitpy.thirdparty.autoinstalled.eliza as eliza
+ Eliza.therapist = eliza.eliza()
+
+ def execute(self, nick, args, tool, sheriff):
+ return "%s: %s" % (nick, self.therapist.respond(" ".join(args)))
+
+
+class IRCBot(object):
+ def __init__(self, name, tool, agent, commands):
+ self._name = name
self._tool = tool
- self._sheriff = sheriff
+ self._agent = agent
self._message_queue = ThreadedMessageQueue()
+ self._commands = commands
def irc_delegate(self):
- return _IRCThreadTearoff(self._tool.irc_password,
- self._message_queue,
- self._tool.wakeup_event)
+ return _IRCThreadTearoff(self._name, self._tool.irc_password,
+ self._message_queue, self._tool.wakeup_event)
def _parse_command_and_args(self, request):
tokenized_request = request.strip().split(" ")
- command = irc_command.commands.get(tokenized_request[0])
+ command = self._commands.get(tokenized_request[0])
args = tokenized_request[1:]
if not command:
# Give the peoples someone to talk with.
- command = irc_command.Eliza
+ command = Eliza
args = tokenized_request
return (command, args)
def process_message(self, requester_nick, request):
command, args = self._parse_command_and_args(request)
try:
- response = command().execute(requester_nick, args, self._tool, self._sheriff)
+ response = command().execute(requester_nick, args, self._tool, self._agent)
if response:
self._tool.irc().post(response)
except TerminateQueue:
diff --git a/Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py b/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py
index 018f1f733..ce9a76bda 100644
--- a/Tools/Scripts/webkitpy/tool/bot/sheriffircbot_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/bot/ircbot_unittest.py
@@ -3,7 +3,7 @@
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
-#
+#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
@@ -13,7 +13,7 @@
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
-#
+#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -33,7 +33,8 @@ from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.tool.bot import irc_command
from webkitpy.tool.bot.queueengine import TerminateQueue
from webkitpy.tool.bot.sheriff import Sheriff
-from webkitpy.tool.bot.sheriffircbot import SheriffIRCBot
+from webkitpy.tool.bot.ircbot import IRCBot
+from webkitpy.tool.bot.ircbot import Eliza
from webkitpy.tool.bot.sheriff_unittest import MockSheriffBot
from webkitpy.tool.mocktool import MockTool
@@ -41,24 +42,29 @@ from webkitpy.tool.mocktool import MockTool
def run(message):
tool = MockTool()
tool.ensure_irc_connected(None)
- bot = SheriffIRCBot(tool, Sheriff(tool, MockSheriffBot()))
+ bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
bot._message_queue.post(["mock_nick", message])
bot.process_pending_messages()
-class SheriffIRCBotTest(unittest.TestCase):
+class IRCBotTest(unittest.TestCase):
+ def test_eliza(self):
+ eliza = Eliza()
+ eliza.execute("tom", "hi", None, None)
+ eliza.execute("tom", "bye", None, None)
+
def test_parse_command_and_args(self):
tool = MockTool()
- bot = SheriffIRCBot(tool, Sheriff(tool, MockSheriffBot()))
- self.assertEqual(bot._parse_command_and_args(""), (irc_command.Eliza, [""]))
- self.assertEqual(bot._parse_command_and_args(" "), (irc_command.Eliza, [""]))
+ bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
+ self.assertEqual(bot._parse_command_and_args(""), (Eliza, [""]))
+ self.assertEqual(bot._parse_command_and_args(" "), (Eliza, [""]))
self.assertEqual(bot._parse_command_and_args(" hi "), (irc_command.Hi, []))
self.assertEqual(bot._parse_command_and_args(" hi there "), (irc_command.Hi, ["there"]))
def test_exception_during_command(self):
tool = MockTool()
tool.ensure_irc_connected(None)
- bot = SheriffIRCBot(tool, Sheriff(tool, MockSheriffBot()))
+ bot = IRCBot("sheriffbot", tool, Sheriff(tool, MockSheriffBot()), irc_command.commands)
class CommandWithException(object):
def execute(self, nick, args, tool, sheriff):
diff --git a/Tools/Scripts/webkitpy/tool/commands/__init__.py b/Tools/Scripts/webkitpy/tool/commands/__init__.py
index e0d885f71..4e8eb62e1 100644
--- a/Tools/Scripts/webkitpy/tool/commands/__init__.py
+++ b/Tools/Scripts/webkitpy/tool/commands/__init__.py
@@ -12,6 +12,7 @@ from webkitpy.tool.commands.expectations import OptimizeExpectations
from webkitpy.tool.commands.findusers import FindUsers
from webkitpy.tool.commands.gardenomatic import GardenOMatic
from webkitpy.tool.commands.openbugs import OpenBugs
+from webkitpy.tool.commands.perfalizer import Perfalizer
from webkitpy.tool.commands.prettydiff import PrettyDiff
from webkitpy.tool.commands.queries import *
from webkitpy.tool.commands.queues import *
diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
index 639f4d8fc..08c8bf685 100644
--- a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
+++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem.py
@@ -192,6 +192,14 @@ class ChromiumWindowsEWS(AbstractChromiumEWS):
name = "cr-win-ews"
+class ChromiumAndroidEWS(AbstractChromiumEWS):
+ name = "cr-android-ews"
+ port_name = "chromium-android"
+ watchers = AbstractChromiumEWS.watchers + [
+ "peter+ews@chromium.org",
+ ]
+
+
class MacEWS(AbstractEarlyWarningSystem):
name = "mac-ews"
port_name = "mac"
diff --git a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
index 9dbb398ee..7feff0d62 100644
--- a/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/earlywarningsystem_unittest.py
@@ -82,6 +82,7 @@ class EarlyWarningSytemTest(QueuesTest):
def test_builder_ewses(self):
self._test_builder_ews(MacEWS())
self._test_builder_ews(ChromiumWindowsEWS())
+ self._test_builder_ews(ChromiumAndroidEWS())
self._test_builder_ews(QtEWS())
self._test_builder_ews(QtWK2EWS())
self._test_builder_ews(GtkEWS())
diff --git a/Tools/Scripts/webkitpy/tool/commands/perfalizer.py b/Tools/Scripts/webkitpy/tool/commands/perfalizer.py
new file mode 100644
index 000000000..b9fc6fe5b
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/perfalizer.py
@@ -0,0 +1,215 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from webkitpy.common.system.deprecated_logging import log
+from webkitpy.tool.bot.expectedfailures import ExpectedFailures
+from webkitpy.tool.bot.irc_command import IRCCommand
+from webkitpy.tool.bot.irc_command import Help
+from webkitpy.tool.bot.irc_command import Hi
+from webkitpy.tool.bot.irc_command import Restart
+from webkitpy.tool.bot.ircbot import IRCBot
+from webkitpy.tool.bot.patchanalysistask import PatchAnalysisTask, PatchAnalysisTaskDelegate, UnableToApplyPatch
+from webkitpy.tool.bot.sheriff import Sheriff
+from webkitpy.tool.commands.queues import AbstractQueue
+from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
+
+
+class PerfalizerTask(PatchAnalysisTask):
+ def __init__(self, tool, patch, logger):
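+        # PerfalizerTask serves as its own PatchAnalysisTaskDelegate, so it passes itself as the delegate.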
+ PatchAnalysisTask.__init__(self, self, patch)
+ self._port = tool.port_factory.get()
+ self._tool = tool
+ self._logger = logger
+
+ def _copy_build_product_without_patch(self):
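+        # Preserve the build product made without the patch so both builds can be benchmarked later.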
+ filesystem = self._tool.filesystem
+ configuration = filesystem.basename(self._port._build_path())
+ self._build_directory = filesystem.dirname(self._port._build_path())
+ self._build_directory_without_patch = self._build_directory + 'WithoutPatch'
+
+ try:
+ filesystem.rmtree(self._build_directory_without_patch)
+ filesystem.copytree(filesystem.join(self._build_directory, configuration),
+ filesystem.join(self._build_directory_without_patch, configuration))
+ return True
+        except Exception:
+ return False
+
+ def run(self):
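+        # Clean and update the checkout, build without the patch, snapshot that build,
+        # then apply the patch, rebuild, run the performance tests against both builds,
+        # and finally upload the generated results page to the bug.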
+ if not self._patch.committer() and not self._patch.attacher().can_commit:
+            self._logger('The patch %d is not authorized by a committer' % self._patch.id())
+ return False
+
+ self._logger('Preparing to run performance tests for the attachment %d...' % self._patch.id())
+ if not self._clean() or not self._update():
+ return False
+
+ head_revision = self._tool.scm().head_svn_revision()
+
+ self._logger('Building WebKit at r%s without the patch' % head_revision)
+ if not self._build():
+ return False
+
+ if not self._port.check_build(needs_http=False):
+ self._logger('Failed to build DumpRenderTree.')
+ return False
+
+ if not self._copy_build_product_without_patch():
+ self._logger('Failed to copy the build product from %s to %s' % (self._build_directory, self._build_directory_without_patch))
+ return False
+
+ self._logger('Building WebKit at r%s with the patch' % head_revision)
+ if not self._apply() or not self._build():
+ return False
+
+ if not self._port.check_build(needs_http=False):
+ self._logger('Failed to build DumpRenderTree.')
+ return False
+
+ filesystem = self._tool.filesystem
+ if filesystem.exists(self._json_path()):
+ filesystem.remove(self._json_path())
+
+ self._logger("Running performance tests...")
+ if self._run_perf_test(self._build_directory_without_patch) < 0:
+ self._logger('Failed to run performance tests without the patch.')
+ return False
+
+ if self._run_perf_test(self._build_directory) < 0:
+ self._logger('Failed to run performance tests with the patch.')
+ return False
+
+ if not filesystem.exists(self._results_page_path()):
+ self._logger('Failed to generate the results page.')
+ return False
+
+ results_page = filesystem.read_text_file(self._results_page_path())
+ self._tool.bugs.add_attachment_to_bug(self._patch.bug_id(), results_page,
+ description="Performance tests results for %d" % self._patch.id(), mimetype='text/html')
+
+ self._logger("Uploaded the results on the bug %d" % self._patch.bug_id())
+ return True
+
+ def parent_command(self):
+ return "perfalizer"
+
+ def run_webkit_patch(self, args):
+ webkit_patch_args = [self._tool.path()]
+ webkit_patch_args.extend(args)
+ return self._tool.executive.run_and_throw_if_fail(webkit_patch_args, cwd=self._tool.scm().checkout_root)
+
+ def _json_path(self):
+ return self._tool.filesystem.join(self._build_directory, 'PerformanceTestResults.json')
+
+ def _results_page_path(self):
+ return self._tool.filesystem.join(self._build_directory, 'PerformanceTestResults.html')
+
+ def _run_perf_test(self, build_path):
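+        # Launch run-perf-tests with --no-build so the already-built product in build_path is used.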
+ filesystem = self._tool.filesystem
+ script_path = filesystem.join(filesystem.dirname(self._tool.path()), 'run-perf-tests')
+ perf_test_runner_args = [script_path, '--no-build', '--no-show-results', '--build-directory', build_path,
+ '--output-json-path', self._json_path()]
+ return self._tool.executive.run_and_throw_if_fail(perf_test_runner_args, cwd=self._tool.scm().checkout_root)
+
+ def run_command(self, command):
+ self.run_webkit_patch(command)
+
+ def command_passed(self, message, patch):
+ pass
+
+ def command_failed(self, message, script_error, patch):
+ self._logger(message)
+
+ def refetch_patch(self, patch):
+ return self._tool.bugs.fetch_attachment(patch.id())
+
+ def expected_failures(self):
+ return ExpectedFailures()
+
+ def build_style(self):
+ return "release"
+
+
+class PerfTest(IRCCommand):
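+    # The 'test' IRC command: fetches the attachment named by the first argument and runs a PerfalizerTask on it.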
+ def execute(self, nick, args, tool, sheriff):
+ if not args:
+ tool.irc().post(nick + ": Please specify an attachment/patch id")
+ return
+
+ patch_id = args[0]
+ patch = tool.bugs.fetch_attachment(patch_id)
+ if not patch:
+ tool.irc().post(nick + ": Could not fetch the patch")
+ return
+
+ task = PerfalizerTask(tool, patch, lambda message: tool.irc().post('%s: %s' % (nick, message)))
+ task.run()
+
+
+class Perfalizer(AbstractQueue, StepSequenceErrorHandler):
+ name = "perfalizer"
+ watchers = AbstractQueue.watchers + ["rniwa@webkit.org"]
+
+ _commands = {
+ "help": Help,
+ "hi": Hi,
+ "restart": Restart,
+ "test": PerfTest,
+ }
+
+ # AbstractQueue methods
+
+ def begin_work_queue(self):
+ AbstractQueue.begin_work_queue(self)
+ self._sheriff = Sheriff(self._tool, self)
+ self._irc_bot = IRCBot("perfalizer", self._tool, self._sheriff, self._commands)
+ self._tool.ensure_irc_connected(self._irc_bot.irc_delegate())
+
+ def work_item_log_path(self, failure_map):
+ return None
+
+ def _is_old_failure(self, revision):
+ return self._tool.status_server.svn_revision(revision)
+
+ def next_work_item(self):
+ self._irc_bot.process_pending_messages()
+ return
+
+ def process_work_item(self, failure_map):
+ return True
+
+ def handle_unexpected_error(self, failure_map, message):
+ log(message)
+
+ # StepSequenceErrorHandler methods
+
+ @classmethod
+ def handle_script_error(cls, tool, state, script_error):
+ # Ideally we would post some information to IRC about what went wrong
+ # here, but we don't have the IRC password in the child process.
+ pass
diff --git a/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py b/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
new file mode 100644
index 000000000..f519e3f78
--- /dev/null
+++ b/Tools/Scripts/webkitpy/tool/commands/perfalizer_unittest.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2012 Google Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import unittest
+
+from webkitpy.common.net.buildbot import Builder
+from webkitpy.common.system.executive import ScriptError
+from webkitpy.common.system.outputcapture import OutputCapture
+from webkitpy.layout_tests.port.test import TestPort
+from webkitpy.tool.commands.perfalizer import PerfalizerTask
+from webkitpy.tool.mocktool import MockTool
+
+
+class PerfalizerTaskTest(unittest.TestCase):
+ def _create_and_run_perfalizer(self, commands_to_fail=[]):
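+        # Build a PerfalizerTask against MockTool with webkit-patch and run-perf-tests stubbed out;
+        # any command named in commands_to_fail is made to fail, and the captured log messages are returned.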
+ tool = MockTool()
+ patch = tool.bugs.fetch_attachment(10000)
+
+ logs = []
+
+ def logger(message):
+ logs.append(message)
+
+ def run_webkit_patch(args):
+ if args[0] in commands_to_fail:
+ raise ScriptError
+
+ def run_perf_test(build_path):
+ if 'run-perf-tests' in commands_to_fail:
+ return -1
+ if 'results-page' not in commands_to_fail:
+ tool.filesystem.write_text_file(tool.filesystem.join(build_path, 'PerformanceTestResults.html'), 'results page')
+ return 0
+
+ perfalizer = PerfalizerTask(tool, patch, logger)
+ perfalizer._port = TestPort(tool)
+ perfalizer.run_webkit_patch = run_webkit_patch
+ perfalizer._run_perf_test = run_perf_test
+
+ capture = OutputCapture()
+ capture.capture_output()
+
+ if commands_to_fail:
+ self.assertFalse(perfalizer.run())
+ else:
+ self.assertTrue(perfalizer.run())
+
+ capture.restore_output()
+
+ return logs
+
+ def test_run(self):
+ self.assertEqual(self._create_and_run_perfalizer(), [
+ 'Preparing to run performance tests for the attachment 10000...',
+ 'Building WebKit at r1234 without the patch',
+ 'Building WebKit at r1234 with the patch',
+ 'Running performance tests...',
+ 'Uploaded the results on the bug 50000'])
+
+ def test_run_with_clean_fails(self):
+ self.assertEqual(self._create_and_run_perfalizer(['clean']), [
+ 'Preparing to run performance tests for the attachment 10000...',
+ 'Unable to clean working directory'])
+
+ def test_run_with_update_fails(self):
+ logs = self._create_and_run_perfalizer(['update'])
+ self.assertEqual(len(logs), 2)
+ self.assertEqual(logs[-1], 'Unable to update working directory')
+
+ def test_run_with_build_fails(self):
+ logs = self._create_and_run_perfalizer(['build'])
+ self.assertEqual(len(logs), 3)
+
+    def test_run_with_apply_patch_fails(self):
+ logs = self._create_and_run_perfalizer(['apply-attachment'])
+ self.assertEqual(len(logs), 4)
+
+ def test_run_with_perf_test_fails(self):
+ logs = self._create_and_run_perfalizer(['run-perf-tests'])
+ self.assertEqual(len(logs), 5)
+ self.assertEqual(logs[-1], 'Failed to run performance tests without the patch.')
+
+ def test_run_without_results_page(self):
+ logs = self._create_and_run_perfalizer(['results-page'])
+ self.assertEqual(len(logs), 5)
+ self.assertEqual(logs[-1], 'Failed to generate the results page.')
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues.py b/Tools/Scripts/webkitpy/tool/commands/queues.py
index b251c0fb6..e8db17c7b 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queues.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queues.py
@@ -379,6 +379,9 @@ class AbstractReviewQueue(AbstractPatchQueue, StepSequenceErrorHandler):
# AbstractPatchQueue methods
+ def begin_work_queue(self):
+ AbstractPatchQueue.begin_work_queue(self)
+
def next_work_item(self):
return self._next_patch()
@@ -413,23 +416,6 @@ class StyleQueue(AbstractReviewQueue, StyleQueueTaskDelegate):
def __init__(self):
AbstractReviewQueue.__init__(self)
- def begin_work_queue(self):
- AbstractReviewQueue.begin_work_queue(self)
- self.clean_bugzilla()
-
- def clean_bugzilla(self):
- try:
- self._update_status("Cleaning review queue")
- self.run_webkit_patch(["clean-review-queue"])
- except ScriptError, e:
- self._update_status(e)
-
- try:
- self._update_status("Cleaning pending commit")
- self.run_webkit_patch(["clean-pending-commit"])
- except ScriptError, e:
- self._update_status(e)
-
def review_patch(self, patch):
task = StyleQueueTask(self, patch)
if not task.validate():
diff --git a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
index 2e6b1f07b..450a912e0 100644
--- a/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/queues_unittest.py
@@ -173,7 +173,7 @@ class AbstractPatchQueueTest(CommandsTest):
queue._options = Mock()
queue._options.port = None
patch = queue._tool.bugs.fetch_attachment(10001)
- expected_stderr = """MOCK add_attachment_to_bug: bug_id=50000, description=Archive of layout-test-results from bot filename=layout-test-results.zip
+ expected_stderr = """MOCK add_attachment_to_bug: bug_id=50000, description=Archive of layout-test-results from bot filename=layout-test-results.zip mimetype=None
-- Begin comment --
The attached test failures were seen while running run-webkit-tests on the mock-queue.
Port: MockPort Platform: MockPlatform 1.0
@@ -405,14 +405,14 @@ The commit-queue just saw foo/bar.html flake (Text diff mismatch) while processi
Port: MockPort Platform: MockPlatform 1.0
--- End comment ---
-MOCK add_attachment_to_bug: bug_id=50002, description=Failure diff from bot filename=failure.diff
+MOCK add_attachment_to_bug: bug_id=50002, description=Failure diff from bot filename=failure.diff mimetype=None
MOCK bug comment: bug_id=50002, cc=None
--- Begin comment ---
The commit-queue just saw bar/baz.html flake (Text diff mismatch) while processing attachment 10000 on bug 50000.
Port: MockPort Platform: MockPlatform 1.0
--- End comment ---
-MOCK add_attachment_to_bug: bug_id=50002, description=Archive of layout-test-results from bot filename=layout-test-results.zip
+MOCK add_attachment_to_bug: bug_id=50002, description=Archive of layout-test-results from bot filename=layout-test-results.zip mimetype=None
MOCK bug comment: bug_id=50000, cc=None
--- Begin comment ---
The commit-queue encountered the following flaky tests while processing attachment 10000:
@@ -449,11 +449,7 @@ The commit-queue is continuing to process your patch.
class StyleQueueTest(QueuesTest):
def test_style_queue_with_style_exception(self):
expected_stderr = {
- "begin_work_queue": self._default_begin_work_queue_stderr("style-queue") + """MOCK: update_status: style-queue Cleaning review queue
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean-review-queue'], cwd=/mock-checkout
-MOCK: update_status: style-queue Cleaning pending commit
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean-pending-commit'], cwd=/mock-checkout
-""",
+ "begin_work_queue": self._default_begin_work_queue_stderr("style-queue"),
"next_work_item": "",
"process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'], cwd=/mock-checkout
MOCK: update_status: style-queue Cleaned working directory
@@ -476,11 +472,7 @@ MOCK: release_work_item: style-queue 10000
def test_style_queue_with_watch_list_exception(self):
expected_stderr = {
- "begin_work_queue": self._default_begin_work_queue_stderr("style-queue") + """MOCK: update_status: style-queue Cleaning review queue
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean-review-queue'], cwd=/mock-checkout
-MOCK: update_status: style-queue Cleaning pending commit
-MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean-pending-commit'], cwd=/mock-checkout
-""",
+ "begin_work_queue": self._default_begin_work_queue_stderr("style-queue"),
"next_work_item": "",
"process_work_item": """MOCK run_and_throw_if_fail: ['echo', '--status-host=example.com', 'clean'], cwd=/mock-checkout
MOCK: update_status: style-queue Cleaned working directory
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
index ed27ab553..7ccbf565a 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline.py
@@ -321,16 +321,13 @@ class RebaselineExpectations(AbstractParallelRebaselineCommand):
name = "rebaseline-expectations"
help_text = "Rebaselines the tests indicated in TestExpectations."
- def _update_expectations_file(self, port_name):
+ def _update_expectations_files(self, port_name):
port = self._tool.port_factory.get(port_name)
- # FIXME: This will intentionally skip over any REBASELINE expectations that were in an overrides file.
- # This is not good, but avoids having the overrides getting written into the main file.
- # See https://bugs.webkit.org/show_bug.cgi?id=88456 for context. This will no longer be needed
- # once we properly support cascading expectations files.
expectations = TestExpectations(port, include_overrides=False)
- path = port.path_to_test_expectations_file()
- self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures()))
+ for path in port.expectations_dict():
+ if self._tool.filesystem.exists(path):
+ self._tool.filesystem.write_text_file(path, expectations.remove_rebaselined_tests(expectations.get_rebaselining_failures(), path))
def _tests_to_rebaseline(self, port):
tests_to_rebaseline = {}
@@ -361,7 +358,7 @@ class RebaselineExpectations(AbstractParallelRebaselineCommand):
self._rebaseline(options, self._test_list)
for port_name in tool.port_factory.all_port_names():
- self._update_expectations_file(port_name)
+ self._update_expectations_files(port_name)
class Rebaseline(AbstractParallelRebaselineCommand):
diff --git a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
index 433906b8c..93bd5c500 100644
--- a/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/rebaseline_unittest.py
@@ -63,6 +63,8 @@ class TestRebaseline(unittest.TestCase):
command.bind_to_tool(tool)
lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
+ # FIXME: work around the chromium skia expectations file to avoid getting a bunch of confusing warnings.
+ tool.filesystem.write_text_file(lion_port.path_from_chromium_base('skia', 'skia_test_expectations.txt'), '')
for path in lion_port.expectations_files():
tool.filesystem.write_text_file(path, '')
tool.filesystem.write_text_file(lion_port.path_to_test_expectations_file(), """BUGB MAC LINUX XP DEBUG : fast/dom/Window/window-postmessage-clone-really-deep-array.html = PASS
@@ -75,7 +77,6 @@ BUGA DEBUG : fast/css/large-list-of-rules-crash.html = TEXT
expected_logs = """Retrieving http://example.com/f/builders/Webkit Mac10.7/results/layout-test-results/userscripts/another-test-actual.png.
Retrieving http://example.com/f/builders/Webkit Mac10.7/results/layout-test-results/userscripts/another-test-actual.wav.
Retrieving http://example.com/f/builders/Webkit Mac10.7/results/layout-test-results/userscripts/another-test-actual.txt.
-Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
"""
OutputCapture().assert_outputs(self, command._rebaseline_test_and_update_expectations, ["Webkit Mac10.7", "userscripts/another-test.html", None], expected_logs=expected_logs)
@@ -227,6 +228,9 @@ MOCK run_command: ['echo', 'optimize-baselines', '--suffixes', 'txt', 'user-scri
tool = MockTool()
command.bind_to_tool(tool)
+ # FIXME: work around the chromium skia expectations file to avoid getting a bunch of confusing warnings.
+ lion_port = tool.port_factory.get_from_builder_name("Webkit Mac10.7")
+ tool.filesystem.write_text_file(lion_port.path_from_chromium_base('skia', 'skia_test_expectations.txt'), '')
for port_name in tool.port_factory.all_port_names():
port = tool.port_factory.get(port_name)
for path in port.expectations_files():
@@ -274,12 +278,6 @@ Retrieving results for qt-linux from Qt Linux Release.
Retrieving results for win-7sp0 from Apple Win 7 Release (Tests).
userscripts/another-test.html (txt)
userscripts/images.svg (png)
-Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
-Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
-Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
-Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
-Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
-Using the chromium port without having the downstream skia_test_expectations.txt file checked out. Expectations related things might be wonky.
"""
expected_stdout = """[(['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Linux 32', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Linux', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Mac10.6', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Mac10.7', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Win7', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Apple Win 7 Release (Tests)', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'EFL Linux 64-bit Release', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Webkit Win', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'GTK Linux 64-bit Release', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Qt Linux Release', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'txt', '--builder', 'Apple Lion Release WK1 (Tests)', '--test', 'userscripts/another-test.html'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Linux 32', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Linux', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Mac10.6', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Mac10.7', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Win7', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Apple Win 7 Release (Tests)', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'EFL Linux 64-bit Release', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Webkit Win', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'GTK Linux 64-bit Release', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Qt Linux Release', '--test', 'userscripts/images.svg'], '/mock-checkout'), (['echo', 'rebaseline-test-internal', '--suffixes', 'png', '--builder', 'Apple Lion Release WK1 (Tests)', '--test', 'userscripts/images.svg'], '/mock-checkout')]
diff --git a/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py b/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py
index 81f435394..d30da395b 100644
--- a/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py
+++ b/Tools/Scripts/webkitpy/tool/commands/sheriffbot.py
@@ -28,7 +28,8 @@
from webkitpy.common.system.deprecated_logging import log
from webkitpy.tool.bot.sheriff import Sheriff
-from webkitpy.tool.bot.sheriffircbot import SheriffIRCBot
+from webkitpy.tool.bot.irc_command import commands as irc_commands
+from webkitpy.tool.bot.ircbot import IRCBot
from webkitpy.tool.commands.queues import AbstractQueue
from webkitpy.tool.commands.stepsequence import StepSequenceErrorHandler
@@ -45,7 +46,7 @@ class SheriffBot(AbstractQueue, StepSequenceErrorHandler):
def begin_work_queue(self):
AbstractQueue.begin_work_queue(self)
self._sheriff = Sheriff(self._tool, self)
- self._irc_bot = SheriffIRCBot(self._tool, self._sheriff)
+ self._irc_bot = IRCBot("sheriffbot", self._tool, self._sheriff, irc_commands)
self._tool.ensure_irc_connected(self._irc_bot.irc_delegate())
def work_item_log_path(self, failure_map):
diff --git a/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py b/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py
index 0ab0ede8f..185bb97f3 100644
--- a/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/commands/upload_unittest.py
@@ -79,7 +79,7 @@ MOCK: user.open_url: http://example.com/50000
options = MockOptions()
options.comment = "extra comment"
options.description = "file description"
- expected_stderr = """MOCK add_attachment_to_bug: bug_id=50000, description=file description filename=None
+ expected_stderr = """MOCK add_attachment_to_bug: bug_id=50000, description=file description filename=None mimetype=None
-- Begin comment --
extra comment
-- End comment --
@@ -90,7 +90,7 @@ extra comment
options = MockOptions()
options.comment = None
options.description = None
- expected_stderr = """MOCK add_attachment_to_bug: bug_id=50000, description=file.txt filename=None
+ expected_stderr = """MOCK add_attachment_to_bug: bug_id=50000, description=file.txt filename=None mimetype=None
"""
self.assert_execute_outputs(AttachToBug(), [50000, "path/to/file.txt"], options=options, expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests.py b/Tools/Scripts/webkitpy/tool/steps/runtests.py
index ac5493b4e..aa8729123 100644
--- a/Tools/Scripts/webkitpy/tool/steps/runtests.py
+++ b/Tools/Scripts/webkitpy/tool/steps/runtests.py
@@ -85,8 +85,7 @@ class RunTests(AbstractStep):
"--skip-failing-tests",
"--exit-after-n-failures=%s" % self.NON_INTERACTIVE_FAILURE_LIMIT_COUNT,
"--results-directory=%s" % self._tool.port().results_directory,
- # We customize the printing options to avoid generating massive logs on the EWS and commit-queue.
- "--print=actual,config,expected,misc,slowest,unexpected,unexpected-results",
+ "--quiet",
])
if self._options.quiet:
diff --git a/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py b/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
index fd89ca946..bf888e505 100644
--- a/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/runtests_unittest.py
@@ -41,6 +41,6 @@ class RunTestsTest(unittest.TestCase):
expected_stderr = """Running WebKit unit tests
MOCK run_and_throw_if_fail: ['mock-run-webkit-unit-tests', '--gtest_output=xml:/mock-results/webkit_unit_tests_output.xml'], cwd=/mock-checkout
Running run-webkit-tests
-MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--skip-failing-tests', '--exit-after-n-failures=30', '--results-directory=/mock-results', '--print=actual,config,expected,misc,slowest,unexpected,unexpected-results'], cwd=/mock-checkout
+MOCK run_and_throw_if_fail: ['mock-run-webkit-tests', '--no-new-test-results', '--no-launch-safari', '--skip-failing-tests', '--exit-after-n-failures=30', '--results-directory=/mock-results', '--quiet'], cwd=/mock-checkout
"""
OutputCapture().assert_outputs(self, step.run, [{}], expected_stderr=expected_stderr)
diff --git a/Tools/Scripts/webkitpy/tool/steps/update_unittest.py b/Tools/Scripts/webkitpy/tool/steps/update_unittest.py
index 19ef949da..c1a934db5 100644
--- a/Tools/Scripts/webkitpy/tool/steps/update_unittest.py
+++ b/Tools/Scripts/webkitpy/tool/steps/update_unittest.py
@@ -28,7 +28,7 @@
import unittest
-from webkitpy.common.config.ports import ChromiumPort, ChromiumXVFBPort
+from webkitpy.common.config.ports import ChromiumPort, ChromiumAndroidPort, ChromiumXVFBPort
from webkitpy.tool.mocktool import MockOptions, MockTool
from webkitpy.tool.steps.update import Update
@@ -47,6 +47,9 @@ class UpdateTest(unittest.TestCase):
tool._deprecated_port = ChromiumXVFBPort()
self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--force-update"], step._update_command())
+ tool._deprecated_port = ChromiumAndroidPort()
+ self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--force-update", "--chromium-android"], step._update_command())
+
def test_update_command_interactive(self):
tool = MockTool()
options = MockOptions(non_interactive=False)
@@ -58,3 +61,6 @@ class UpdateTest(unittest.TestCase):
tool._deprecated_port = ChromiumXVFBPort()
self.assertEqual(["Tools/Scripts/update-webkit", "--chromium"], step._update_command())
+
+ tool._deprecated_port = ChromiumAndroidPort()
+ self.assertEqual(["Tools/Scripts/update-webkit", "--chromium", "--chromium-android"], step._update_command())
diff --git a/Tools/Scripts/webkitpy/webkitpy.pyproj b/Tools/Scripts/webkitpy/webkitpy.pyproj
index 72135a8d4..0bff5fce6 100644
--- a/Tools/Scripts/webkitpy/webkitpy.pyproj
+++ b/Tools/Scripts/webkitpy/webkitpy.pyproj
@@ -378,14 +378,14 @@
<Compile Include="tool\bot\flakytestreporter_unittest.py" />
<Compile Include="tool\bot\irc_command.py" />
<Compile Include="tool\bot\irc_command_unittest.py" />
+ <Compile Include="tool\bot\ircbot.py" />
+ <Compile Include="tool\bot\ircbot_unittest.py" />
<Compile Include="tool\bot\layouttestresultsreader.py" />
<Compile Include="tool\bot\layouttestresultsreader_unittest.py" />
<Compile Include="tool\bot\patchanalysistask.py" />
<Compile Include="tool\bot\queueengine.py" />
<Compile Include="tool\bot\queueengine_unittest.py" />
<Compile Include="tool\bot\sheriff.py" />
- <Compile Include="tool\bot\sheriffircbot.py" />
- <Compile Include="tool\bot\sheriffircbot_unittest.py" />
<Compile Include="tool\bot\sheriff_unittest.py" />
<Compile Include="tool\bot\stylequeuetask.py" />
<Compile Include="tool\bot\__init__.py" />