author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/testing/scripts
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download   qtwebengine-chromium-85-based.tar.gz
BASELINE: Update Chromium to 85.0.4183.140
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/testing/scripts')
8 files changed, 353 insertions(+), 138 deletions(-)
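Before the diff itself, a quick orientation: this change extracts the output and sharding plumbing shared by run_wpt_tests.py and run_android_wpt.py into a new wpt_common.BaseWptScriptAdapter. A minimal sketch of the sharding mapping that moves into the base class (function body taken from the diff below; the standalone form and the assert are illustrative only):

```python
# Sketch of the sharding mapping centralized in BaseWptScriptAdapter
# (see wpt_common.py in the diff below). The isolated-script shard_index
# is 0-based, while WPT's --this-chunk flag is 1-based.
def generate_sharding_args(total_shards, shard_index):
    return ['--total-chunks=%d' % total_shards,
            '--this-chunk=%d' % (shard_index + 1)]

# e.g. shard 0 of 4 maps to chunk 1 of 4:
assert generate_sharding_args(4, 0) == ['--total-chunks=4', '--this-chunk=1']
```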
diff --git a/chromium/testing/scripts/OWNERS b/chromium/testing/scripts/OWNERS
index bccebb03dfb..5fe2f0fa096 100644
--- a/chromium/testing/scripts/OWNERS
+++ b/chromium/testing/scripts/OWNERS
@@ -2,14 +2,18 @@
 # and understand the implications of changing these files.

 dpranke@chromium.org
+dpranke@google.com
 kbr@chromium.org
 martiniss@chromium.org

 per-file check_static_initializers.py=thakis@chromium.org
 per-file check_static_initializers.py=thomasanderson@chromium.org

-per-file run-wpt_tests.py=lpz@chromium.org
+per-file run_wpt_tests.py=lpz@chromium.org
 per-file run_wpt_tests.py=robertma@chromium.org
+per-file wpt_common.py=lpz@chromium.org
+per-file wpt_common.py=robertma@chromium.org
+per-file wpt_common.py=rmhasan@chromium.org

 per-file run_performance_tests.py=johnchen@chromium.org
 per-file run_performance_tests.py=wenbinzhang@google.com
diff --git a/chromium/testing/scripts/get_compile_targets.py b/chromium/testing/scripts/get_compile_targets.py
index aed1c569e8a..1e5fcf02a96 100755
--- a/chromium/testing/scripts/get_compile_targets.py
+++ b/chromium/testing/scripts/get_compile_targets.py
@@ -30,7 +30,8 @@ def main(argv):
     if filename in ('common.py',
                     'get_compile_targets.py',
                     'gpu_integration_test_adapter.py',
-                    'sizes_common.py'):
+                    'sizes_common.py',
+                    'wpt_common.py'):
       continue

     with common.temporary_file() as tempfile_path:
diff --git a/chromium/testing/scripts/representative_perf_test_data/representatives_frame_times_upper_limit.json b/chromium/testing/scripts/representative_perf_test_data/representatives_frame_times_upper_limit.json
index 5b370b87b67..8f6d3038f14 100644
--- a/chromium/testing/scripts/representative_perf_test_data/representatives_frame_times_upper_limit.json
+++ b/chromium/testing/scripts/representative_perf_test_data/representatives_frame_times_upper_limit.json
@@ -6,7 +6,9 @@
   },
   "filter_terrain_svg": {
     "ci_095": 0.377,
-    "avg": 31.486
+    "avg": 31.486,
+    "experimental": true,
+    "_comment": "crbug.com/1053614"
   },
   "web_animation_value_type_transform_complex": {
     "ci_095": 1.783,
@@ -55,7 +57,9 @@
   },
   "css_value_type_shadow": {
     "ci_095": 1.284,
-    "avg": 49.386
+    "avg": 49.386,
+    "experimental": true,
+    "_comment": "crbug.com/1093313"
   },
   "nvidia_vertex_buffer_object": {
     "ci_095": 3.37,
@@ -85,7 +89,9 @@
   },
   "css_value_type_shadow": {
     "ci_095": 13.611,
-    "avg": 61.261
+    "avg": 61.261,
+    "experimental": true,
+    "_comment": "crbug.com/1093313"
   },
   "animometer_webgl_attrib_arrays": {
     "ci_095": 0.495,
diff --git a/chromium/testing/scripts/run_android_wpt.py b/chromium/testing/scripts/run_android_wpt.py
index e657f092f2d..7950d391f79 100755
--- a/chromium/testing/scripts/run_android_wpt.py
+++ b/chromium/testing/scripts/run_android_wpt.py
@@ -33,6 +33,7 @@ import shutil
 import sys

 import common
+import wpt_common

 logger = logging.getLogger(__name__)

@@ -40,37 +41,35 @@ SRC_DIR = os.path.abspath(
     os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))

 BUILD_ANDROID = os.path.join(SRC_DIR, 'build', 'android')
+BLINK_TOOLS_DIR = os.path.join(
+    SRC_DIR, 'third_party', 'blink', 'tools')
+CATAPULT_DIR = os.path.join(SRC_DIR, 'third_party', 'catapult')
+DEFAULT_WPT = os.path.join(wpt_common.WEB_TESTS_DIR, 'external', 'wpt', 'wpt')
+PYUTILS = os.path.join(CATAPULT_DIR, 'common', 'py_utils')
+
+if PYUTILS not in sys.path:
+  sys.path.append(PYUTILS)
+
+if BLINK_TOOLS_DIR not in sys.path:
+  sys.path.append(BLINK_TOOLS_DIR)

 if BUILD_ANDROID not in sys.path:
   sys.path.append(BUILD_ANDROID)

 import devil_chromium

+from blinkpy.web_tests.port.android import (
+    PRODUCTS, PRODUCTS_TO_EXPECTATION_FILE_PATHS, ANDROID_WEBLAYER,
+    ANDROID_WEBVIEW, CHROME_ANDROID, ANDROID_DISABLED_TESTS)
+
 from devil import devil_env
 from devil.android import apk_helper
 from devil.android import device_utils
 from devil.android.tools import system_app
 from devil.android.tools import webview_app

-CATAPULT_DIR = os.path.join(SRC_DIR, 'third_party', 'catapult')
-PYUTILS = os.path.join(CATAPULT_DIR, 'common', 'py_utils')
-
-if PYUTILS not in sys.path:
-  sys.path.append(PYUTILS)
-
 from py_utils.tempfile_ext import NamedTemporaryDirectory

-BLINK_TOOLS_DIR = os.path.join(SRC_DIR, 'third_party', 'blink', 'tools')
-WEB_TESTS_DIR = os.path.join(BLINK_TOOLS_DIR, os.pardir, 'web_tests')
-DEFAULT_WPT = os.path.join(WEB_TESTS_DIR, 'external', 'wpt', 'wpt')
-
-ANDROID_WEBLAYER = 'android_weblayer'
-ANDROID_WEBVIEW = 'android_webview'
-CHROME_ANDROID = 'chrome_android'
-
-# List of supported products.
-PRODUCTS = [ANDROID_WEBLAYER, ANDROID_WEBVIEW, CHROME_ANDROID]
-

 class PassThroughArgs(argparse.Action):
   pass_through_args = []
@@ -107,28 +106,18 @@ def _get_adapter(device):
     return WPTClankAdapter(device)


-class WPTAndroidAdapter(common.BaseIsolatedScriptArgsAdapter):
+class WPTAndroidAdapter(wpt_common.BaseWptScriptAdapter):

   def __init__(self, device):
     self.pass_through_wpt_args = []
     self.pass_through_binary_args = []
     self._metadata_dir = None
-    self._test_apk = None
-    self._missing_test_apk_arg = None
     self._device = device
     super(WPTAndroidAdapter, self).__init__()
     # Arguments from add_extra_argumentsparse were added so
     # its safe to parse the arguments and set self._options
     self.parse_args()

-  def generate_test_output_args(self, output):
-    return ['--log-chromium', output]
-
-  def generate_sharding_args(self, total_shards, shard_index):
-    return ['--total-chunks=%d' % total_shards,
-            # shard_index is 0-based but WPT's this-chunk to be 1-based
-            '--this-chunk=%d' % (shard_index + 1)]
-
   @property
   def rest_args(self):
     rest_args = super(WPTAndroidAdapter, self).rest_args
@@ -169,29 +158,25 @@ class WPTAndroidAdapter(common.BaseIsolatedScriptArgsAdapter):
     raise NotImplementedError

   def _maybe_build_metadata(self):
-    if not self._test_apk:
-      assert self._missing_test_apk_arg, (
-          'self._missing_test_apk_arg was not set.')
-      logger.warning('%s was not set, skipping metadata generation.' %
-                     self._missing_test_apk_arg)
-      return
-
     metadata_builder_cmd = [
         sys.executable,
-        os.path.join(BLINK_TOOLS_DIR, 'build_wpt_metadata.py'),
-        '--android-apk',
-        self._test_apk,
+        os.path.join(wpt_common.BLINK_TOOLS_DIR, 'build_wpt_metadata.py'),
+        '--android-product',
+        self.options.product,
+        '--ignore-default-expectations',
         '--metadata-output-dir',
         self._metadata_dir,
         '--additional-expectations',
-        os.path.join(WEB_TESTS_DIR, 'android', 'AndroidWPTNeverFixTests')
+        ANDROID_DISABLED_TESTS,
     ]
     metadata_builder_cmd.extend(self._extra_metadata_builder_args())
-    common.run_command(metadata_builder_cmd)
+    return common.run_command(metadata_builder_cmd)

   def run_test(self):
     with NamedTemporaryDirectory() as self._metadata_dir, self._install_apks():
-      self._maybe_build_metadata()
+      metadata_command_ret = self._maybe_build_metadata()
+      if metadata_command_ret != 0:
+        return metadata_command_ret
       return super(WPTAndroidAdapter, self).run_test()

   def _install_apks(self):
@@ -202,24 +187,6 @@ class WPTAndroidAdapter(common.BaseIsolatedScriptArgsAdapter):
     # which was deleted
     self._metadata_dir = None

-  def do_post_test_run_tasks(self):
-    # Move json results into layout-test-results directory
-    results_dir = os.path.dirname(self.options.isolated_script_test_output)
-    layout_test_results = os.path.join(results_dir, 'layout-test-results')
-    os.mkdir(layout_test_results)
-    shutil.copyfile(self.options.isolated_script_test_output,
-                    os.path.join(layout_test_results, 'full_results.json'))
-    # create full_results_jsonp.js file which is used to
-    # load results into the results viewer
-    with open(self.options.isolated_script_test_output, 'r') as full_results, \
-        open(os.path.join(
-            layout_test_results, 'full_results_jsonp.js'), 'w') as json_js:
-      json_js.write('ADD_FULL_RESULTS(%s);' % full_results.read())
-    # copy layout test results viewer to layout-test-results directory
-    shutil.copyfile(
-        os.path.join(WEB_TESTS_DIR, 'fast', 'harness', 'results.html'),
-        os.path.join(layout_test_results, 'results.html'))
-
   def add_extra_arguments(self, parser):
     # TODO: |pass_through_args| are broke and need to be supplied by way of
     # --binary-arg".
@@ -282,11 +249,6 @@ class WPTWeblayerAdapter(WPTAndroidAdapter):
   WEBLAYER_SHELL_PKG = 'org.chromium.weblayer.shell'
   WEBLAYER_SUPPORT_PKG = 'org.chromium.weblayer.support'

-  def __init__(self, device):
-    super(WPTWeblayerAdapter, self).__init__(device)
-    self._test_apk = self.options.weblayer_shell
-    self._missing_test_apk_arg = '--weblayer-shell'
-
   @contextlib.contextmanager
   def _install_apks(self):
     install_weblayer_shell_as_needed = maybe_install_user_apk(
@@ -304,8 +266,7 @@ class WPTWeblayerAdapter(WPTAndroidAdapter):
   def _extra_metadata_builder_args(self):
     return [
       '--additional-expectations',
-      os.path.join(WEB_TESTS_DIR,
-                   'android', 'WeblayerWPTOverrideExpectations')]
+      PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBLAYER]]

   def add_extra_arguments(self, parser):
     super(WPTWeblayerAdapter, self).add_extra_arguments(parser)
@@ -327,11 +288,6 @@ class WPTWebviewAdapter(WPTAndroidAdapter):

   SYSTEM_WEBVIEW_SHELL_PKG = 'org.chromium.webview_shell'

-  def __init__(self, device):
-    super(WPTWebviewAdapter, self).__init__(device)
-    self._test_apk = self.options.system_webview_shell
-    self._missing_test_apk_arg = '--system-webview-shell'
-
   @contextlib.contextmanager
   def _install_apks(self):
     install_shell_as_needed = maybe_install_user_apk(
@@ -346,8 +302,7 @@ class WPTWebviewAdapter(WPTAndroidAdapter):
   def _extra_metadata_builder_args(self):
     return [
       '--additional-expectations',
-      os.path.join(
-          WEB_TESTS_DIR, 'android', 'WebviewWPTOverrideExpectations')]
+      PRODUCTS_TO_EXPECTATION_FILE_PATHS[ANDROID_WEBVIEW]]

   def add_extra_arguments(self, parser):
     super(WPTWebviewAdapter, self).add_extra_arguments(parser)
@@ -366,11 +321,6 @@ class WPTClankAdapter(WPTAndroidAdapter):

-  def __init__(self, device):
-    super(WPTClankAdapter, self).__init__(device)
-    self._test_apk = self.options.chrome_apk
-    self._missing_test_apk_arg = '--chrome-apk'
-
   @contextlib.contextmanager
   def _install_apks(self):
     install_clank_as_needed = maybe_install_user_apk(
@@ -381,7 +331,7 @@ class WPTClankAdapter(WPTAndroidAdapter):
   def _extra_metadata_builder_args(self):
     return [
       '--additional-expectations',
-      os.path.join(WEB_TESTS_DIR, 'android', 'ClankWPTOverrideExpectations')]
+      PRODUCTS_TO_EXPECTATION_FILE_PATHS[CHROME_ANDROID]]

   def add_extra_arguments(self, parser):
     super(WPTClankAdapter, self).add_extra_arguments(parser)
diff --git a/chromium/testing/scripts/run_android_wpt.pydeps b/chromium/testing/scripts/run_android_wpt.pydeps
index 3f75fc0f319..06416555afa 100644
--- a/chromium/testing/scripts/run_android_wpt.pydeps
+++ b/chromium/testing/scripts/run_android_wpt.pydeps
@@ -6,8 +6,66 @@
 //build/android/pylib/constants/host_paths.py
 //testing/scripts/common.py
 //testing/scripts/run_android_wpt.py
+//testing/scripts/wpt_common.py
 //testing/test_env.py
 //testing/xvfb.py
+//third_party/blink/tools/blinkpy/__init__.py
+//third_party/blink/tools/blinkpy/common/__init__.py
+//third_party/blink/tools/blinkpy/common/checkout/__init__.py
+//third_party/blink/tools/blinkpy/common/checkout/git.py
+//third_party/blink/tools/blinkpy/common/exit_codes.py
+//third_party/blink/tools/blinkpy/common/find_files.py
+//third_party/blink/tools/blinkpy/common/host.py
+//third_party/blink/tools/blinkpy/common/html_diff.py
+//third_party/blink/tools/blinkpy/common/memoized.py
+//third_party/blink/tools/blinkpy/common/net/__init__.py
+//third_party/blink/tools/blinkpy/common/net/network_transaction.py
+//third_party/blink/tools/blinkpy/common/net/results_fetcher.py
+//third_party/blink/tools/blinkpy/common/net/web.py
+//third_party/blink/tools/blinkpy/common/net/web_test_results.py
+//third_party/blink/tools/blinkpy/common/path_finder.py
+//third_party/blink/tools/blinkpy/common/read_checksum_from_png.py
+//third_party/blink/tools/blinkpy/common/system/__init__.py
+//third_party/blink/tools/blinkpy/common/system/executive.py
+//third_party/blink/tools/blinkpy/common/system/filesystem.py
+//third_party/blink/tools/blinkpy/common/system/path.py
+//third_party/blink/tools/blinkpy/common/system/platform_info.py
+//third_party/blink/tools/blinkpy/common/system/profiler.py
+//third_party/blink/tools/blinkpy/common/system/system_host.py
+//third_party/blink/tools/blinkpy/common/system/user.py
+//third_party/blink/tools/blinkpy/common/unified_diff.py
+//third_party/blink/tools/blinkpy/w3c/__init__.py
+//third_party/blink/tools/blinkpy/w3c/wpt_manifest.py
+//third_party/blink/tools/blinkpy/web_tests/__init__.py
+//third_party/blink/tools/blinkpy/web_tests/breakpad/__init__.py
+//third_party/blink/tools/blinkpy/web_tests/breakpad/dump_reader.py
+//third_party/blink/tools/blinkpy/web_tests/breakpad/dump_reader_multipart.py
+//third_party/blink/tools/blinkpy/web_tests/breakpad/dump_reader_win.py
+//third_party/blink/tools/blinkpy/web_tests/builder_list.py
+//third_party/blink/tools/blinkpy/web_tests/controllers/__init__.py
+//third_party/blink/tools/blinkpy/web_tests/controllers/repaint_overlay.py
+//third_party/blink/tools/blinkpy/web_tests/layout_package/__init__.py
+//third_party/blink/tools/blinkpy/web_tests/layout_package/bot_test_expectations.py
+//third_party/blink/tools/blinkpy/web_tests/layout_package/json_results_generator.py
+//third_party/blink/tools/blinkpy/web_tests/models/__init__.py
+//third_party/blink/tools/blinkpy/web_tests/models/test_configuration.py
+//third_party/blink/tools/blinkpy/web_tests/models/test_expectations.py
+//third_party/blink/tools/blinkpy/web_tests/models/test_failures.py
+//third_party/blink/tools/blinkpy/web_tests/models/test_run_results.py
+//third_party/blink/tools/blinkpy/web_tests/models/typ_types.py
+//third_party/blink/tools/blinkpy/web_tests/port/__init__.py
+//third_party/blink/tools/blinkpy/web_tests/port/android.py
+//third_party/blink/tools/blinkpy/web_tests/port/base.py
+//third_party/blink/tools/blinkpy/web_tests/port/driver.py
+//third_party/blink/tools/blinkpy/web_tests/port/factory.py
+//third_party/blink/tools/blinkpy/web_tests/port/linux.py
+//third_party/blink/tools/blinkpy/web_tests/port/server_process.py
+//third_party/blink/tools/blinkpy/web_tests/port/win.py
+//third_party/blink/tools/blinkpy/web_tests/servers/__init__.py
+//third_party/blink/tools/blinkpy/web_tests/servers/apache_http.py
+//third_party/blink/tools/blinkpy/web_tests/servers/pywebsocket.py
+//third_party/blink/tools/blinkpy/web_tests/servers/server_base.py
+//third_party/blink/tools/blinkpy/web_tests/servers/wptserve.py
 //third_party/catapult/common/py_utils/py_utils/__init__.py
 //third_party/catapult/common/py_utils/py_utils/cloud_storage.py
 //third_party/catapult/common/py_utils/py_utils/cloud_storage_global_lock.py
@@ -30,7 +88,7 @@
 //third_party/catapult/devil/devil/android/constants/chrome.py
 //third_party/catapult/devil/devil/android/constants/file_system.py
 //third_party/catapult/devil/devil/android/decorators.py
-//third_party/catapult/devil/devil/android/device_blacklist.py
+//third_party/catapult/devil/devil/android/device_denylist.py
 //third_party/catapult/devil/devil/android/device_errors.py
 //third_party/catapult/devil/devil/android/device_signal.py
 //third_party/catapult/devil/devil/android/device_temp_file.py
@@ -71,4 +129,19 @@
 //third_party/catapult/devil/devil/utils/timeout_retry.py
 //third_party/catapult/devil/devil/utils/watchdog_timer.py
 //third_party/catapult/devil/devil/utils/zip_utils.py
+//third_party/catapult/third_party/typ/typ/__init__.py
+//third_party/catapult/third_party/typ/typ/arg_parser.py
+//third_party/catapult/third_party/typ/typ/artifacts.py
+//third_party/catapult/third_party/typ/typ/expectations_parser.py
+//third_party/catapult/third_party/typ/typ/fakes/__init__.py
+//third_party/catapult/third_party/typ/typ/fakes/host_fake.py
+//third_party/catapult/third_party/typ/typ/host.py
+//third_party/catapult/third_party/typ/typ/json_results.py
+//third_party/catapult/third_party/typ/typ/pool.py
+//third_party/catapult/third_party/typ/typ/printer.py
+//third_party/catapult/third_party/typ/typ/python_2_3_compat.py
+//third_party/catapult/third_party/typ/typ/runner.py
+//third_party/catapult/third_party/typ/typ/stats.py
+//third_party/catapult/third_party/typ/typ/test_case.py
+//third_party/catapult/third_party/typ/typ/version.py
 //third_party/catapult/third_party/zipfile/zipfile_2_7_13.py
diff --git a/chromium/testing/scripts/run_rendering_benchmark_with_gated_performance.py b/chromium/testing/scripts/run_rendering_benchmark_with_gated_performance.py
index db8852291f1..4c58b74944b 100755
--- a/chromium/testing/scripts/run_rendering_benchmark_with_gated_performance.py
+++ b/chromium/testing/scripts/run_rendering_benchmark_with_gated_performance.py
@@ -124,12 +124,19 @@ class RenderingRepresentativePerfTest(object):
     self.benchmark = self.options.benchmarks

     out_dir_path = os.path.dirname(self.options.isolated_script_test_output)
-    self.output_path = os.path.join(
-      out_dir_path, self.benchmark, 'test_results.json')
-    self.results_path = os.path.join(
-      out_dir_path, self.benchmark, 'perf_results.csv')
-
     re_run_output_dir = os.path.join(out_dir_path, 're_run_failures')
+
+    self.output_path = {
+      True: os.path.join(
+        re_run_output_dir, self.benchmark, 'test_results.json'),
+      False: os.path.join(out_dir_path, self.benchmark, 'test_results.json')
+    }
+    self.results_path = {
+      True: os.path.join(
+        re_run_output_dir, self.benchmark, 'perf_results.csv'),
+      False: os.path.join(out_dir_path, self.benchmark, 'perf_results.csv')
+    }
+
     re_run_test_output = os.path.join(re_run_output_dir,
       os.path.basename(self.options.isolated_script_test_output))
     re_run_test_perf_output = os.path.join(re_run_output_dir,
@@ -236,37 +243,38 @@ class RenderingRepresentativePerfTest(object):
           METRIC_NAME, measured_avg, upper_limit_avg))

   def interpret_run_benchmark_results(self, rerun=False):
-    with open(self.output_path, 'r+') as resultsFile:
+    with open(self.output_path[rerun], 'r+') as resultsFile:
       initialOut = json.load(resultsFile)
       self.result_recorder[rerun].set_tests(initialOut)

-      with open(self.results_path) as csv_file:
+      with open(self.results_path[rerun]) as csv_file:
         csv_obj = csv.DictReader(csv_file)
         values_per_story = self.parse_csv_results(csv_obj)

-      # Clearing the result of run_benchmark and write the gated perf results
-      resultsFile.seek(0)
-      resultsFile.truncate(0)
+      if not rerun:
+        # Clearing the result of run_benchmark and write the gated perf results
+        resultsFile.seek(0)
+        resultsFile.truncate(0)

     self.compare_values(values_per_story, rerun)

-  def run_perf_tests(self, rerun=False):
-    self.return_code |= run_performance_tests.main(
-      self.re_run_args if rerun else self.args)
-    self.interpret_run_benchmark_results(rerun)
+  def run_perf_tests(self):
+    self.return_code |= run_performance_tests.main(self.args)
+    self.interpret_run_benchmark_results(False)

-    if not rerun and len(self.result_recorder[rerun].failed_stories) > 0:
+    if len(self.result_recorder[False].failed_stories) > 0:
       # For failed stories we run_tests again to make sure it's not a false
       # positive.
       print('============ Re_run the failed tests ============')
       all_failed_stories = '('+'|'.join(
-        self.result_recorder[rerun].failed_stories)+')'
+        self.result_recorder[False].failed_stories)+')'
       # TODO(crbug.com/1055893): Remove the extra chrome categories after
       # investigation of flakes in representative perf tests.
       self.re_run_args.extend(
         ['--story-filter', all_failed_stories, '--pageset-repeat=3',
          '--extra-chrome-categories=blink,blink_gc,gpu,v8,viz'])
-      self.run_perf_tests(True)
+      self.return_code |= run_performance_tests.main(self.re_run_args)
+      self.interpret_run_benchmark_results(True)

       for story_name in self.result_recorder[False].failed_stories.copy():
         if story_name not in self.result_recorder[True].failed_stories:
@@ -283,7 +291,7 @@ class RenderingRepresentativePerfTest(object):
       self.return_code
     ) = self.result_recorder[False].get_output(self.return_code)

-    with open(self.output_path, 'r+') as resultsFile:
+    with open(self.output_path[False], 'r+') as resultsFile:
       json.dump(finalOut, resultsFile, indent=4)

     with open(self.options.isolated_script_test_output, 'w') as outputFile:
       json.dump(finalOut, outputFile, indent=4)
diff --git a/chromium/testing/scripts/run_wpt_tests.py b/chromium/testing/scripts/run_wpt_tests.py
index 86ca893abf2..b777ba189b7 100755
--- a/chromium/testing/scripts/run_wpt_tests.py
+++ b/chromium/testing/scripts/run_wpt_tests.py
@@ -16,26 +16,16 @@ Here's the mapping [isolate script flag] : [wpt flag]

 import json
 import os
-import shutil
 import sys

 import common
+import wpt_common

-BLINK_TOOLS_DIR = os.path.join(common.SRC_DIR, 'third_party', 'blink', 'tools')
-WEB_TESTS_DIR = os.path.join(BLINK_TOOLS_DIR, os.pardir, 'web_tests')
 WPT_METADATA_DIR = "../../wpt_expectations_metadata/"
 WPT_OVERRIDE_EXPECTATIONS_PATH = (
     "../../third_party/blink/web_tests/WPTOverrideExpectations")

-class WPTTestAdapter(common.BaseIsolatedScriptArgsAdapter):
-
-    def generate_test_output_args(self, output):
-        return ['--log-chromium', output]
-
-    def generate_sharding_args(self, total_shards, shard_index):
-        return ['--total-chunks=%d' % total_shards,
-                # shard_index is 0-based but WPT's this-chunk to be 1-based
-                '--this-chunk=%d' % (shard_index + 1)]
+class WPTTestAdapter(wpt_common.BaseWptScriptAdapter):

     @property
     def rest_args(self):
@@ -88,33 +78,11 @@
         ])
         return rest_args

-    def do_post_test_run_tasks(self):
-        # Move json results into layout-test-results directory
-        results_dir = os.path.dirname(self.options.isolated_script_test_output)
-        layout_test_results = os.path.join(results_dir, 'layout-test-results')
-        if os.path.exists(layout_test_results):
-            shutil.rmtree(layout_test_results)
-        os.mkdir(layout_test_results)
-        shutil.copyfile(self.options.isolated_script_test_output,
-                        os.path.join(layout_test_results, 'full_results.json'))
-        # create full_results_jsonp.js file which is used to
-        # load results into the results viewer
-        with open(self.options.isolated_script_test_output, 'r') \
-                as full_results, \
-            open(os.path.join(
-                layout_test_results, 'full_results_jsonp.js'), 'w') \
-                as json_js:
-            json_js.write('ADD_FULL_RESULTS(%s);' % full_results.read())
-        # copy layout test results viewer to layout-test-results directory
-        shutil.copyfile(
-            os.path.join(WEB_TESTS_DIR, 'fast', 'harness', 'results.html'),
-            os.path.join(layout_test_results, 'results.html'))
-
 def main():
     # First, generate WPT metadata files.
     common.run_command([
         sys.executable,
-        os.path.join(BLINK_TOOLS_DIR, 'build_wpt_metadata.py'),
+        os.path.join(wpt_common.BLINK_TOOLS_DIR, 'build_wpt_metadata.py'),
         "--metadata-output-dir",
         WPT_METADATA_DIR,
         "--additional-expectations",
diff --git a/chromium/testing/scripts/wpt_common.py b/chromium/testing/scripts/wpt_common.py
new file mode 100644
index 00000000000..ad1ca0eb0db
--- /dev/null
+++ b/chromium/testing/scripts/wpt_common.py
@@ -0,0 +1,205 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import base64
+import json
+import os
+import shutil
+import sys
+
+import common
+
+BLINK_TOOLS_DIR = os.path.join(common.SRC_DIR, 'third_party', 'blink', 'tools')
+WEB_TESTS_DIR = os.path.join(BLINK_TOOLS_DIR, os.pardir, 'web_tests')
+
+if BLINK_TOOLS_DIR not in sys.path:
+    sys.path.append(BLINK_TOOLS_DIR)
+
+from blinkpy.common.host import Host
+from blinkpy.web_tests.models import test_failures
+
+class BaseWptScriptAdapter(common.BaseIsolatedScriptArgsAdapter):
+    """The base class for script adapters that use wptrunner to execute web
+    platform tests. This contains any code shared between these scripts, such
+    as integrating output with the results viewer. Subclasses contain other
+    (usually platform-specific) logic."""
+
+    def __init__(self):
+        super(BaseWptScriptAdapter, self).__init__()
+        host = Host()
+        self.port = host.port_factory.get()
+
+    def generate_test_output_args(self, output):
+        return ['--log-chromium', output]
+
+    def generate_sharding_args(self, total_shards, shard_index):
+        return ['--total-chunks=%d' % total_shards,
+                # shard_index is 0-based but WPT's this-chunk to be 1-based
+                '--this-chunk=%d' % (shard_index + 1)]
+
+    def do_post_test_run_tasks(self):
+        # Move json results into layout-test-results directory
+        results_dir = os.path.dirname(self.options.isolated_script_test_output)
+        layout_test_results = os.path.join(results_dir, 'layout-test-results')
+        if os.path.exists(layout_test_results):
+            shutil.rmtree(layout_test_results)
+        os.mkdir(layout_test_results)
+
+        # Perform post-processing of wptrunner output
+        self.process_wptrunner_output()
+
+        shutil.copyfile(self.options.isolated_script_test_output,
+                        os.path.join(layout_test_results, 'full_results.json'))
+        # create full_results_jsonp.js file which is used to
+        # load results into the results viewer
+        with open(self.options.isolated_script_test_output, 'r') \
+                as full_results, \
+            open(os.path.join(
+                layout_test_results, 'full_results_jsonp.js'), 'w') \
+                as json_js:
+            json_js.write('ADD_FULL_RESULTS(%s);' % full_results.read())
+        # copy layout test results viewer to layout-test-results directory
+        shutil.copyfile(
+            os.path.join(WEB_TESTS_DIR, 'fast', 'harness', 'results.html'),
+            os.path.join(layout_test_results, 'results.html'))
+
+    def process_wptrunner_output(self):
+        """Post-process the output generated by wptrunner.
+
+        This output contains a single large json file containing the raw
+        content or artifacts which need to be extracted into their own files
+        and removed from the json file (to avoid duplication)."""
+        output_json = json.load(
+            open(self.options.isolated_script_test_output, "r"))
+        test_json = output_json["tests"]
+        results_dir = os.path.dirname(self.options.isolated_script_test_output)
+        self._process_test_leaves(results_dir, output_json["path_delimiter"],
+                                  test_json, "")
+        # Write output_json back to the same file after modifying it in memory
+        with open(self.options.isolated_script_test_output, "w") as output_file:
+            json.dump(output_json, output_file)
+
+    def _process_test_leaves(self, results_dir, delim, root_node, path_so_far):
+        """Finds and processes each test leaf below the specified root.
+
+        This will recursively traverse the trie of results in the json output,
+        keeping track of the path to each test and identifying leaves by the
+        presence of certain attributes.
+
+        Args:
+            results_dir: str path to the dir that results are stored
+            delim: str delimiter to be used for test names
+            root_node: dict representing the root of the trie we're currently
+                looking at
+            path_so_far: str the path to the current root_node in the trie
+        """
+        if "actual" in root_node:
+            # Found a leaf, process it
+            if "artifacts" not in root_node:
+                return
+            log_artifact = root_node["artifacts"].pop("log", None)
+            if log_artifact:
+                artifact_subpath = self._write_log_artifact(
+                    results_dir, path_so_far, log_artifact)
+                root_node["artifacts"]["actual_text"] = [artifact_subpath]
+
+            screenshot_artifact = root_node["artifacts"].pop("screenshots",
+                                                             None)
+            if screenshot_artifact:
+                screenshot_paths_dict = self._write_screenshot_artifact(
+                    results_dir, path_so_far, screenshot_artifact)
+                for screenshot_key, path in screenshot_paths_dict.items():
+                    root_node["artifacts"][screenshot_key] = [path]
+
+            return
+
+        # We're not at a leaf node, continue traversing the trie.
+        for key in root_node:
+            # Append the key to the path, separated by the delimiter. However if
+            # the path is empty, skip the delimiter to avoid a leading slash in
+            # the path.
+            new_path = path_so_far + delim + key if path_so_far else key
+            self._process_test_leaves(results_dir, delim, root_node[key],
+                                      new_path)
+
+    def _write_log_artifact(self, results_dir, test_name, log_artifact):
+        """Writes a log artifact to disk.
+
+        The log artifact contains all the output of a test. It gets written to
+        the -actual.txt file for the test.
+
+        Args:
+            results_dir: str path to the directory that results live in
+            test_name: str name of the test that this artifact is for
+            log_artifact: list of strings, the log entries for this test from
+                the json output.
+
+        Returns:
+            string path to the artifact file that the log was written to,
+            relative to the directory that the original output is located.
+        """
+        log_artifact_sub_path = (
+            os.path.join("layout-test-results",
+                         self.port.output_filename(
+                             test_name, test_failures.FILENAME_SUFFIX_ACTUAL,
+                             ".txt"))
+        )
+        log_artifact_full_path = os.path.join(results_dir,
+                                              log_artifact_sub_path)
+        if not os.path.exists(os.path.dirname(log_artifact_full_path)):
+            os.makedirs(os.path.dirname(log_artifact_full_path))
+        with open(log_artifact_full_path, "w") as artifact_file:
+            artifact_file.write("\n".join(log_artifact).encode("utf-8"))
+
+        return log_artifact_sub_path
+
+    def _write_screenshot_artifact(self, results_dir, test_name,
+                                   screenshot_artifact):
+        """Write screenshot artifact to disk.
+
+        The screenshot artifact is a list of strings, each of which has the
+        format <url>:<base64-encoded PNG>. Each url-png pair is a screenshot of
+        either the test, or one of its refs. We can identify which screenshot is
+        for the test by comparing the url piece to the test name.
+
+        Args:
+            results_dir: str path to the directory that results live in
+            test:name str name of the test that this artifact is for
+            screenshot_artifact: list of strings, each being a url-png pair as
+                described above.
+
+        Returns:
+            A dict mapping the screenshot key (ie: actual, expected) to the
+            path of the file for that screenshot
+        """
+        result={}
+        for screenshot_pair in screenshot_artifact:
+            screenshot_split = screenshot_pair.split(":")
+            url = screenshot_split[0]
+            # The url produced by wptrunner will have a leading / which we trim
+            # away for easier comparison to the test_name below.
+            if url.startswith("/"):
+                url = url[1:]
+            image_bytes = base64.b64decode(screenshot_split[1].strip())
+
+            screenshot_key = "expected_image"
+            file_suffix = test_failures.FILENAME_SUFFIX_EXPECTED
+            if test_name == url:
+                screenshot_key = "actual_image"
+                file_suffix = test_failures.FILENAME_SUFFIX_ACTUAL
+
+            screenshot_sub_path = (
+                os.path.join("layout-test-results",
+                             self.port.output_filename(
+                                 test_name, file_suffix, ".png"))
+            )
+            result[screenshot_key] = screenshot_sub_path
+
+            screenshot_full_path = os.path.join(results_dir,screenshot_sub_path)
+            if not os.path.exists(os.path.dirname(screenshot_full_path)):
+                os.makedirs(os.path.dirname(screenshot_full_path))
+            # Note: we are writing raw bytes to this file
+            with open(screenshot_full_path, "wb") as artifact_file:
+                artifact_file.write(image_bytes)
+        return result