summaryrefslogtreecommitdiff
path: root/chromium/gpu
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2021-03-12 09:13:00 +0100
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2021-03-16 09:58:26 +0000
commit03561cae90f1d99b5c54b1ef3be69f10e882b25e (patch)
treecc5f0958e823c044e7ae51cc0117fe51432abe5e /chromium/gpu
parentfa98118a45f7e169f8846086dc2c22c49a8ba310 (diff)
downloadqtwebengine-chromium-03561cae90f1d99b5c54b1ef3be69f10e882b25e.tar.gz
BASELINE: Update Chromium to 88.0.4324.208
Change-Id: I3ae87d23e4eff4b4a469685658740a213600c667 Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/gpu')
-rw-r--r--chromium/gpu/BUILD.gn30
-rw-r--r--chromium/gpu/DIR_METADATA11
-rw-r--r--chromium/gpu/GLES2/DIR_METADATA11
-rw-r--r--chromium/gpu/GLES2/OWNERS2
-rw-r--r--chromium/gpu/OWNERS2
-rw-r--r--chromium/gpu/angle_deqp_tests_main.cc45
-rw-r--r--chromium/gpu/angle_end2end_tests_main.cc39
-rw-r--r--chromium/gpu/angle_perftests_main.cc37
-rw-r--r--chromium/gpu/angle_unittest_main.cc35
-rw-r--r--chromium/gpu/command_buffer/DIR_METADATA11
-rw-r--r--chromium/gpu/command_buffer/OWNERS2
-rwxr-xr-xchromium/gpu/command_buffer/build_gles2_cmd_buffer.py2
-rwxr-xr-xchromium/gpu/command_buffer/build_raster_cmd_buffer.py2
-rw-r--r--chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc2
-rw-r--r--chromium/gpu/command_buffer/client/fenced_allocator_test.cc2
-rw-r--r--chromium/gpu/command_buffer/client/gl_helper.cc2
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.cc5
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation.cc2
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_gles.cc4
-rw-r--r--chromium/gpu/command_buffer/client/ring_buffer_test.cc2
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation.cc164
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation.h9
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_interface.h5
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_interface_stub.cc2
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_interface_stub.h5
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_utils.cc1
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h2
-rw-r--r--chromium/gpu/command_buffer/common/webgpu_cmd_format.h5
-rw-r--r--chromium/gpu/command_buffer/service/BUILD.gn8
-rw-r--r--chromium/gpu/command_buffer/service/DEPS6
-rw-r--r--chromium/gpu/command_buffer/service/context_group.cc1
-rw-r--r--chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h5
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.cc34
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.h4
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc68
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_factory.h4
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_factory_unittest.cc405
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.cc111
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.h48
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc6
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.cc13
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.cc4
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc15
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc1
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h2
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc30
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h2
-rw-r--r--chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc58
-rw-r--r--chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h11
-rw-r--r--chromium/gpu/command_buffer/service/gpu_fence_manager_unittest.cc2
-rw-r--r--chromium/gpu/command_buffer/service/gpu_switches.cc8
-rw-r--r--chromium/gpu/command_buffer/service/gpu_switches.h2
-rw-r--r--chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc2
-rw-r--r--chromium/gpu/command_buffer/service/image_reader_gl_owner.cc24
-rw-r--r--chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc6
-rw-r--r--chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h1
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder.cc42
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest.cc2
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc2
-rw-r--r--chromium/gpu/command_buffer/service/scheduler.cc20
-rw-r--r--chromium/gpu/command_buffer/service/service_utils.cc14
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state.cc42
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state_unittest.cc2
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc9
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc2
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc14
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h5
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc29
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc2
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm9
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc10
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc14
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h4
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory.cc8
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation.cc13
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation.h31
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation_d3d.cc17
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation_d3d.h4
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc26
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_video.cc5
-rw-r--r--chromium/gpu/command_buffer/service/skia_utils.cc34
-rw-r--r--chromium/gpu/command_buffer/service/skia_utils.h6
-rw-r--r--chromium/gpu/command_buffer/service/test_shared_image_backing.cc11
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.cc2
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc139
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc233
-rw-r--r--chromium/gpu/config/BUILD.gn50
-rw-r--r--chromium/gpu/config/DIR_METADATA11
-rw-r--r--chromium/gpu/config/OWNERS2
-rw-r--r--chromium/gpu/config/gpu_blocklist.cc4
-rw-r--r--chromium/gpu/config/gpu_blocklist_unittest.cc7
-rw-r--r--chromium/gpu/config/gpu_control_list.cc5
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list.json111
-rw-r--r--chromium/gpu/config/gpu_driver_bug_workarounds.cc2
-rw-r--r--chromium/gpu/config/gpu_extra_info.cc23
-rw-r--r--chromium/gpu/config/gpu_extra_info.h71
-rw-r--r--chromium/gpu/config/gpu_feature_type.h3
-rw-r--r--chromium/gpu/config/gpu_finch_features.cc148
-rw-r--r--chromium/gpu/config/gpu_finch_features.h14
-rw-r--r--chromium/gpu/config/gpu_info_collector.cc58
-rw-r--r--chromium/gpu/config/gpu_info_collector.h4
-rw-r--r--chromium/gpu/config/gpu_lists_version.h2
-rw-r--r--chromium/gpu/config/gpu_preferences.h20
-rw-r--r--chromium/gpu/config/gpu_preferences_unittest.cc23
-rw-r--r--chromium/gpu/config/gpu_switches.cc30
-rw-r--r--chromium/gpu/config/gpu_switches.h8
-rw-r--r--chromium/gpu/config/gpu_switching.cc24
-rw-r--r--chromium/gpu/config/gpu_test_config.cc3
-rw-r--r--chromium/gpu/config/gpu_util.cc80
-rw-r--r--chromium/gpu/config/gpu_workaround_list.txt5
-rw-r--r--chromium/gpu/config/skia_limits.cc5
-rw-r--r--chromium/gpu/config/software_rendering_list.json118
-rw-r--r--chromium/gpu/gles2_conform_support/egl/context.cc2
-rw-r--r--chromium/gpu/ipc/BUILD.gn2
-rw-r--r--chromium/gpu/ipc/client/gpu_context_tests.h4
-rw-r--r--chromium/gpu/ipc/client/gpu_in_process_context_tests.cc4
-rw-r--r--chromium/gpu/ipc/common/BUILD.gn19
-rw-r--r--chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h1
-rw-r--r--chromium/gpu/ipc/common/gpu_extra_info.mojom31
-rw-r--r--chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.cc36
-rw-r--r--chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.h72
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc6
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc2
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc31
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc4
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h6
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_support.cc9
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences.mojom4
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h23
-rw-r--r--chromium/gpu/ipc/common/gpu_surface_tracker.cc9
-rw-r--r--chromium/gpu/ipc/common/gpu_surface_tracker.h7
-rw-r--r--chromium/gpu/ipc/common/luid_mojom_traits.h2
-rw-r--r--chromium/gpu/ipc/common/mojom_traits_unittest.cc2
-rw-r--r--chromium/gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.cc104
-rw-r--r--chromium/gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.h98
-rw-r--r--chromium/gpu/ipc/gl_in_process_context.cc10
-rw-r--r--chromium/gpu/ipc/gl_in_process_context.h3
-rw-r--r--chromium/gpu/ipc/gpu_task_scheduler_helper.cc4
-rw-r--r--chromium/gpu/ipc/gpu_task_scheduler_helper.h11
-rw-r--r--chromium/gpu/ipc/host/gpu_memory_buffer_support.cc2
-rw-r--r--chromium/gpu/ipc/host/shader_disk_cache_unittest.cc2
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.cc48
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.h39
-rw-r--r--chromium/gpu/ipc/in_process_gpu_thread_holder.cc2
-rw-r--r--chromium/gpu/ipc/raster_in_process_context.cc4
-rw-r--r--chromium/gpu/ipc/scheduler_sequence.h2
-rw-r--r--chromium/gpu/ipc/service/BUILD.gn3
-rw-r--r--chromium/gpu/ipc/service/command_buffer_stub.cc13
-rw-r--r--chromium/gpu/ipc/service/command_buffer_stub.h15
-rw-r--r--chromium/gpu/ipc/service/gles2_command_buffer_stub.cc7
-rw-r--r--chromium/gpu/ipc/service/gles2_command_buffer_stub.h2
-rw-r--r--chromium/gpu/ipc/service/gpu_channel.cc5
-rw-r--r--chromium/gpu/ipc/service/gpu_init.cc64
-rw-r--r--chromium/gpu/ipc/service/gpu_init.h8
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc4
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc52
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h2
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread.cc1117
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread.h338
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc4
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc933
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h263
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_stub.cc14
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc5
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm2
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_win.cc2
-rw-r--r--chromium/gpu/ipc/service/pass_through_image_transport_surface.cc2
-rw-r--r--chromium/gpu/ipc/service/raster_command_buffer_stub.cc6
-rw-r--r--chromium/gpu/ipc/service/raster_command_buffer_stub.h4
-rw-r--r--chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc6
-rw-r--r--chromium/gpu/ipc/service/webgpu_command_buffer_stub.h4
-rw-r--r--chromium/gpu/ipc/shared_image_interface_in_process.cc55
-rw-r--r--chromium/gpu/ipc/shared_image_interface_in_process.h16
-rw-r--r--chromium/gpu/ipc/webgpu_in_process_context.cc4
-rw-r--r--chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc35
-rw-r--r--chromium/gpu/vulkan/BUILD.gn24
-rw-r--r--chromium/gpu/vulkan/DIR_METADATA11
-rw-r--r--chromium/gpu/vulkan/OWNERS2
-rw-r--r--chromium/gpu/vulkan/android/vulkan_implementation_android.cc2
-rw-r--r--chromium/gpu/vulkan/demo/BUILD.gn1
-rw-r--r--chromium/gpu/vulkan/demo/DEPS1
-rw-r--r--chromium/gpu/vulkan/demo/vulkan_demo.cc6
-rwxr-xr-xchromium/gpu/vulkan/generate_bindings.py17
-rw-r--r--chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc1
-rw-r--r--chromium/gpu/vulkan/vma_wrapper.cc16
-rw-r--r--chromium/gpu/vulkan/vma_wrapper.h4
-rw-r--r--chromium/gpu/vulkan/vulkan_device_queue.cc10
-rw-r--r--chromium/gpu/vulkan/vulkan_device_queue.h3
-rw-r--r--chromium/gpu/vulkan/vulkan_fence_helper.h2
-rw-r--r--chromium/gpu/vulkan/vulkan_function_pointers.cc24
-rw-r--r--chromium/gpu/vulkan/vulkan_function_pointers.h45
-rw-r--r--chromium/gpu/vulkan/vulkan_image.h8
-rw-r--r--chromium/gpu/vulkan/vulkan_image_android.cc5
-rw-r--r--chromium/gpu/vulkan/vulkan_implementation.cc6
-rw-r--r--chromium/gpu/vulkan/vulkan_implementation.h3
-rw-r--r--chromium/gpu/vulkan/vulkan_instance.cc29
-rw-r--r--chromium/gpu/vulkan/vulkan_swap_chain.cc9
-rw-r--r--chromium/gpu/vulkan/vulkan_swap_chain.h7
-rw-r--r--chromium/gpu/vulkan/x/vulkan_implementation_x11.cc15
-rw-r--r--chromium/gpu/vulkan/x/vulkan_implementation_x11.h1
-rw-r--r--chromium/gpu/vulkan/x/vulkan_surface_x11.cc29
-rw-r--r--chromium/gpu/vulkan/x/vulkan_surface_x11.h1
202 files changed, 3234 insertions, 3542 deletions
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index f640f167a67..7b1b937aaf5 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -2,19 +2,14 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/config/chromeos/ui_mode.gni")
import("//build/config/ui.gni")
+import("//gpu/vulkan/features.gni")
import("//testing/libfuzzer/fuzzer_test.gni")
import("//testing/test.gni")
import("//third_party/protobuf/proto_library.gni")
import("//ui/gl/features.gni")
-# This file depends on the legacy global sources assignment filter. It should
-# be converted to check target platform before assigning source files to the
-# sources variable. Remove this import and set_sources_assignment_filter call
-# when the file has been converted. See https://crbug.com/1018739 for details.
-import("//build/config/deprecated_default_sources_assignment_filter.gni")
-set_sources_assignment_filter(deprecated_default_sources_assignment_filter)
-
config("gpu_implementation") {
defines = [ "GPU_IMPLEMENTATION" ]
configs = [
@@ -230,7 +225,7 @@ static_library("test_support") {
]
}
-if (!is_android && !is_fuchsia && !is_chromeos) {
+if (!is_android && !is_fuchsia && !is_ash) {
proto_library("gl_lpm_fuzzer_proto") {
sources = [ "command_buffer/tests/lpm/gl_lpm_fuzzer.proto" ]
@@ -350,7 +345,6 @@ test("gl_tests") {
"command_buffer/tests/gl_texture_storage_unittest.cc",
"command_buffer/tests/gl_unallocated_texture_unittest.cc",
"command_buffer/tests/gl_unittest.cc",
- "command_buffer/tests/gl_unittests_android.cc",
"command_buffer/tests/gl_virtual_contexts_ext_window_rectangles_unittest.cc",
"command_buffer/tests/gl_virtual_contexts_unittest.cc",
"command_buffer/tests/gl_webgl_multi_draw_test.cc",
@@ -362,7 +356,12 @@ test("gl_tests") {
"ipc/client/raster_in_process_context_tests.cc",
]
- if (use_dawn) {
+ if (is_android) {
+ sources += [ "command_buffer/tests/gl_unittests_android.cc" ]
+ }
+
+ # Simply loading the Vulkan driver leaks crbug.com/1134681
+ if (use_dawn && !is_lsan) {
sources += [
"command_buffer/service/webgpu_decoder_unittest.cc",
"command_buffer/tests/shared_image_gl_backing_produce_dawn_unittest.cc",
@@ -428,6 +427,14 @@ test("gl_tests") {
sources += [
"command_buffer/service/shared_image_backing_factory_d3d_unittest.cc",
]
+ } else if (is_linux || is_chromeos) {
+ # Simply loading the Vulkan driver leaks crbug.com/1134681
+ # CFI error in third_party/vulkan_memory_allocator crbug.com/1139916
+ if (enable_vulkan && !is_lsan && !is_cfi) {
+ deps += [ "//gpu/vulkan/init:init" ]
+ sources +=
+ [ "command_buffer/service/external_vk_image_factory_unittest.cc" ]
+ }
}
if (use_dawn) {
@@ -582,7 +589,7 @@ test("gpu_unittests") {
"ipc/service/gpu_watchdog_thread_unittest.cc",
]
- if (is_chromeos) {
+ if (is_ash) {
# Image decode acceleration with hardware is only supported in Chrome OS.
# The intention is to run this test in the linux-chromeos build.
sources += [ "ipc/service/image_decode_accelerator_stub_unittest.cc" ]
@@ -641,6 +648,7 @@ test("gpu_unittests") {
"//base",
"//base/test:test_support",
"//base/third_party/dynamic_annotations",
+ "//build:chromeos_buildflags",
"//cc/paint",
"//components/viz/common:resource_format",
"//gpu/command_buffer/client:gles2_c_lib",
diff --git a/chromium/gpu/DIR_METADATA b/chromium/gpu/DIR_METADATA
new file mode 100644
index 00000000000..8e87cca679a
--- /dev/null
+++ b/chromium/gpu/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Internals>GPU>Internals"
+} \ No newline at end of file
diff --git a/chromium/gpu/GLES2/DIR_METADATA b/chromium/gpu/GLES2/DIR_METADATA
new file mode 100644
index 00000000000..8e87cca679a
--- /dev/null
+++ b/chromium/gpu/GLES2/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Internals>GPU>Internals"
+} \ No newline at end of file
diff --git a/chromium/gpu/GLES2/OWNERS b/chromium/gpu/GLES2/OWNERS
index bed8d8a2d8f..a4adfa2f1ce 100644
--- a/chromium/gpu/GLES2/OWNERS
+++ b/chromium/gpu/GLES2/OWNERS
@@ -1,5 +1,3 @@
bajones@chromium.org
zmo@chromium.org
vmiura@chromium.org
-
-# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/OWNERS b/chromium/gpu/OWNERS
index 5eb00e6fdea..62a49ad1f31 100644
--- a/chromium/gpu/OWNERS
+++ b/chromium/gpu/OWNERS
@@ -14,5 +14,3 @@ per-file *passthrough*=geofflang@chromium.org
# For SharedImages
vikassoni@chromium.org
-
-# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/angle_deqp_tests_main.cc b/chromium/gpu/angle_deqp_tests_main.cc
deleted file mode 100644
index 2c1351a6821..00000000000
--- a/chromium/gpu/angle_deqp_tests_main.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/task/single_thread_task_executor.h"
-#include "base/test/launcher/unit_test_launcher.h"
-#include "base/test/test_suite.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-int RunHelper(base::TestSuite* test_suite) {
- base::SingleThreadTaskExecutor task_executor;
- return test_suite->Run();
-}
-
-} // namespace
-
-// Defined in angle_deqp_gtest.cpp. Declared here so we don't need to make a
-// header that we import in Chromium.
-namespace angle {
-void InitTestHarness(int* argc, char** argv);
-}
-
-int main(int argc, char** argv) {
- // base::CommandLine::Init must be called before angle::InitTestHarness,
- // because angle::InitTestHarness deletes ANGLE-specific arguments from argv.
- // But, on Linux, tests are run in ChildGTestProcess, which inherits its
- // command line from the one initialized in base::CommandLine::Init.
- // In this order, ChildGTestProcess inherits all the ANGLE-specific
- // arguments that it requires.
- base::CommandLine::Init(argc, argv);
- angle::InitTestHarness(&argc, argv);
- base::TestSuite test_suite(argc, argv);
-
- // The process and thread priorities are modified by
- // StabilizeCPUForBenchmarking()/SetLowPriorityProcess().
- test_suite.DisableCheckForThreadAndProcessPriority();
-
- int rt = base::LaunchUnitTestsSerially(
- argc, argv, base::BindOnce(&RunHelper, base::Unretained(&test_suite)));
- return rt;
-}
diff --git a/chromium/gpu/angle_end2end_tests_main.cc b/chromium/gpu/angle_end2end_tests_main.cc
deleted file mode 100644
index 75b40ed1ef1..00000000000
--- a/chromium/gpu/angle_end2end_tests_main.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/task/single_thread_task_executor.h"
-#include "base/test/launcher/unit_test_launcher.h"
-#include "base/test/test_suite.h"
-#include "testing/gmock/include/gmock/gmock.h"
-
-namespace {
-
-int RunHelper(base::TestSuite* test_suite) {
- base::SingleThreadTaskExecutor task_executor;
- return test_suite->Run();
-}
-
-} // namespace
-
-// Located in third_party/angle/src/tests/test_utils/ANGLETest.cpp.
-// Defined here so we can avoid depending on the ANGLE headers.
-void ANGLEProcessTestArgs(int *argc, char *argv[]);
-void RegisterContextCompatibilityTests();
-
-int main(int argc, char** argv) {
- base::CommandLine::Init(argc, argv);
- ANGLEProcessTestArgs(&argc, argv);
- testing::InitGoogleMock(&argc, argv);
- RegisterContextCompatibilityTests();
- base::TestSuite test_suite(argc, argv);
- int rt = base::LaunchUnitTestsWithOptions(
- argc, argv,
- 1, // Run tests serially.
- 0, // Disable batching.
- true, // Use job objects.
- base::BindOnce(&RunHelper, base::Unretained(&test_suite)));
- return rt;
-}
diff --git a/chromium/gpu/angle_perftests_main.cc b/chromium/gpu/angle_perftests_main.cc
deleted file mode 100644
index 09a97d4e8da..00000000000
--- a/chromium/gpu/angle_perftests_main.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/message_loop/message_pump_type.h"
-#include "base/task/single_thread_task_executor.h"
-#include "base/test/launcher/unit_test_launcher.h"
-#include "base/test/test_suite.h"
-
-namespace {
-
-int RunHelper(base::TestSuite* test_suite) {
- base::SingleThreadTaskExecutor io_task_executor(base::MessagePumpType::IO);
- return test_suite->Run();
-}
-
-} // namespace
-
-void ANGLEProcessPerfTestArgs(int *argc, char **argv);
-
-int main(int argc, char** argv) {
- // base::CommandLine::Init must be called before ANGLEProcessPerfTestArgs.
- // See comment in angle_deqp_tests_main.cc.
- base::CommandLine::Init(argc, argv);
- ANGLEProcessPerfTestArgs(&argc, argv);
-
- base::TestSuite test_suite(argc, argv);
-
- // The thread priority is modified by StabilizeCPUForBenchmarking().
- test_suite.DisableCheckForThreadAndProcessPriority();
-
- int rt = base::LaunchUnitTestsSerially(
- argc, argv, base::BindOnce(&RunHelper, base::Unretained(&test_suite)));
- return rt;
-}
diff --git a/chromium/gpu/angle_unittest_main.cc b/chromium/gpu/angle_unittest_main.cc
deleted file mode 100644
index 1c2199058ba..00000000000
--- a/chromium/gpu/angle_unittest_main.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/message_loop/message_pump_type.h"
-#include "base/task/single_thread_task_executor.h"
-#include "base/test/launcher/unit_test_launcher.h"
-#include "base/test/test_suite.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "third_party/angle/include/GLSLANG/ShaderLang.h"
-
-namespace {
-
-int RunHelper(base::TestSuite* test_suite) {
- base::SingleThreadTaskExecutor io_task_executor(base::MessagePumpType::IO);
- return test_suite->Run();
-}
-
-} // namespace
-
-int main(int argc, char** argv) {
- base::CommandLine::Init(argc, argv);
- testing::InitGoogleMock(&argc, argv);
- sh::Initialize();
-
- base::TestSuite test_suite(argc, argv);
- test_suite.DisableCheckForThreadAndProcessPriority();
-
- int rt = base::LaunchUnitTestsSerially(
- argc, argv, base::BindOnce(&RunHelper, base::Unretained(&test_suite)));
- sh::Finalize();
- return rt;
-}
diff --git a/chromium/gpu/command_buffer/DIR_METADATA b/chromium/gpu/command_buffer/DIR_METADATA
new file mode 100644
index 00000000000..8e87cca679a
--- /dev/null
+++ b/chromium/gpu/command_buffer/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Internals>GPU>Internals"
+} \ No newline at end of file
diff --git a/chromium/gpu/command_buffer/OWNERS b/chromium/gpu/command_buffer/OWNERS
index 5bdc5e2bd3c..a9bdcc7ce67 100644
--- a/chromium/gpu/command_buffer/OWNERS
+++ b/chromium/gpu/command_buffer/OWNERS
@@ -11,5 +11,3 @@ per-file *gpu_memory_buffer*=dcastagna@chromium.org
cwallez@chromium.org
enga@chromium.org
kainino@chromium.org
-
-# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index 8a418d991b2..48e9e7a9e9e 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -1341,6 +1341,7 @@ _NAMED_TYPE_INFO = {
'GL_RGB_YCRCB_420_CHROMIUM',
'GL_RGB_YCBCR_422_CHROMIUM',
'GL_RGB_YCBCR_420V_CHROMIUM',
+ 'GL_RGB_YCBCR_P010_CHROMIUM',
'GL_R16_EXT',
],
},
@@ -1428,6 +1429,7 @@ _NAMED_TYPE_INFO = {
'GL_RGB_YCRCB_420_CHROMIUM',
'GL_RGB_YCBCR_422_CHROMIUM',
'GL_RGB_YCBCR_420V_CHROMIUM',
+ 'GL_RGB_YCBCR_P010_CHROMIUM',
'GL_RGBA',
],
},
diff --git a/chromium/gpu/command_buffer/build_raster_cmd_buffer.py b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
index 25fe5cd6620..6d0caabfdfb 100755
--- a/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_raster_cmd_buffer.py
@@ -135,7 +135,7 @@ _NAMED_TYPE_INFO = {
'viz::ResourceFormat::BGRA_1010102',
'viz::ResourceFormat::YVU_420',
'viz::ResourceFormat::YUV_420_BIPLANAR',
-
+ 'viz::ResourceFormat::P010',
],
'invalid': [
'viz::ResourceFormat::ETC1',
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
index fe21567dfde..d8837590454 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper_test.cc
@@ -11,7 +11,7 @@
#include <vector>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/macros.h"
#include "base/run_loop.h"
#include "base/test/task_environment.h"
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
index f68bf95f8b3..060f62ff0d4 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
+++ b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -9,7 +9,7 @@
#include <memory>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/memory/aligned_memory.h"
#include "base/run_loop.h"
#include "base/test/task_environment.h"
diff --git a/chromium/gpu/command_buffer/client/gl_helper.cc b/chromium/gpu/command_buffer/client/gl_helper.cc
index 5c6d61498a6..3d4aaf2c5b7 100644
--- a/chromium/gpu/command_buffer/client/gl_helper.cc
+++ b/chromium/gpu/command_buffer/client/gl_helper.cc
@@ -11,7 +11,7 @@
#include <utility>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/check_op.h"
#include "base/containers/queue.h"
#include "base/lazy_instance.h"
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index e1611b999bb..6400b35a7e9 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -880,7 +880,9 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
case GL_GPU_DISJOINT_EXT:
*params = static_cast<GLint>(query_tracker_->CheckAndResetDisjoint());
return true;
-
+ case GL_UNPACK_ALIGNMENT:
+ *params = unpack_alignment_;
+ return true;
case GL_VIEWPORT:
if (state_.viewport_width > 0 && state_.viewport_height > 0 &&
capabilities_.max_viewport_width > 0 &&
@@ -962,7 +964,6 @@ bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
case GL_STENCIL_VALUE_MASK:
case GL_STENCIL_WRITEMASK:
case GL_SUBPIXEL_BITS:
- case GL_UNPACK_ALIGNMENT:
return false;
default:
break;
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc
index d1190200de4..524f8eae425 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation.cc
@@ -135,7 +135,7 @@ class ScopedSharedMemoryPtr {
} // namespace
// Helper to copy data to the GPU service over the transfer cache.
-class RasterImplementation::TransferCacheSerializeHelperImpl
+class RasterImplementation::TransferCacheSerializeHelperImpl final
: public cc::TransferCacheSerializeHelper {
public:
explicit TransferCacheSerializeHelperImpl(RasterImplementation* ri)
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
index 997a142250c..7199aef0f3b 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
@@ -178,6 +178,9 @@ void RasterImplementationGLES::WritePixels(const gpu::Mailbox& dest_mailbox,
BeginSharedImageAccessDirectCHROMIUM(
texture_id, GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+ GLint old_align = 0;
+ gl_->GetIntegerv(GL_UNPACK_ALIGNMENT, &old_align);
+ gl_->PixelStorei(GL_UNPACK_ALIGNMENT, 1);
gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, row_bytes / src_info.bytesPerPixel());
gl_->BindTexture(texture_target, texture_id);
gl_->TexSubImage2D(texture_target, 0, dst_x_offset, dst_y_offset,
@@ -186,6 +189,7 @@ void RasterImplementationGLES::WritePixels(const gpu::Mailbox& dest_mailbox,
SkColorTypeToGLDataType(src_info.colorType()), src_pixels);
gl_->BindTexture(texture_target, 0);
gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
+ gl_->PixelStorei(GL_UNPACK_ALIGNMENT, old_align);
EndSharedImageAccessDirectCHROMIUM(texture_id);
DeleteGpuRasterTexture(texture_id);
diff --git a/chromium/gpu/command_buffer/client/ring_buffer_test.cc b/chromium/gpu/command_buffer/client/ring_buffer_test.cc
index 1e8dbc55376..65ddddc6553 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer_test.cc
+++ b/chromium/gpu/command_buffer/client/ring_buffer_test.cc
@@ -11,7 +11,7 @@
#include <memory>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/run_loop.h"
#include "base/test/task_environment.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.cc b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
index c558e420ed0..c12b3829052 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
@@ -80,50 +80,55 @@ void WebGPUCommandSerializer::RequestDeviceCreation(
helper_->Flush();
}
+size_t WebGPUCommandSerializer::GetMaximumAllocationSize() const {
+ return c2s_transfer_buffer_->GetMaxSize();
+}
+
void* WebGPUCommandSerializer::GetCmdSpace(size_t size) {
+ // Note: Dawn will never call this function with |size| >
+ // GetMaximumAllocationSize().
+ DCHECK_LE(size, GetMaximumAllocationSize());
+
// The buffer size must be initialized before any commands are serialized.
- if (c2s_buffer_default_size_ == 0u) {
- NOTREACHED();
- return nullptr;
- }
+ DCHECK_NE(c2s_buffer_default_size_, 0u);
- base::CheckedNumeric<uint32_t> checked_next_offset(c2s_put_offset_);
- checked_next_offset += size;
+ DCHECK_LE(c2s_put_offset_, c2s_buffer_.size());
+ const bool overflows_remaining_space =
+ size > static_cast<size_t>(c2s_buffer_.size() - c2s_put_offset_);
- uint32_t next_offset;
- bool next_offset_valid = checked_next_offset.AssignIfValid(&next_offset);
+ if (LIKELY(c2s_buffer_.valid() && !overflows_remaining_space)) {
+ // If the buffer is valid and has sufficient space, return the
+ // pointer and increment the offset.
+ uint8_t* ptr = static_cast<uint8_t*>(c2s_buffer_.address());
+ ptr += c2s_put_offset_;
- // If the buffer does not have enough space, or if the buffer is not
- // initialized, flush and reset the command stream.
- if (!next_offset_valid || next_offset > c2s_buffer_.size() ||
- !c2s_buffer_.valid()) {
- Flush();
+ c2s_put_offset_ += static_cast<uint32_t>(size);
+ return ptr;
+ }
- uint32_t max_allocation = c2s_transfer_buffer_->GetMaxSize();
- // TODO(crbug.com/951558): Handle command chunking or ensure commands aren't
- // this large.
- CHECK_LE(size, max_allocation);
+ if (!c2s_transfer_buffer_) {
+ // The serializer hit a fatal error and was disconnected.
+ return nullptr;
+ }
- uint32_t allocation_size =
- std::max(c2s_buffer_default_size_, static_cast<uint32_t>(size));
- TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
- "WebGPUCommandSerializer::GetCmdSpace", "bytes",
- allocation_size);
- c2s_buffer_.Reset(allocation_size);
- c2s_put_offset_ = 0;
- next_offset = size;
+ // Otherwise, flush and reset the command stream.
+ Flush();
- // TODO(crbug.com/951558): Handle OOM.
- CHECK(c2s_buffer_.valid());
- CHECK_LE(size, c2s_buffer_.size());
- }
+ uint32_t allocation_size =
+ std::max(c2s_buffer_default_size_, static_cast<uint32_t>(size));
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "WebGPUCommandSerializer::GetCmdSpace", "bytes",
+ allocation_size);
+ c2s_buffer_.Reset(allocation_size);
- DCHECK(c2s_buffer_.valid());
- uint8_t* ptr = static_cast<uint8_t*>(c2s_buffer_.address());
- ptr += c2s_put_offset_;
+ if (!c2s_buffer_.valid() || c2s_buffer_.size() < size) {
+ DLOG(ERROR) << "Dawn wire transfer buffer allocation failed";
+ HandleGpuControlLostContext();
+ return nullptr;
+ }
- c2s_put_offset_ = next_offset;
- return ptr;
+ c2s_put_offset_ = size;
+ return c2s_buffer_.address();
}
bool WebGPUCommandSerializer::Flush() {
@@ -160,11 +165,9 @@ void WebGPUCommandSerializer::HandleGpuControlLostContext() {
c2s_buffer_.Discard();
c2s_transfer_buffer_ = nullptr;
- // Disconnect the wire client. WebGPU commands will be serialized into dummy
- // space owned by the wire client, and the device will receive a Lost event.
- // No commands will be sent after this point.
+ // Disconnect the wire client. WebGPU commands will become a noop, and the
+ // device will receive a Lost event.
// NOTE: This assumes single-threaded operation.
- // TODO(enga): Implement context reset/recovery.
wire_client_->Disconnect();
}
@@ -229,10 +232,10 @@ gpu::ContextResult WebGPUImplementation::Initialize(
memory_transfer_service_ =
std::make_unique<DawnClientMemoryTransferService>(mapped_memory_.get());
- procs_ = dawn_wire::WireClient::GetProcs();
+ procs_ = dawn_wire::client::GetProcs();
// TODO(senorblanco): Do this only once per process. Doing it once per
- // WebGPUImplementation is non-optimal but valid valid, since the returned
+ // WebGPUImplementation is non-optimal but valid, since the returned
// procs are always the same.
dawnProcSetProcs(&procs_);
#endif
@@ -419,21 +422,14 @@ void WebGPUImplementation::OnGpuControlReturnData(
"WebGPUImplementation::OnGpuControlReturnData", "bytes",
data.size());
- if (data.size() <= sizeof(cmds::DawnReturnDataHeader)) {
- // TODO(jiawei.shao@intel.com): Lose the context.
- NOTREACHED();
- return;
- }
+ CHECK_GT(data.size(), sizeof(cmds::DawnReturnDataHeader));
+
const cmds::DawnReturnDataHeader& dawnReturnDataHeader =
*reinterpret_cast<const cmds::DawnReturnDataHeader*>(data.data());
switch (dawnReturnDataHeader.return_data_type) {
case DawnReturnDataType::kDawnCommands: {
- if (data.size() < sizeof(cmds::DawnReturnCommandsInfo)) {
- // TODO(jiawei.shao@intel.com): Lose the context.
- NOTREACHED();
- break;
- }
+ CHECK_GE(data.size(), sizeof(cmds::DawnReturnCommandsInfo));
const cmds::DawnReturnCommandsInfo* dawn_return_commands_info =
reinterpret_cast<const cmds::DawnReturnCommandsInfo*>(data.data());
@@ -441,26 +437,19 @@ void WebGPUImplementation::OnGpuControlReturnData(
dawn_return_commands_info->header.device_client_id;
WebGPUCommandSerializer* command_serializer =
GetCommandSerializerWithDeviceClientID(device_client_id);
- if (!command_serializer) {
- // TODO(jiawei.shao@intel.com): Lose the context.
- NOTREACHED();
- break;
- }
- if (!command_serializer->HandleCommands(
- reinterpret_cast<const char*>(
- dawn_return_commands_info->deserialized_buffer),
- data.size() - offsetof(cmds::DawnReturnCommandsInfo,
- deserialized_buffer))) {
- // TODO(enga): Lose the context.
- NOTREACHED();
- }
+ CHECK(command_serializer);
+
+ // TODO(enga): Instead of a CHECK, this could generate a device lost
+ // event on just that device. It doesn't seem worth doing right now
+ // since a failure here is likely not recoverable.
+ CHECK(command_serializer->HandleCommands(
+ reinterpret_cast<const char*>(
+ dawn_return_commands_info->deserialized_buffer),
+ data.size() -
+ offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer)));
} break;
case DawnReturnDataType::kRequestedDawnAdapterProperties: {
- if (data.size() < sizeof(cmds::DawnReturnAdapterInfo)) {
- // TODO(jiawei.shao@intel.com): Lose the context.
- NOTREACHED();
- break;
- }
+ CHECK_GE(data.size(), sizeof(cmds::DawnReturnAdapterInfo));
const cmds::DawnReturnAdapterInfo* returned_adapter_info =
reinterpret_cast<const cmds::DawnReturnAdapterInfo*>(data.data());
@@ -469,11 +458,8 @@ void WebGPUImplementation::OnGpuControlReturnData(
returned_adapter_info->header.request_adapter_serial;
auto request_callback_iter =
request_adapter_callback_map_.find(request_adapter_serial);
- if (request_callback_iter == request_adapter_callback_map_.end()) {
- // TODO(jiawei.shao@intel.com): Lose the context.
- NOTREACHED();
- break;
- }
+ CHECK(request_callback_iter != request_adapter_callback_map_.end());
+
auto& request_callback = request_callback_iter->second;
GLuint adapter_service_id =
returned_adapter_info->header.adapter_service_id;
@@ -481,17 +467,22 @@ void WebGPUImplementation::OnGpuControlReturnData(
const volatile char* deserialized_buffer =
reinterpret_cast<const volatile char*>(
returned_adapter_info->deserialized_buffer);
- dawn_wire::DeserializeWGPUDeviceProperties(&adapter_properties,
- deserialized_buffer);
- std::move(request_callback).Run(adapter_service_id, adapter_properties);
+ if (returned_adapter_info->adapter_properties_size > 0) {
+ dawn_wire::DeserializeWGPUDeviceProperties(&adapter_properties,
+ deserialized_buffer);
+ }
+ const char* error_message =
+ returned_adapter_info->deserialized_buffer +
+ returned_adapter_info->adapter_properties_size;
+ if (strlen(error_message) == 0) {
+ error_message = nullptr;
+ }
+ std::move(request_callback)
+ .Run(adapter_service_id, adapter_properties, error_message);
request_adapter_callback_map_.erase(request_callback_iter);
} break;
case DawnReturnDataType::kRequestedDeviceReturnInfo: {
- if (data.size() < sizeof(cmds::DawnReturnRequestDeviceInfo)) {
- // TODO(jiawei.shao@intel.com): Lose the context.
- NOTREACHED();
- break;
- }
+ CHECK_GE(data.size(), sizeof(cmds::DawnReturnRequestDeviceInfo));
const cmds::DawnReturnRequestDeviceInfo* returned_request_device_info =
reinterpret_cast<const cmds::DawnReturnRequestDeviceInfo*>(
@@ -501,11 +492,8 @@ void WebGPUImplementation::OnGpuControlReturnData(
returned_request_device_info->device_client_id;
auto request_callback_iter =
request_device_callback_map_.find(device_client_id);
- if (request_callback_iter == request_device_callback_map_.end()) {
- // TODO(jiawei.shao@intel.com): Lose the context.
- NOTREACHED();
- break;
- }
+ CHECK(request_callback_iter != request_device_callback_map_.end());
+
auto& request_callback = request_callback_iter->second;
bool is_request_device_success =
returned_request_device_info->is_request_device_success;
@@ -519,9 +507,7 @@ void WebGPUImplementation::OnGpuControlReturnData(
request_device_callback_map_.erase(request_callback_iter);
} break;
default:
- // TODO(jiawei.shao@intel.com): Lose the context.
NOTREACHED();
- break;
}
#endif
}
@@ -660,7 +646,7 @@ DawnRequestAdapterSerial WebGPUImplementation::NextRequestAdapterSerial() {
bool WebGPUImplementation::RequestAdapterAsync(
PowerPreference power_preference,
- base::OnceCallback<void(int32_t, const WGPUDeviceProperties&)>
+ base::OnceCallback<void(int32_t, const WGPUDeviceProperties&, const char*)>
request_adapter_callback) {
if (lost_) {
return false;
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.h b/chromium/gpu/command_buffer/client/webgpu_implementation.h
index c1fc35bd8c8..774937410d4 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.h
@@ -44,6 +44,7 @@ class WebGPUCommandSerializer final : public dawn_wire::CommandSerializer {
const WGPUDeviceProperties& requested_device_properties);
// dawn_wire::CommandSerializer implementation
+ size_t GetMaximumAllocationSize() const final;
void* GetCmdSpace(size_t size) final;
bool Flush() final;
@@ -166,8 +167,9 @@ class WEBGPU_EXPORT WebGPUImplementation final : public WebGPUInterface,
ReservedTexture ReserveTexture(DawnDeviceClientID device_client_id) override;
bool RequestAdapterAsync(
PowerPreference power_preference,
- base::OnceCallback<void(int32_t, const WGPUDeviceProperties&)>
- request_adapter_callback) override;
+ base::OnceCallback<void(int32_t,
+ const WGPUDeviceProperties&,
+ const char*)> request_adapter_callback) override;
bool RequestDeviceAsync(
uint32_t requested_adapter_id,
const WGPUDeviceProperties& requested_device_properties,
@@ -198,7 +200,8 @@ class WEBGPU_EXPORT WebGPUImplementation final : public WebGPUInterface,
LogSettings log_settings_;
base::flat_map<DawnRequestAdapterSerial,
- base::OnceCallback<void(int32_t, const WGPUDeviceProperties&)>>
+ base::OnceCallback<
+ void(int32_t, const WGPUDeviceProperties&, const char*)>>
request_adapter_callback_map_;
DawnRequestAdapterSerial request_adapter_serial_ = 0;
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface.h b/chromium/gpu/command_buffer/client/webgpu_interface.h
index e09175e998f..3512624ac8a 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface.h
@@ -51,8 +51,9 @@ class WebGPUInterface : public InterfaceBase {
DawnDeviceClientID device_client_id) = 0;
virtual bool RequestAdapterAsync(
PowerPreference power_preference,
- base::OnceCallback<void(int32_t, const WGPUDeviceProperties&)>
- request_adapter_callback) = 0;
+ base::OnceCallback<void(int32_t,
+ const WGPUDeviceProperties&,
+ const char*)> request_adapter_callback) = 0;
virtual bool RequestDeviceAsync(
uint32_t adapter_service_id,
const WGPUDeviceProperties& requested_device_properties,
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
index 1ace9f20064..7b34ab75f45 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
@@ -38,7 +38,7 @@ ReservedTexture WebGPUInterfaceStub::ReserveTexture(
}
bool WebGPUInterfaceStub::RequestAdapterAsync(
PowerPreference power_preference,
- base::OnceCallback<void(int32_t, const WGPUDeviceProperties&)>
+ base::OnceCallback<void(int32_t, const WGPUDeviceProperties&, const char*)>
request_adapter_callback) {
return false;
}
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
index 22a9112fe67..509cc6eeb30 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
@@ -33,8 +33,9 @@ class WebGPUInterfaceStub : public WebGPUInterface {
ReservedTexture ReserveTexture(DawnDeviceClientID device_client_id) override;
bool RequestAdapterAsync(
PowerPreference power_preference,
- base::OnceCallback<void(int32_t, const WGPUDeviceProperties&)>
- request_adapter_callback) override;
+ base::OnceCallback<void(int32_t,
+ const WGPUDeviceProperties&,
+ const char*)> request_adapter_callback) override;
bool RequestDeviceAsync(
uint32_t adapter_service_id,
const WGPUDeviceProperties& requested_device_properties,
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index c8590aca011..dec0cfb0c2a 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -1167,6 +1167,7 @@ uint32_t GLES2Util::GetChannelsForFormat(int format) {
return kRGB;
case GL_RGB_YCRCB_420_CHROMIUM:
case GL_RGB_YCBCR_420V_CHROMIUM:
+ case GL_RGB_YCBCR_P010_CHROMIUM:
case GL_BGRA_EXT:
case GL_BGRA8_EXT:
case GL_RGBA16F_EXT:
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
index b19fdc2ce18..292165f72e8 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -7416,6 +7416,7 @@ std::string GLES2Util::GetStringImageInternalFormat(uint32_t value) {
{GL_RGB_YCRCB_420_CHROMIUM, "GL_RGB_YCRCB_420_CHROMIUM"},
{GL_RGB_YCBCR_422_CHROMIUM, "GL_RGB_YCBCR_422_CHROMIUM"},
{GL_RGB_YCBCR_420V_CHROMIUM, "GL_RGB_YCBCR_420V_CHROMIUM"},
+ {GL_RGB_YCBCR_P010_CHROMIUM, "GL_RGB_YCBCR_P010_CHROMIUM"},
{GL_RGBA, "GL_RGBA"},
};
return GLES2Util::GetQualifiedEnumString(string_table,
@@ -8186,6 +8187,7 @@ std::string GLES2Util::GetStringTextureSizedTextureFilterableInternalFormat(
{GL_RGB_YCRCB_420_CHROMIUM, "GL_RGB_YCRCB_420_CHROMIUM"},
{GL_RGB_YCBCR_422_CHROMIUM, "GL_RGB_YCBCR_422_CHROMIUM"},
{GL_RGB_YCBCR_420V_CHROMIUM, "GL_RGB_YCBCR_420V_CHROMIUM"},
+ {GL_RGB_YCBCR_P010_CHROMIUM, "GL_RGB_YCBCR_P010_CHROMIUM"},
{GL_R16_EXT, "GL_R16_EXT"},
};
return GLES2Util::GetQualifiedEnumString(string_table,
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format.h
index 286a41c43da..7438f69e89e 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_format.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format.h
@@ -53,6 +53,11 @@ static_assert(offsetof(DawnReturnAdapterInfoHeader, return_data_header) == 0,
struct DawnReturnAdapterInfo {
DawnReturnAdapterInfoHeader header;
+ uint32_t adapter_properties_size;
+
+ // |deserialized_buffer| contains the serialized adapter properties if
+ // |adapter_properties_size > 0|. Following it is an optional null-terminated
+ // error message.
alignas(GPU_DAWN_RETURN_DATA_ALIGNMENT) char deserialized_buffer[];
};
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index 56cb7346a69..a6adaaa0080 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -306,6 +306,7 @@ target(link_target_type, "gles2_sources") {
"//base",
"//base/third_party/dynamic_annotations",
"//build:chromecast_buildflags",
+ "//build:chromeos_buildflags",
"//components/crash/core/common",
"//components/viz/common:resource_format_utils",
"//gpu/command_buffer/client",
@@ -352,6 +353,8 @@ target(link_target_type, "gles2_sources") {
"external_vk_image_factory.h",
"external_vk_image_gl_representation.cc",
"external_vk_image_gl_representation.h",
+ "external_vk_image_overlay_representation.cc",
+ "external_vk_image_overlay_representation.h",
"external_vk_image_skia_representation.cc",
"external_vk_image_skia_representation.h",
]
@@ -387,7 +390,10 @@ target(link_target_type, "gles2_sources") {
}
if (use_dawn) {
- deps += [ "//third_party/dawn/src/dawn_native" ]
+ deps += [
+ "//third_party/dawn/src/dawn_native",
+ "//third_party/dawn/src/dawn_platform",
+ ]
}
if (is_mac) {
diff --git a/chromium/gpu/command_buffer/service/DEPS b/chromium/gpu/command_buffer/service/DEPS
index ff50b4a7bcd..eb0ba630739 100644
--- a/chromium/gpu/command_buffer/service/DEPS
+++ b/chromium/gpu/command_buffer/service/DEPS
@@ -10,3 +10,9 @@ include_rules = [
"+components/viz/common/resources/resource_format_utils.h",
"+components/viz/common/resources/resource_sizes.h",
]
+
+specific_include_rules = {
+ "external_vk_image_factory_unittest\.cc": [
+ "+components/viz/common/gpu/vulkan_in_process_context_provider.h",
+ ],
+}
diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc
index f5f1a82f837..88f4127f187 100644
--- a/chromium/gpu/command_buffer/service/context_group.cc
+++ b/chromium/gpu/command_buffer/service/context_group.cc
@@ -636,7 +636,6 @@ void ContextGroup::Destroy(DecoderContext* decoder, bool have_context) {
passthrough_resources_.reset();
ReportProgress();
}
- memory_tracker_ = nullptr;
}
uint32_t ContextGroup::GetMemRepresented() const {
diff --git a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
index eb55bbd0845..5aa5f6c6a2d 100644
--- a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
+++ b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
@@ -13,7 +13,8 @@
namespace gpu {
namespace gles2 {
-class MockCopyTexImageResourceManager : public CopyTexImageResourceManager {
+class MockCopyTexImageResourceManager final
+ : public CopyTexImageResourceManager {
public:
MockCopyTexImageResourceManager(const gles2::FeatureInfo* feature_info);
~MockCopyTexImageResourceManager() final;
@@ -60,7 +61,7 @@ class MockCopyTexImageResourceManager : public CopyTexImageResourceManager {
DISALLOW_COPY_AND_ASSIGN(MockCopyTexImageResourceManager);
};
-class MockCopyTextureResourceManager
+class MockCopyTextureResourceManager final
: public CopyTextureCHROMIUMResourceManager {
public:
MockCopyTextureResourceManager();
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
index 263c79e9986..5497b542d6d 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
@@ -11,6 +11,7 @@
#include "build/build_config.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
+#include "gpu/command_buffer/service/external_vk_image_overlay_representation.h"
#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/ipc/common/vulkan_ycbcr_info.h"
@@ -153,6 +154,7 @@ bool UseMinimalUsageFlags(SharedContextState* context_state) {
void WaitSemaphoresOnGrContext(GrDirectContext* gr_context,
std::vector<ExternalSemaphore>* semaphores) {
+ DCHECK(!gr_context->abandoned());
std::vector<GrBackendSemaphore> backend_senampres;
backend_senampres.reserve(semaphores->size());
for (auto& semaphore : *semaphores) {
@@ -219,6 +221,21 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
}
}
+ if (is_external && (usage & SHARED_IMAGE_USAGE_WEBGPU)) {
+ // The following additional usage flags are provided for Dawn:
+ //
+ // - TRANSFER_SRC: Used for copies from this image.
+ // - TRANSFER_DST: Used for copies to this image or clears.
+ vk_usage |=
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+
+ if (usage & SHARED_IMAGE_USAGE_DISPLAY) {
+ // Skia currently requires all VkImages it uses to support transfers
+ vk_usage |=
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+ }
+
auto* vulkan_implementation =
context_state->vk_context_provider()->GetVulkanImplementation();
VkImageCreateFlags vk_flags = 0;
@@ -362,8 +379,10 @@ ExternalVkImageBacking::~ExternalVkImageBacking() {
if (write_semaphore_)
semaphores.emplace_back(std::move(write_semaphore_));
- WaitSemaphoresOnGrContext(context_state()->gr_context(), &semaphores);
- ReturnPendingSemaphoresWithFenceHelper(std::move(semaphores));
+ if (!semaphores.empty() && !context_state()->gr_context()->abandoned()) {
+ WaitSemaphoresOnGrContext(context_state()->gr_context(), &semaphores);
+ ReturnPendingSemaphoresWithFenceHelper(std::move(semaphores));
+ }
fence_helper()->EnqueueVulkanObjectCleanupForSubmittedWork(std::move(image_));
backend_texture_ = GrBackendTexture();
@@ -570,6 +589,10 @@ void ExternalVkImageBacking::AddSemaphoresToPendingListOrRelease(
}
}
+scoped_refptr<gfx::NativePixmap> ExternalVkImageBacking::GetNativePixmap() {
+ return image_->native_pixmap();
+}
+
void ExternalVkImageBacking::ReturnPendingSemaphoresWithFenceHelper(
std::vector<ExternalSemaphore> semaphores) {
std::move(semaphores.begin(), semaphores.end(),
@@ -756,6 +779,13 @@ ExternalVkImageBacking::ProduceSkia(
tracker);
}
+std::unique_ptr<SharedImageRepresentationOverlay>
+ExternalVkImageBacking::ProduceOverlay(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ return std::make_unique<ExternalVkImageOverlayRepresentation>(manager, this,
+ tracker);
+}
+
void ExternalVkImageBacking::InstallSharedMemory(
SharedMemoryRegionWrapper shared_memory_wrapper) {
DCHECK(!shared_memory_wrapper_.IsValid());
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
index 27397700c98..7d48d672ae3 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
@@ -130,6 +130,7 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
// SharedImageBacking implementation.
void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
+ scoped_refptr<gfx::NativePixmap> GetNativePixmap() override;
// Add semaphores to a pending list for reusing or being released immediately.
void AddSemaphoresToPendingListOrRelease(
@@ -160,6 +161,9 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
SharedImageManager* manager,
MemoryTypeTracker* tracker,
scoped_refptr<SharedContextState> context_state) override;
+ std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
private:
// Install a shared memory GMB to the backing.
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc
index 246989e89d5..3a3109297b4 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_dawn_representation.cc
@@ -57,13 +57,24 @@ WGPUTexture ExternalVkImageDawnRepresentation::BeginAccess(
dawn_native::vulkan::ExternalImageDescriptorOpaqueFD descriptor = {};
descriptor.cTextureDescriptor = &texture_descriptor;
- descriptor.isCleared = IsCleared();
+ descriptor.isInitialized = IsCleared();
descriptor.allocationSize = backing_impl()->image()->device_size();
descriptor.memoryTypeIndex = backing_impl()->image()->memory_type_index();
descriptor.memoryFD = dup(memory_fd_.get());
- // TODO(http://crbug.com/dawn/200): We may not be obeying all of the rules
- // specified by Vulkan for external queue transfer barriers. Investigate this.
+ const GrBackendTexture& backend_texture = backing_impl()->backend_texture();
+ GrVkImageInfo image_info;
+ backend_texture.getVkImageInfo(&image_info);
+ // We should either be importing the image from the external queue, or it
+ // was just created with no queue ownership.
+ DCHECK(image_info.fCurrentQueueFamily == VK_QUEUE_FAMILY_IGNORED ||
+ image_info.fCurrentQueueFamily == VK_QUEUE_FAMILY_EXTERNAL);
+
+ // Note: This assumes the previous owner of the shared image did not do a
+ // layout transition on EndAccess, and saved the exported layout on the
+ // GrBackendTexture.
+ descriptor.releasedOldLayout = image_info.fImageLayout;
+ descriptor.releasedNewLayout = image_info.fImageLayout;
for (auto& external_semaphore : begin_access_semaphores_) {
descriptor.waitFDs.push_back(
@@ -71,13 +82,6 @@ WGPUTexture ExternalVkImageDawnRepresentation::BeginAccess(
}
texture_ = dawn_native::vulkan::WrapVulkanImage(device_, &descriptor);
-
- if (texture_) {
- // Keep a reference to the texture so that it stays valid (its content
- // might be destroyed).
- dawn_procs_.textureReference(texture_);
- }
-
return texture_;
}
@@ -87,22 +91,38 @@ void ExternalVkImageDawnRepresentation::EndAccess() {
}
// Grab the signal semaphore from dawn
- int signal_semaphore_fd =
- dawn_native::vulkan::ExportSignalSemaphoreOpaqueFD(device_, texture_);
-
- if (dawn_native::IsTextureSubresourceInitialized(texture_, 0, 1, 0, 1)) {
- SetCleared();
+ dawn_native::vulkan::ExternalImageExportInfoOpaqueFD export_info;
+ if (!dawn_native::vulkan::ExportVulkanImage(
+ texture_, VK_IMAGE_LAYOUT_UNDEFINED, &export_info)) {
+ DLOG(ERROR) << "Failed to export Dawn Vulkan image.";
+ } else {
+ if (export_info.isInitialized) {
+ SetCleared();
+ }
+
+ // Exporting to VK_IMAGE_LAYOUT_UNDEFINED means no transition should be
+ // done. The old/new layouts are the same.
+ DCHECK_EQ(export_info.releasedOldLayout, export_info.releasedNewLayout);
+
+ // Save the layout on the GrBackendTexture. Other shared image
+ // representations read it from here.
+ GrBackendTexture backend_texture = backing_impl()->backend_texture();
+ backend_texture.setMutableState(GrBackendSurfaceMutableState(
+ export_info.releasedNewLayout, VK_QUEUE_FAMILY_EXTERNAL));
+
+ // TODO(enga): Handle waiting on multiple semaphores from dawn
+ DCHECK(export_info.semaphoreHandles.size() == 1);
+
+ // Wrap file descriptor in a handle
+ SemaphoreHandle handle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
+ base::ScopedFD(export_info.semaphoreHandles[0]));
+
+ auto semaphore = ExternalSemaphore::CreateFromHandle(
+ backing_impl()->context_provider(), std::move(handle));
+
+ backing_impl()->EndAccess(false, std::move(semaphore), false /* is_gl */);
}
- // Wrap file descriptor in a handle
- SemaphoreHandle handle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
- base::ScopedFD(signal_semaphore_fd));
-
- auto semaphore = ExternalSemaphore::CreateFromHandle(
- backing_impl()->context_provider(), std::move(handle));
-
- backing_impl()->EndAccess(false, std::move(semaphore), false /* is_gl */);
-
// Destroy the texture, signaling the semaphore in dawn
dawn_procs_.textureDestroy(texture_);
dawn_procs_.textureRelease(texture_);
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory.h b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
index f736400c53b..2e77498c6b4 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_factory.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
@@ -10,6 +10,7 @@
#include "gpu/command_buffer/service/external_vk_image_backing.h"
#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/gpu_gles2_export.h"
namespace gpu {
class SharedContextState;
@@ -20,7 +21,8 @@ class VulkanCommandPool;
// can be exported out of Vulkan and be used in GL. Synchronization between
// Vulkan and GL is done using VkSemaphores that are created with special flags
// that allow it to be exported out and shared with GL.
-class ExternalVkImageFactory : public SharedImageBackingFactory {
+class GPU_GLES2_EXPORT ExternalVkImageFactory
+ : public SharedImageBackingFactory {
public:
explicit ExternalVkImageFactory(
scoped_refptr<SharedContextState> context_state);
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory_unittest.cc b/chromium/gpu/command_buffer/service/external_vk_image_factory_unittest.cc
new file mode 100644
index 00000000000..26fe5eca304
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory_unittest.cc
@@ -0,0 +1,405 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/external_vk_image_factory.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/callback_helpers.h"
+#include "components/viz/common/gpu/vulkan_in_process_context_provider.h"
+#include "gpu/command_buffer/service/external_vk_image_dawn_representation.h"
+#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/skia_utils.h"
+#include "gpu/config/gpu_test_config.h"
+#include "gpu/vulkan/init/vulkan_factory.h"
+#include "gpu/vulkan/vulkan_implementation.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkImage.h"
+#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
+#include "ui/gl/buildflags.h"
+#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_surface.h"
+#include "ui/gl/init/gl_factory.h"
+
+#if BUILDFLAG(USE_DAWN)
+#include <dawn/dawn_proc.h>
+#include <dawn/webgpu_cpp.h>
+#include <dawn_native/DawnNative.h>
+#endif // BUILDFLAG(USE_DAWN)
+
+namespace gpu {
+namespace {
+
+class ExternalVkImageFactoryTest : public testing::Test {
+ protected:
+ bool VulkanSupported() const {
+ // crbug.com(941685, 1139366): Vulkan driver crashes on Linux FYI Release
+ // (AMD R7 240).
+ return !GPUTestBotConfig::CurrentConfigMatches("Linux AMD");
+ }
+ void SetUp() override {
+ if (!VulkanSupported()) {
+ return;
+ }
+ // Set up the Vulkan implementation and context provider.
+ vulkan_implementation_ = gpu::CreateVulkanImplementation();
+ DCHECK(vulkan_implementation_) << "Failed to create Vulkan implementation";
+
+ auto initialize_vulkan = vulkan_implementation_->InitializeVulkanInstance();
+ DCHECK(initialize_vulkan) << "Failed to initialize Vulkan implementation.";
+
+ vulkan_context_provider_ = viz::VulkanInProcessContextProvider::Create(
+ vulkan_implementation_.get());
+ DCHECK(vulkan_context_provider_)
+ << "Failed to create Vulkan context provider";
+
+ // Set up a GL context. We don't actually need it, but we can't make
+ // a SharedContextState without one.
+ gl_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
+ DCHECK(gl_surface_);
+ gl_context_ = gl::init::CreateGLContext(nullptr, gl_surface_.get(),
+ gl::GLContextAttribs());
+ DCHECK(gl_context_);
+ bool make_current_result = gl_context_->MakeCurrent(gl_surface_.get());
+ DCHECK(make_current_result);
+
+ scoped_refptr<gl::GLShareGroup> share_group = new gl::GLShareGroup();
+ context_state_ = base::MakeRefCounted<SharedContextState>(
+ std::move(share_group), gl_surface_, gl_context_,
+ false /* use_virtualized_gl_contexts */, base::DoNothing(),
+ GrContextType::kVulkan, vulkan_context_provider_.get());
+
+ GpuPreferences gpu_preferences = {};
+ GpuDriverBugWorkarounds workarounds = {};
+ context_state_->InitializeGrContext(gpu_preferences, workarounds, nullptr);
+
+ memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
+ shared_image_representation_factory_ =
+ std::make_unique<SharedImageRepresentationFactory>(
+ &shared_image_manager_, nullptr);
+ shared_image_factory_ =
+ std::make_unique<ExternalVkImageFactory>(context_state_);
+
+#if BUILDFLAG(USE_DAWN)
+ // Create a Dawn Vulkan device
+ dawn_instance_.DiscoverDefaultAdapters();
+
+ std::vector<dawn_native::Adapter> adapters = dawn_instance_.GetAdapters();
+ auto adapter_it = std::find_if(
+ adapters.begin(), adapters.end(), [](dawn_native::Adapter adapter) {
+ return adapter.GetBackendType() == dawn_native::BackendType::Vulkan;
+ });
+ ASSERT_NE(adapter_it, adapters.end());
+
+ DawnProcTable procs = dawn_native::GetProcs();
+ dawnProcSetProcs(&procs);
+
+ dawn_device_ = wgpu::Device::Acquire(adapter_it->CreateDevice());
+ DCHECK(dawn_device_) << "Failed to create Dawn device";
+#endif // BUILDFLAG(USE_DAWN)
+ }
+
+ void TearDown() override {
+#if BUILDFLAG(USE_DAWN)
+ dawn_device_ = wgpu::Device();
+ dawnProcSetProcs(nullptr);
+#endif // BUILDFLAG(USE_DAWN)
+ }
+
+ std::unique_ptr<VulkanImplementation> vulkan_implementation_;
+ scoped_refptr<viz::VulkanInProcessContextProvider> vulkan_context_provider_;
+
+ scoped_refptr<gl::GLSurface> gl_surface_;
+ scoped_refptr<gl::GLContext> gl_context_;
+ scoped_refptr<SharedContextState> context_state_;
+
+ SharedImageManager shared_image_manager_;
+ std::unique_ptr<MemoryTypeTracker> memory_type_tracker_;
+ std::unique_ptr<SharedImageRepresentationFactory>
+ shared_image_representation_factory_;
+ std::unique_ptr<ExternalVkImageFactory> shared_image_factory_;
+
+#if BUILDFLAG(USE_DAWN)
+ dawn_native::Instance dawn_instance_;
+ wgpu::Device dawn_device_;
+#endif // BUILDFLAG(USE_DAWN)
+};
+
+#if BUILDFLAG(USE_DAWN)
+
+TEST_F(ExternalVkImageFactoryTest, DawnWrite_SkiaVulkanRead) {
+ if (!VulkanSupported()) {
+ DLOG(ERROR) << "Test skipped because Vulkan isn't supported.";
+ return;
+ }
+ // Create a backing using mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ const auto format = viz::ResourceFormat::RGBA_8888;
+ const gfx::Size size(4, 4);
+ const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const uint32_t usage = SHARED_IMAGE_USAGE_DISPLAY | SHARED_IMAGE_USAGE_WEBGPU;
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ auto backing = shared_image_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space,
+ kTopLeft_GrSurfaceOrigin, kPremul_SkAlphaType, usage,
+ false /* is_thread_safe */);
+ ASSERT_NE(backing, nullptr);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ {
+ // Create a Dawn representation to clear the texture contents to a green.
+ auto dawn_representation =
+ shared_image_representation_factory_->ProduceDawn(mailbox,
+ dawn_device_.Get());
+ ASSERT_TRUE(dawn_representation);
+
+ auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
+ WGPUTextureUsage_OutputAttachment,
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ ASSERT_TRUE(dawn_scoped_access);
+
+ wgpu::Texture texture(dawn_scoped_access->texture());
+ wgpu::RenderPassColorAttachmentDescriptor color_desc;
+ color_desc.attachment = texture.CreateView();
+ color_desc.resolveTarget = nullptr;
+ color_desc.loadOp = wgpu::LoadOp::Clear;
+ color_desc.storeOp = wgpu::StoreOp::Store;
+ color_desc.clearColor = {0, 255, 0, 255};
+
+ wgpu::RenderPassDescriptor renderPassDesc = {};
+ renderPassDesc.colorAttachmentCount = 1;
+ renderPassDesc.colorAttachments = &color_desc;
+ renderPassDesc.depthStencilAttachment = nullptr;
+
+ wgpu::CommandEncoder encoder = dawn_device_.CreateCommandEncoder();
+ wgpu::RenderPassEncoder pass = encoder.BeginRenderPass(&renderPassDesc);
+ pass.EndPass();
+ wgpu::CommandBuffer commands = encoder.Finish();
+
+ wgpu::Queue queue = dawn_device_.GetDefaultQueue();
+ queue.Submit(1, &commands);
+ }
+
+ EXPECT_TRUE(factory_ref->IsCleared());
+
+ {
+ auto skia_representation =
+ shared_image_representation_factory_->ProduceSkia(mailbox,
+ context_state_.get());
+
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ auto skia_scoped_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+
+ context_state_->gr_context()->wait(begin_semaphores.size(),
+ begin_semaphores.data());
+
+ EXPECT_TRUE(skia_scoped_access);
+
+ auto* promise_texture = skia_scoped_access->promise_image_texture();
+ GrBackendTexture backend_texture = promise_texture->backendTexture();
+
+ EXPECT_TRUE(backend_texture.isValid());
+ EXPECT_EQ(size.width(), backend_texture.width());
+ EXPECT_EQ(size.height(), backend_texture.height());
+
+ // Create an Sk Image from GrBackendTexture.
+ auto sk_image = SkImage::MakeFromTexture(
+ context_state_->gr_context(), backend_texture, kTopLeft_GrSurfaceOrigin,
+ kRGBA_8888_SkColorType, kOpaque_SkAlphaType, nullptr);
+ EXPECT_TRUE(sk_image);
+
+ const SkImageInfo dst_info =
+ SkImageInfo::Make(size.width(), size.height(), kRGBA_8888_SkColorType,
+ kOpaque_SkAlphaType, nullptr);
+
+ const int num_pixels = size.width() * size.height();
+ std::vector<uint8_t> dst_pixels(num_pixels * 4);
+
+ // Read back pixels from Sk Image.
+ EXPECT_TRUE(sk_image->readPixels(dst_info, dst_pixels.data(),
+ dst_info.minRowBytes(), 0, 0));
+
+ for (int i = 0; i < num_pixels; i++) {
+ // Compare the pixel values.
+ const uint8_t* pixel = dst_pixels.data() + (i * 4);
+ EXPECT_EQ(pixel[0], 0);
+ EXPECT_EQ(pixel[1], 255);
+ EXPECT_EQ(pixel[2], 0);
+ EXPECT_EQ(pixel[3], 255);
+ }
+
+ GrFlushInfo flush_info;
+ flush_info.fNumSemaphores = end_semaphores.size();
+ flush_info.fSignalSemaphores = end_semaphores.data();
+ gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_.get(),
+ &flush_info);
+
+ context_state_->gr_context()->flush(flush_info);
+ context_state_->gr_context()->submit();
+ }
+}
+
+TEST_F(ExternalVkImageFactoryTest, SkiaVulkanWrite_DawnRead) {
+ if (!VulkanSupported()) {
+ DLOG(ERROR) << "Test skipped because Vulkan isn't supported.";
+ return;
+ }
+ // Create a backing using mailbox.
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ const auto format = viz::ResourceFormat::RGBA_8888;
+ const gfx::Size size(4, 4);
+ const auto color_space = gfx::ColorSpace::CreateSRGB();
+ const uint32_t usage = SHARED_IMAGE_USAGE_DISPLAY | SHARED_IMAGE_USAGE_WEBGPU;
+ const gpu::SurfaceHandle surface_handle = gpu::kNullSurfaceHandle;
+ auto backing = shared_image_factory_->CreateSharedImage(
+ mailbox, format, surface_handle, size, color_space,
+ kTopLeft_GrSurfaceOrigin, kPremul_SkAlphaType, usage,
+ false /* is_thread_safe */);
+ ASSERT_NE(backing, nullptr);
+
+ std::unique_ptr<SharedImageRepresentationFactoryRef> factory_ref =
+ shared_image_manager_.Register(std::move(backing),
+ memory_type_tracker_.get());
+
+ {
+ // Create a SkiaRepresentation
+ auto skia_representation =
+ shared_image_representation_factory_->ProduceSkia(mailbox,
+ context_state_.get());
+
+ // Begin access for writing
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ auto skia_scoped_access = skia_representation->BeginScopedWriteAccess(
+ 1 /* final_msaa_count */,
+ SkSurfaceProps(0 /* flags */, kUnknown_SkPixelGeometry),
+ &begin_semaphores, &end_semaphores,
+ gpu::SharedImageRepresentation::AllowUnclearedAccess::kYes);
+
+ SkSurface* dest_surface = skia_scoped_access->surface();
+ dest_surface->wait(begin_semaphores.size(), begin_semaphores.data());
+ SkCanvas* dest_canvas = dest_surface->getCanvas();
+
+ // Color the top half blue, and the bottom half green
+ dest_canvas->drawRect(SkRect{0, 0, size.width(), size.height() / 2},
+ SkPaint(SkColors::kBlue));
+ dest_canvas->drawRect(
+ SkRect{0, size.height() / 2, size.width(), size.height()},
+ SkPaint(SkColors::kGreen));
+ skia_representation->SetCleared();
+
+ GrFlushInfo flush_info;
+ flush_info.fNumSemaphores = end_semaphores.size();
+ flush_info.fSignalSemaphores = end_semaphores.data();
+ gpu::AddVulkanCleanupTaskForSkiaFlush(vulkan_context_provider_.get(),
+ &flush_info);
+ dest_surface->flush(flush_info, skia_scoped_access->end_state());
+ if (skia_scoped_access->end_state()) {
+ context_state_->gr_context()->setBackendTextureState(
+ dest_surface->getBackendTexture(
+ SkSurface::BackendHandleAccess::kFlushRead_BackendHandleAccess),
+ *skia_scoped_access->end_state());
+ }
+ context_state_->gr_context()->submit();
+ }
+
+ {
+ // Create a Dawn representation
+ auto dawn_representation =
+ shared_image_representation_factory_->ProduceDawn(mailbox,
+ dawn_device_.Get());
+ ASSERT_TRUE(dawn_representation);
+
+ // Begin access to copy the data out. Skia should have initialized the
+ // contents.
+ auto dawn_scoped_access = dawn_representation->BeginScopedAccess(
+ WGPUTextureUsage_CopySrc,
+ SharedImageRepresentation::AllowUnclearedAccess::kNo);
+ ASSERT_TRUE(dawn_scoped_access);
+
+ wgpu::Texture src_texture(dawn_scoped_access->texture());
+
+ // Create a buffer to read back the texture data
+ wgpu::BufferDescriptor dst_buffer_desc = {};
+ dst_buffer_desc.usage =
+ wgpu::BufferUsage::CopyDst | wgpu::BufferUsage::MapRead;
+ dst_buffer_desc.size = 256 * size.height();
+ wgpu::Buffer dst_buffer = dawn_device_.CreateBuffer(&dst_buffer_desc);
+
+ // Encode the buffer copy
+ wgpu::CommandEncoder encoder = dawn_device_.CreateCommandEncoder();
+ {
+ wgpu::TextureCopyView src_copy_view = {};
+ src_copy_view.origin = {0, 0, 0};
+ src_copy_view.texture = src_texture;
+
+ wgpu::BufferCopyView dst_copy_view = {};
+ dst_copy_view.buffer = dst_buffer;
+ dst_copy_view.layout.bytesPerRow = 256;
+ dst_copy_view.layout.offset = 0;
+ dst_copy_view.layout.rowsPerImage = 0;
+
+ wgpu::Extent3D copy_extent = {size.width(), size.height(), 1};
+
+ encoder.CopyTextureToBuffer(&src_copy_view, &dst_copy_view, &copy_extent);
+ }
+
+ wgpu::CommandBuffer commands = encoder.Finish();
+ wgpu::Queue queue = dawn_device_.GetDefaultQueue();
+ queue.Submit(1, &commands);
+
+ // Map the buffer to read back data
+ bool done = false;
+ dst_buffer.MapAsync(
+ wgpu::MapMode::Read, 0, 256 * size.height(),
+ [](WGPUBufferMapAsyncStatus status, void* userdata) {
+ EXPECT_EQ(status, WGPUBufferMapAsyncStatus_Success);
+ *static_cast<bool*>(userdata) = true;
+ },
+ &done);
+
+ while (!done) {
+ base::PlatformThread::Sleep(base::TimeDelta::FromMicroseconds(100));
+ dawn_device_.Tick();
+ }
+
+ // Check the pixel data
+ const uint8_t* pixel_data =
+ static_cast<const uint8_t*>(dst_buffer.GetConstMappedRange());
+ for (int h = 0; h < size.height(); ++h) {
+ for (int w = 0; w < size.width(); ++w) {
+ const uint8_t* pixel = (pixel_data + h * 256) + w * 4;
+ if (h < size.height() / 2) {
+ EXPECT_EQ(pixel[0], 0);
+ EXPECT_EQ(pixel[1], 0);
+ EXPECT_EQ(pixel[2], 255);
+ EXPECT_EQ(pixel[3], 255);
+ } else {
+ EXPECT_EQ(pixel[0], 0);
+ EXPECT_EQ(pixel[1], 255);
+ EXPECT_EQ(pixel[2], 0);
+ EXPECT_EQ(pixel[3], 255);
+ }
+ }
+ }
+ }
+}
+
+#endif // BUILDFLAG(USE_DAWN)
+
+} // anonymous namespace
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.cc
new file mode 100644
index 00000000000..c5f79a83a9c
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.cc
@@ -0,0 +1,111 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/external_vk_image_overlay_representation.h"
+
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "gpu/vulkan/vulkan_implementation.h"
+
+namespace gpu {
+
+namespace {
+
+// Moves platform specific SemaphoreHandle instances to gfx::GpuFenceHandle.
+gfx::GpuFenceHandle SemaphoreHandleToGpuFenceHandle(SemaphoreHandle handle) {
+ gfx::GpuFenceHandle fence_handle;
+#if defined(OS_FUCHSIA)
+ // Fuchsia's Vulkan driver allows zx::event to be obtained from a
+ // VkSemaphore, which can then be used to submit present work, see
+ // https://fuchsia.dev/reference/fidl/fuchsia.ui.scenic.
+ fence_handle.owned_event = handle.TakeHandle();
+#elif defined(OS_POSIX)
+ fence_handle.owned_fd = handle.TakeHandle();
+#elif defined(OS_WIN)
+ fence_handle.owned_handle = handle.TakeHandle();
+#endif // defined(OS_FUCHSIA)
+ return fence_handle;
+}
+
+} // namespace
+
+ExternalVkImageOverlayRepresentation::ExternalVkImageOverlayRepresentation(
+ SharedImageManager* manager,
+ ExternalVkImageBacking* backing,
+ MemoryTypeTracker* tracker)
+ : gpu::SharedImageRepresentationOverlay(manager, backing, tracker),
+ vk_image_backing_(backing) {}
+
+ExternalVkImageOverlayRepresentation::~ExternalVkImageOverlayRepresentation() =
+ default;
+
+bool ExternalVkImageOverlayRepresentation::BeginReadAccess(
+ std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) {
+ DCHECK(read_begin_semaphores_.empty());
+ if (!vk_image_backing_->BeginAccess(/*readonly=*/true,
+ &read_begin_semaphores_,
+ /*is_gl=*/false)) {
+ return false;
+ }
+
+ // Create a |read_end_semaphore_| which will be signalled by the display.
+ read_end_semaphore_ =
+ vk_image_backing_->external_semaphore_pool()->GetOrCreateSemaphore();
+
+ GetAcquireFences(acquire_fences);
+ GetReleaseFences(release_fences);
+ return true;
+}
+
+void ExternalVkImageOverlayRepresentation::EndReadAccess() {
+ DCHECK(read_end_semaphore_);
+ vk_image_backing_->EndAccess(/*readonly=*/true,
+ std::move(read_end_semaphore_),
+ /*is_gl=*/false);
+
+ // All pending semaphores have been waited on directly or indirectly. They can
+ // be reused when the next submitted GPU work is done by GPU.
+ vk_image_backing_->ReturnPendingSemaphoresWithFenceHelper(
+ std::move(read_begin_semaphores_));
+ read_begin_semaphores_.clear();
+}
+
+gl::GLImage* ExternalVkImageOverlayRepresentation::GetGLImage() {
+ NOTREACHED();
+ return nullptr;
+}
+
+#if defined(OS_ANDROID)
+void ExternalVkImageOverlayRepresentation::NotifyOverlayPromotion(
+ bool promotion,
+ const gfx::Rect& bounds) {
+ NOTREACHED();
+}
+#endif
+
+void ExternalVkImageOverlayRepresentation::GetAcquireFences(
+ std::vector<gfx::GpuFence>* fences) {
+ const VkDevice& device = vk_image_backing_->context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ for (auto& semaphore : read_begin_semaphores_) {
+ DCHECK(semaphore.is_valid());
+ fences->emplace_back(SemaphoreHandleToGpuFenceHandle(
+ vk_image_backing_->vulkan_implementation()->GetSemaphoreHandle(
+ device, semaphore.GetVkSemaphore())));
+ }
+}
+
+void ExternalVkImageOverlayRepresentation::GetReleaseFences(
+ std::vector<gfx::GpuFence>* fences) {
+ DCHECK(read_end_semaphore_.is_valid());
+ const VkDevice& device = vk_image_backing_->context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ fences->emplace_back(SemaphoreHandleToGpuFenceHandle(
+ vk_image_backing_->vulkan_implementation()->GetSemaphoreHandle(
+ device, read_end_semaphore_.GetVkSemaphore())));
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.h b/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.h
new file mode 100644
index 00000000000..d028b210766
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/external_vk_image_overlay_representation.h
@@ -0,0 +1,48 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_OVERLAY_REPRESENTATION_H_
+#define GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_OVERLAY_REPRESENTATION_H_
+
+#include "build/build_config.h"
+#include "gpu/command_buffer/service/external_vk_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+
+namespace gpu {
+
+class ExternalVkImageOverlayRepresentation
+ : public SharedImageRepresentationOverlay {
+ public:
+ ExternalVkImageOverlayRepresentation(gpu::SharedImageManager* manager,
+ ExternalVkImageBacking* backing,
+ gpu::MemoryTypeTracker* tracker);
+ ~ExternalVkImageOverlayRepresentation() override;
+ ExternalVkImageOverlayRepresentation(
+ const ExternalVkImageOverlayRepresentation&) = delete;
+ ExternalVkImageOverlayRepresentation& operator=(
+ const ExternalVkImageOverlayRepresentation&) = delete;
+
+ protected:
+ // SharedImageRepresentationOverlay implementation
+ bool BeginReadAccess(std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) override;
+ void EndReadAccess() override;
+ gl::GLImage* GetGLImage() override;
+
+#if defined(OS_ANDROID)
+ void NotifyOverlayPromotion(bool promotion, const gfx::Rect& bounds) override;
+#endif
+
+ private:
+ void GetAcquireFences(std::vector<gfx::GpuFence>* fences);
+ void GetReleaseFences(std::vector<gfx::GpuFence>* fences);
+
+ ExternalVkImageBacking* const vk_image_backing_;
+ std::vector<ExternalSemaphore> read_begin_semaphores_;
+ ExternalSemaphore read_end_semaphore_;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_EXTERNAL_VK_IMAGE_OVERLAY_REPRESENTATION_H_
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
index 380d617ba45..f562e80ad64 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
@@ -80,7 +80,7 @@ sk_sp<SkSurface> ExternalVkImageSkiaRepresentation::BeginWriteAccess(
access_mode_ = kWrite;
- // If Vulkan and GL (or Dawn) share the same memory backing, we need set
+ // If Vulkan/GL/Dawn share the same memory backing, we need set
// |end_state| VK_QUEUE_FAMILY_EXTERNAL, and then the caller will set the
// VkImage to VK_QUEUE_FAMILY_EXTERNAL before calling EndAccess().
if (backing_impl()->need_synchronization()) {
@@ -120,10 +120,10 @@ sk_sp<SkPromiseImageTexture> ExternalVkImageSkiaRepresentation::BeginReadAccess(
return nullptr;
}
- // If Vulkan and GLSet share the same memory backing, we need set |end_state|
+ // If Vulkan/GL/Dawn share the same memory backing, we need set |end_state|
// VK_QUEUE_FAMILY_EXTERNAL, and then the caller will set the VkImage to
// VK_QUEUE_FAMILY_EXTERNAL before calling EndAccess().
- if (!backing_impl()->use_separate_gl_texture()) {
+ if (backing_impl()->need_synchronization()) {
*end_state = std::make_unique<GrBackendSurfaceMutableState>(
VK_IMAGE_LAYOUT_UNDEFINED, VK_QUEUE_FAMILY_EXTERNAL);
}
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index d51c538b1fa..5f852567cfc 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -17,6 +17,7 @@
#include "base/strings/string_split.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
+#include "build/chromeos_buildflags.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/texture_definition.h"
#include "gpu/config/gpu_switches.h"
@@ -195,7 +196,7 @@ FeatureInfo::FeatureInfo(
.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] ==
gpu::kGpuFeatureStatusEnabled;
-#if defined(OS_CHROMEOS) || BUILDFLAG(IS_CHROMECAST)
+#if BUILDFLAG(IS_ASH) || BUILDFLAG(IS_CHROMECAST)
feature_flags_.chromium_image_ycbcr_420v = base::Contains(
gpu_feature_info.supported_buffer_formats_for_allocation_and_texturing,
gfx::BufferFormat::YUV_420_BIPLANAR);
@@ -203,10 +204,12 @@ FeatureInfo::FeatureInfo(
feature_flags_.chromium_image_ycbcr_420v = true;
#endif
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
feature_flags_.chromium_image_ycbcr_p010 = base::Contains(
gpu_feature_info.supported_buffer_formats_for_allocation_and_texturing,
gfx::BufferFormat::P010);
+#elif defined(OS_MAC)
+ feature_flags_.chromium_image_ycbcr_p010 = base::mac::IsAtLeastOS11();
#endif
}
@@ -1562,12 +1565,6 @@ void FeatureInfo::InitializeFeatures() {
validators_.g_l_state.AddValue(GL_MAX_DUAL_SOURCE_DRAW_BUFFERS_EXT);
}
-#if !defined(OS_MAC)
- if (workarounds_.ignore_egl_sync_failures) {
- gl::GLFenceEGL::SetIgnoreFailures();
- }
-#endif
-
if (workarounds_.avoid_egl_image_target_texture_reuse) {
TextureDefinition::AvoidEGLTargetTextureReuse();
}
diff --git a/chromium/gpu/command_buffer/service/gl_utils.cc b/chromium/gpu/command_buffer/service/gl_utils.cc
index 6099654615f..971a7dd506b 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.cc
+++ b/chromium/gpu/command_buffer/service/gl_utils.cc
@@ -895,8 +895,10 @@ bool ValidateCopyTexFormatHelper(const FeatureInfo* feature_info,
// YUV formats are not valid for CopyTex[Sub]Image.
if (internal_format == GL_RGB_YCRCB_420_CHROMIUM ||
internal_format == GL_RGB_YCBCR_420V_CHROMIUM ||
+ internal_format == GL_RGB_YCBCR_P010_CHROMIUM ||
read_format == GL_RGB_YCRCB_420_CHROMIUM ||
- read_format == GL_RGB_YCBCR_420V_CHROMIUM) {
+ read_format == GL_RGB_YCBCR_420V_CHROMIUM ||
+ read_format == GL_RGB_YCBCR_P010_CHROMIUM) {
return false;
}
// Check we have compatible formats.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 2d5ca431c5c..5bc332abfa0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -20,7 +20,6 @@
#include <utility>
#include "base/bind.h"
-#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/containers/flat_set.h"
@@ -3955,8 +3954,7 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
// always enabled and there is no way to disable it.
// Therefore, it seems OK to also always enable it on top of Desktop GL for
// both ES2 and ES3 contexts.
- if (!workarounds().disable_texture_cube_map_seamless &&
- gl_version_info().IsAtLeastGL(3, 2)) {
+ if (gl_version_info().IsAtLeastGL(3, 2)) {
api()->glEnableFn(GL_TEXTURE_CUBE_MAP_SEAMLESS);
}
@@ -6404,7 +6402,7 @@ void GLES2DecoderImpl::OnUseFramebuffer() const {
state_.viewport_width, state_.viewport_height);
}
- if (workarounds().restore_scissor_on_fbo_change || supports_dc_layers_) {
+ if (supports_dc_layers_) {
// The driver forgets the correct scissor when modifying the FBO binding.
gfx::Vector2d scissor_offset = GetBoundFramebufferDrawOffset();
api()->glScissorFn(state_.scissor_x + scissor_offset.x(),
@@ -6412,12 +6410,6 @@ void GLES2DecoderImpl::OnUseFramebuffer() const {
state_.scissor_width, state_.scissor_height);
}
- if (workarounds().restore_scissor_on_fbo_change) {
- // crbug.com/222018 - Also on QualComm, the flush here avoids flicker,
- // it's unclear how this bug works.
- api()->glFlushFn();
- }
-
if (workarounds().force_update_scissor_state_when_binding_fbo0 &&
GetBoundDrawFramebufferServiceId() == 0) {
// The theory is that FBO0 keeps some internal (in HW regs maybe?) scissor
@@ -18548,7 +18540,7 @@ void GLES2DecoderImpl::CopySubTextureHelper(const char* function_name,
source_type, dest_binding_target, dest_level, dest_internal_format,
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
unpack_unmultiply_alpha == GL_TRUE, dither == GL_TRUE);
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+#if BUILDFLAG(IS_ASH) && defined(ARCH_CPU_X86_FAMILY)
// glDrawArrays is faster than glCopyTexSubImage2D on IA Mesa driver,
// although opposite in Android.
// TODO(dshwang): After Mesa fixes this issue, remove this hack.
@@ -20338,6 +20330,7 @@ error::Error GLES2DecoderImpl::HandleSetActiveURLCHROMIUM(
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
#include "base/macros.h"
+#include "build/chromeos_buildflags.h"
#include "gpu/command_buffer/service/gles2_cmd_decoder_autogen.h"
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index 045c71984cb..c270d46c83d 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -978,6 +978,7 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
static constexpr const char* kOptionalFunctionalityExtensions[] = {
"GL_ANGLE_depth_texture",
"GL_ANGLE_framebuffer_multisample",
+ "GL_ANGLE_get_tex_level_parameter",
"GL_ANGLE_instanced_arrays",
"GL_ANGLE_memory_object_flags",
"GL_ANGLE_pack_reverse_row_order",
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index 63f4f8d7759..abd7a835a7f 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -933,7 +933,7 @@ error::Error DoScheduleCALayerCHROMIUM(GLuint contents_texture_id,
GLenum filter,
const GLfloat* bounds_rect);
error::Error DoScheduleCALayerInUseQueryCHROMIUM(
- GLuint n,
+ GLsizei n,
const volatile GLuint* textures);
error::Error DoScheduleDCLayerCHROMIUM(GLuint texture_0,
GLuint texture_1,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index f6edc0fbce7..a26b4417a31 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -4,7 +4,7 @@
#include "gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/ranges.h"
#include "base/strings/string_number_conversions.h"
@@ -2779,6 +2779,12 @@ error::Error GLES2DecoderPassthroughImpl::DoTexImage3D(GLenum target,
error::Error GLES2DecoderPassthroughImpl::DoTexParameterf(GLenum target,
GLenum pname,
GLfloat param) {
+ // Don't allow clients to modify the resource initialization state.
+ if (pname == GL_RESOURCE_INITIALIZED_ANGLE) {
+ InsertError(GL_INVALID_ENUM, "Invalid enum.");
+ return error::kNoError;
+ }
+
api()->glTexParameterfFn(target, pname, param);
return error::kNoError;
}
@@ -2787,6 +2793,12 @@ error::Error GLES2DecoderPassthroughImpl::DoTexParameterfv(
GLenum target,
GLenum pname,
const volatile GLfloat* params) {
+ // Don't allow clients to modify the resource initialization state.
+ if (pname == GL_RESOURCE_INITIALIZED_ANGLE) {
+ InsertError(GL_INVALID_ENUM, "Invalid enum.");
+ return error::kNoError;
+ }
+
std::array<GLfloat, 1> params_copy{{params[0]}};
api()->glTexParameterfvRobustANGLEFn(target, pname,
static_cast<GLsizei>(params_copy.size()),
@@ -2797,6 +2809,12 @@ error::Error GLES2DecoderPassthroughImpl::DoTexParameterfv(
error::Error GLES2DecoderPassthroughImpl::DoTexParameteri(GLenum target,
GLenum pname,
GLint param) {
+ // Don't allow clients to modify the resource initialization state.
+ if (pname == GL_RESOURCE_INITIALIZED_ANGLE) {
+ InsertError(GL_INVALID_ENUM, "Invalid enum.");
+ return error::kNoError;
+ }
+
api()->glTexParameteriFn(target, pname, param);
return error::kNoError;
}
@@ -2805,6 +2823,12 @@ error::Error GLES2DecoderPassthroughImpl::DoTexParameteriv(
GLenum target,
GLenum pname,
const volatile GLint* params) {
+ // Don't allow clients to modify the resource initialization state.
+ if (pname == GL_RESOURCE_INITIALIZED_ANGLE) {
+ InsertError(GL_INVALID_ENUM, "Invalid enum.");
+ return error::kNoError;
+ }
+
std::array<GLint, 1> params_copy{{params[0]}};
api()->glTexParameterivRobustANGLEFn(target, pname,
static_cast<GLsizei>(params_copy.size()),
@@ -4911,7 +4935,7 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerCHROMIUM(
}
error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerInUseQueryCHROMIUM(
- GLuint n,
+ GLsizei n,
const volatile GLuint* textures) {
// Validate that count is non-negative before allocating a vector
if (n < 0) {
@@ -4921,7 +4945,7 @@ error::Error GLES2DecoderPassthroughImpl::DoScheduleCALayerInUseQueryCHROMIUM(
std::vector<gl::GLSurface::CALayerInUseQuery> queries;
queries.reserve(n);
- for (GLuint i = 0; i < n; ++i) {
+ for (GLsizei i = 0; i < n; ++i) {
gl::GLImage* image = nullptr;
GLuint texture_id = textures[i];
if (texture_id) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
index 7623eb54260..2c648473058 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
@@ -490,6 +490,7 @@ static const GLenum valid_image_internal_format_table[] = {
GL_RGB_YCRCB_420_CHROMIUM,
GL_RGB_YCBCR_422_CHROMIUM,
GL_RGB_YCBCR_420V_CHROMIUM,
+ GL_RGB_YCBCR_P010_CHROMIUM,
GL_RGBA,
};
@@ -1138,6 +1139,7 @@ static const GLenum
GL_RGB_YCRCB_420_CHROMIUM,
GL_RGB_YCBCR_422_CHROMIUM,
GL_RGB_YCBCR_420V_CHROMIUM,
+ GL_RGB_YCBCR_P010_CHROMIUM,
GL_R16_EXT,
};
diff --git a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
index 97b6d43e986..e30519af832 100644
--- a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
+++ b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
@@ -11,58 +11,19 @@
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_peak_memory.h"
-// Macro to reduce code duplication when logging memory in
-// GpuCommandBufferMemoryTracker. This is needed as the UMA_HISTOGRAM_* macros
-// require a unique call-site per histogram (you can't funnel multiple strings
-// into the same call-site).
-#define GPU_COMMAND_BUFFER_MEMORY_BLOCK(category) \
- do { \
- uint64_t mb_used = size_ / (1024 * 1024); \
- switch (context_type_) { \
- case CONTEXT_TYPE_WEBGL1: \
- case CONTEXT_TYPE_WEBGL2: \
- case CONTEXT_TYPE_WEBGL2_COMPUTE: \
- UMA_HISTOGRAM_MEMORY_LARGE_MB("GPU.ContextMemory.WebGL." category, \
- mb_used); \
- break; \
- case CONTEXT_TYPE_OPENGLES2: \
- case CONTEXT_TYPE_OPENGLES3: \
- UMA_HISTOGRAM_MEMORY_LARGE_MB("GPU.ContextMemory.GLES." category, \
- mb_used); \
- break; \
- case CONTEXT_TYPE_WEBGPU: \
- break; \
- } \
- } while (false)
-
namespace gpu {
GpuCommandBufferMemoryTracker::GpuCommandBufferMemoryTracker(
CommandBufferId command_buffer_id,
uint64_t client_tracing_id,
- ContextType context_type,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
Observer* observer)
: command_buffer_id_(command_buffer_id),
client_tracing_id_(client_tracing_id),
- context_type_(context_type),
- memory_pressure_listener_(
- FROM_HERE,
- base::BindRepeating(
- &GpuCommandBufferMemoryTracker::LogMemoryStatsPressure,
- base::Unretained(this))),
observer_(observer) {
- // Set up |memory_stats_timer_| to call LogMemoryPeriodic periodically
- // via the provided |task_runner|.
- memory_stats_timer_.SetTaskRunner(std::move(task_runner));
- memory_stats_timer_.Start(
- FROM_HERE, base::TimeDelta::FromSeconds(30), this,
- &GpuCommandBufferMemoryTracker::LogMemoryStatsPeriodic);
}
-GpuCommandBufferMemoryTracker::~GpuCommandBufferMemoryTracker() {
- LogMemoryStatsShutdown();
-}
+GpuCommandBufferMemoryTracker::~GpuCommandBufferMemoryTracker() = default;
void GpuCommandBufferMemoryTracker::TrackMemoryAllocatedChange(int64_t delta) {
DCHECK(delta >= 0 || size_ >= static_cast<uint64_t>(-delta));
@@ -90,21 +51,4 @@ uint64_t GpuCommandBufferMemoryTracker::ContextGroupTracingId() const {
return command_buffer_id_.GetUnsafeValue();
}
-void GpuCommandBufferMemoryTracker::LogMemoryStatsPeriodic() {
- GPU_COMMAND_BUFFER_MEMORY_BLOCK("Periodic");
-}
-
-void GpuCommandBufferMemoryTracker::LogMemoryStatsShutdown() {
- GPU_COMMAND_BUFFER_MEMORY_BLOCK("Shutdown");
-}
-
-void GpuCommandBufferMemoryTracker::LogMemoryStatsPressure(
- base::MemoryPressureListener::MemoryPressureLevel pressure_level) {
- // Only log on CRITICAL memory pressure.
- if (pressure_level ==
- base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
- GPU_COMMAND_BUFFER_MEMORY_BLOCK("Pressure");
- }
-}
-
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h
index 731b0b10ff5..4161af93490 100644
--- a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h
+++ b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h
@@ -24,7 +24,6 @@ class GPU_GLES2_EXPORT GpuCommandBufferMemoryTracker : public MemoryTracker {
GpuCommandBufferMemoryTracker(
CommandBufferId command_buffer_id,
uint64_t client_tracing_id,
- ContextType context_type,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
Observer* observer);
~GpuCommandBufferMemoryTracker() override;
@@ -37,20 +36,10 @@ class GPU_GLES2_EXPORT GpuCommandBufferMemoryTracker : public MemoryTracker {
uint64_t ContextGroupTracingId() const override;
private:
- void LogMemoryStatsPeriodic();
- void LogMemoryStatsShutdown();
- void LogMemoryStatsPressure(
- base::MemoryPressureListener::MemoryPressureLevel pressure_level);
-
uint64_t size_ = 0;
const CommandBufferId command_buffer_id_;
const uint64_t client_tracing_id_;
- // Variables used in memory stat histogram logging.
- const ContextType context_type_;
- base::RepeatingTimer memory_stats_timer_;
- base::MemoryPressureListener memory_pressure_listener_;
-
MemoryTracker::Observer* const observer_;
DISALLOW_COPY_AND_ASSIGN(GpuCommandBufferMemoryTracker);
diff --git a/chromium/gpu/command_buffer/service/gpu_fence_manager_unittest.cc b/chromium/gpu/command_buffer/service/gpu_fence_manager_unittest.cc
index 20e5dee63c8..429d1acda60 100644
--- a/chromium/gpu/command_buffer/service/gpu_fence_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gpu_fence_manager_unittest.cc
@@ -5,7 +5,7 @@
#include "gpu/command_buffer/service/gpu_fence_manager.h"
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "build/build_config.h"
#include "gpu/command_buffer/service/error_state_mock.h"
#include "gpu/command_buffer/service/feature_info.h"
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.cc b/chromium/gpu/command_buffer/service/gpu_switches.cc
index 886e0f039f5..a12e9533485 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.cc
+++ b/chromium/gpu/command_buffer/service/gpu_switches.cc
@@ -77,16 +77,8 @@ const char kUseVulkan[] = "use-vulkan";
const char kVulkanImplementationNameNative[] = "native";
const char kVulkanImplementationNameSwiftshader[] = "swiftshader";
-// Forces to use protected memory for vulkan compositing.
-const char kEnforceVulkanProtectedMemory[] = "enforce-vulkan-protected-memory";
-
// Disables VK_KHR_surface extension. Instead of using swapchain, bitblt will be
// used for present render result on screen.
const char kDisableVulkanSurface[] = "disable-vulkan-surface";
-// Disables falling back to GL based hardware rendering if initializing Vulkan
-// fails. This is to allow tests to catch regressions in Vulkan.
-const char kDisableVulkanFallbackToGLForTesting[] =
- "disable-vulkan-fallback-to-gl-for-testing";
-
} // namespace switches
diff --git a/chromium/gpu/command_buffer/service/gpu_switches.h b/chromium/gpu/command_buffer/service/gpu_switches.h
index 1eca57d6eaf..b3b7d999b73 100644
--- a/chromium/gpu/command_buffer/service/gpu_switches.h
+++ b/chromium/gpu/command_buffer/service/gpu_switches.h
@@ -33,9 +33,7 @@ GPU_EXPORT extern const char kEmulateShaderPrecision[];
GPU_EXPORT extern const char kUseVulkan[];
GPU_EXPORT extern const char kVulkanImplementationNameNative[];
GPU_EXPORT extern const char kVulkanImplementationNameSwiftshader[];
-GPU_EXPORT extern const char kEnforceVulkanProtectedMemory[];
GPU_EXPORT extern const char kDisableVulkanSurface[];
-GPU_EXPORT extern const char kDisableVulkanFallbackToGLForTesting[];
} // namespace switches
diff --git a/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc b/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
index ac70b9c2c1a..2c6a8a0146f 100644
--- a/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
+++ b/chromium/gpu/command_buffer/service/gr_cache_controller_unittest.cc
@@ -4,7 +4,7 @@
#include "gpu/command_buffer/service/gr_cache_controller.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/test/test_mock_time_task_runner.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/shared_context_state.h"
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
index 837750ffa17..f84a5a27d93 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
@@ -10,8 +10,10 @@
#include "base/android/android_hardware_buffer_compat.h"
#include "base/android/android_image_reader_compat.h"
+#include "base/android/build_info.h"
#include "base/android/jni_android.h"
#include "base/android/scoped_hardware_buffer_fence_sync.h"
+#include "base/debug/dump_without_crashing.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_functions.h"
@@ -20,8 +22,9 @@
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
#include "gpu/command_buffer/service/abstract_texture.h"
+#include "gpu/config/gpu_finch_features.h"
#include "gpu/ipc/common/android/android_image_reader_utils.h"
-#include "ui/gl/android/android_surface_control_compat.h"
+#include "ui/gfx/android/android_surface_control_compat.h"
#include "ui/gl/gl_fence_android_native_fence_sync.h"
#include "ui/gl/gl_utils.h"
#include "ui/gl/scoped_binders.h"
@@ -62,11 +65,10 @@ bool IsSurfaceControl(TextureOwner::Mode mode) {
uint32_t NumRequiredMaxImages(TextureOwner::Mode mode) {
if (IsSurfaceControl(mode) ||
mode == TextureOwner::Mode::kAImageReaderInsecureMultithreaded) {
- DCHECK(!base::android::AndroidImageReader::LimitAImageReaderMaxSizeToOne());
+ DCHECK(!features::LimitAImageReaderMaxSizeToOne());
return 3;
}
- return base::android::AndroidImageReader::LimitAImageReaderMaxSizeToOne() ? 1
- : 2;
+ return features::LimitAImageReaderMaxSizeToOne() ? 1 : 2;
}
} // namespace
@@ -152,12 +154,16 @@ ImageReaderGLOwner::ImageReaderGLOwner(
media_status_t return_code = loader_.AImageReader_newWithUsage(
width, height, format, usage, max_images_, &reader);
if (return_code != AMEDIA_OK) {
- LOG(ERROR) << " Image reader creation failed.";
- if (return_code == AMEDIA_ERROR_INVALID_PARAMETER)
+ LOG(ERROR) << " Image reader creation failed on device model : "
+ << base::android::BuildInfo::GetInstance()->model()
+ << ". maxImages used is : " << max_images_;
+ base::debug::DumpWithoutCrashing();
+ if (return_code == AMEDIA_ERROR_INVALID_PARAMETER) {
LOG(ERROR) << "Either reader is null, or one or more of width, height, "
"format, maxImages arguments is not supported";
- else
+ } else {
LOG(ERROR) << "unknown error";
+ }
return;
}
DCHECK(reader);
@@ -244,7 +250,9 @@ gl::ScopedJavaSurface ImageReaderGLOwner::CreateJavaSurface() const {
DCHECK(j_surface);
// Get the scoped java surface that is owned externally.
- return gl::ScopedJavaSurface::AcquireExternalSurface(j_surface);
+ // TODO(1146071): use of JavaParamRef temporary to try to debug crash.
+ return gl::ScopedJavaSurface::AcquireExternalSurface(
+ base::android::JavaParamRef<jobject>(env, j_surface));
}
void ImageReaderGLOwner::UpdateTexImage() {
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc
index cd675ad2fe7..f3de9651ce7 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner_unittest.cc
@@ -13,6 +13,7 @@
#include "gpu/command_buffer/service/abstract_texture.h"
#include "gpu/command_buffer/service/image_reader_gl_owner.h"
#include "gpu/command_buffer/service/mock_abstract_texture.h"
+#include "gpu/config/gpu_finch_features.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_context_egl.h"
@@ -147,11 +148,10 @@ TEST_F(ImageReaderGLOwnerTest, DestructionWorksWithWrongContext) {
TEST_F(ImageReaderGLOwnerTest, MaxImageExpectation) {
if (!IsImageReaderSupported())
return;
+
EXPECT_EQ(static_cast<ImageReaderGLOwner*>(image_reader_.get())
->max_images_for_testing(),
- base::android::AndroidImageReader::LimitAImageReaderMaxSizeToOne()
- ? 1
- : 2);
+ features::LimitAImageReaderMaxSizeToOne() ? 1 : 2);
}
class ImageReaderGLOwnerSecureSurfaceControlTest
diff --git a/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
index 863853ddcb8..db3a13f869c 100644
--- a/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/raster_cmd_validation_implementation_autogen.h
@@ -81,6 +81,7 @@ static const viz::ResourceFormat valid_viz_resource_format_table[] = {
viz::ResourceFormat::RGBX_8888, viz::ResourceFormat::BGRX_8888,
viz::ResourceFormat::RGBA_1010102, viz::ResourceFormat::BGRA_1010102,
viz::ResourceFormat::YVU_420, viz::ResourceFormat::YUV_420_BIPLANAR,
+ viz::ResourceFormat::P010,
};
Validators::Validators()
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index 04e3061f74d..09522b82db6 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -59,17 +59,18 @@
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/command_buffer/service/wrapped_sk_image.h"
#include "gpu/vulkan/buildflags.h"
+#include "skia/ext/legacy_display_globals.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkDeferredDisplayListRecorder.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/core/SkSurfaceProps.h"
#include "third_party/skia/include/core/SkTypeface.h"
-#include "third_party/skia/include/core/SkYUVAIndex.h"
+#include "third_party/skia/include/core/SkYUVAInfo.h"
#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/GrDirectContext.h"
-#include "third_party/skia/include/gpu/GrTypes.h"
+#include "third_party/skia/include/gpu/GrYUVABackendTextures.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/skia_util.h"
#include "ui/gl/gl_context.h"
@@ -635,8 +636,12 @@ class RasterDecoderImpl final : public RasterDecoder,
void FlushAndSubmitIfNecessary(
SkSurface* surface,
std::vector<GrBackendSemaphore> signal_semaphores) {
+ bool sync_cpu = gpu::ShouldVulkanSyncCpuForSkiaSubmit(
+ shared_context_state_->vk_context_provider());
if (signal_semaphores.empty()) {
surface->flush();
+ if (sync_cpu)
+ gr_context()->submit(sync_cpu);
return;
}
@@ -653,7 +658,7 @@ class RasterDecoderImpl final : public RasterDecoder,
// If the |signal_semaphores| is empty, we can deferred the queue
// submission.
DCHECK_EQ(result, GrSemaphoresSubmitted::kYes);
- gr_context()->submit();
+ gr_context()->submit(sync_cpu);
}
#if defined(NDEBUG)
@@ -1595,7 +1600,8 @@ void RasterDecoderImpl::SetUpForRasterCHROMIUMForTest() {
// backed surface for OOP raster commands.
auto info = SkImageInfo::MakeN32(10, 10, kPremul_SkAlphaType,
SkColorSpace::MakeSRGB());
- sk_surface_for_testing_ = SkSurface::MakeRaster(info);
+ SkSurfaceProps props = skia::LegacyDisplayGlobals::GetSkSurfaceProps();
+ sk_surface_for_testing_ = SkSurface::MakeRaster(info, &props);
sk_surface_ = sk_surface_for_testing_.get();
raster_canvas_ = sk_surface_->getCanvas();
}
@@ -2185,7 +2191,7 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
source_type, dest_target, dest_level, dest_internal_format, unpack_flip_y,
NeedsUnpackPremultiplyAlpha(*source_shared_image),
false /* unpack_unmultiply_alpha */, false /* dither */);
-#if defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY)
+#if BUILDFLAG(IS_ASH) && defined(ARCH_CPU_X86_FAMILY)
// glDrawArrays is faster than glCopyTexSubImage2D on IA Mesa driver,
// although opposite in Android.
// TODO(dshwang): After Mesa fixes this issue, remove this hack.
@@ -2735,19 +2741,14 @@ void RasterDecoderImpl::DoConvertYUVMailboxesToRGBINTERNAL(
SkISize dest_size =
SkISize::Make(dest_surface->width(), dest_surface->height());
-
- std::array<SkYUVAIndex, SkYUVAIndex::kIndexCount> yuva_indices;
- yuva_indices[SkYUVAIndex::kY_Index] = {0, SkColorChannel::kR};
- yuva_indices[SkYUVAIndex::kU_Index] = {1, SkColorChannel::kR};
- if (is_nv12)
- yuva_indices[SkYUVAIndex::kV_Index] = {1, SkColorChannel::kG};
- else
- yuva_indices[SkYUVAIndex::kV_Index] = {2, SkColorChannel::kR};
- yuva_indices[SkYUVAIndex::kA_Index] = {-1, SkColorChannel::kA};
-
- auto result_image = SkImage::MakeFromYUVATextures(
- gr_context(), src_color_space, yuva_textures.data(),
- yuva_indices.data(), dest_size, kTopLeft_GrSurfaceOrigin, nullptr);
+ SkYUVAInfo::PlanarConfig planar_config =
+ is_nv12 ? SkYUVAInfo::PlanarConfig::kY_UV_420
+ : SkYUVAInfo::PlanarConfig::kY_U_V_420;
+ SkYUVAInfo yuva_info(dest_size, planar_config, src_color_space);
+ GrYUVABackendTextures yuva_backend_textures(yuva_info, yuva_textures.data(),
+ kTopLeft_GrSurfaceOrigin);
+ auto result_image =
+ SkImage::MakeFromYUVATextures(gr_context(), yuva_backend_textures);
if (!result_image) {
LOCAL_SET_GL_ERROR(
GL_INVALID_OPERATION, "glConvertYUVMailboxesToRGB",
@@ -2883,9 +2884,7 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(GLuint sk_color,
uint32_t flags = 0;
SkSurfaceProps surface_props(flags, kUnknown_SkPixelGeometry);
if (can_use_lcd_text) {
- // LegacyFontHost will get LCD text and skia figures out what type to use.
- surface_props =
- SkSurfaceProps(flags, SkSurfaceProps::kLegacyFontHost_InitType);
+ surface_props = skia::LegacyDisplayGlobals::GetSkSurfaceProps(flags);
}
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
@@ -3259,6 +3258,7 @@ void RasterDecoderImpl::RestoreStateForAttrib(GLuint attrib_index,
// we can easily edit the non-auto generated parts right here in this file
// instead of having to edit some template or the code generator.
#include "base/macros.h"
+#include "build/chromeos_buildflags.h"
#include "gpu/command_buffer/service/raster_decoder_autogen.h"
} // namespace raster
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
index f7ede913e85..f22df1e05b6 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
@@ -9,7 +9,7 @@
#include <string>
#include <utility>
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/memory/ptr_util.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
index 7aa67521190..6c73190fce4 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
@@ -13,7 +13,7 @@
#include <utility>
#include <vector>
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
diff --git a/chromium/gpu/command_buffer/service/scheduler.cc b/chromium/gpu/command_buffer/service/scheduler.cc
index 87e49e407b2..d50013680c4 100644
--- a/chromium/gpu/command_buffer/service/scheduler.cc
+++ b/chromium/gpu/command_buffer/service/scheduler.cc
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/callback.h"
+#include "base/hash/md5_constexpr.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
@@ -20,6 +21,17 @@
namespace gpu {
+namespace {
+
+uint64_t GetTaskFlowId(uint32_t sequence_id, uint32_t order_num) {
+ // Xor with a mask to ensure that the flow id does not collide with non-gpu
+ // tasks.
+ static constexpr uint64_t kMask = base::MD5Hash64Constexpr("gpu::Scheduler");
+ return kMask ^ (sequence_id) ^ (static_cast<uint64_t>(order_num) << 32);
+}
+
+} // namespace
+
Scheduler::Task::Task(SequenceId sequence_id,
base::OnceClosure closure,
std::vector<SyncToken> sync_token_fences)
@@ -166,6 +178,9 @@ void Scheduler::Sequence::ContinueTask(base::OnceClosure closure) {
uint32_t Scheduler::Sequence::ScheduleTask(base::OnceClosure closure) {
uint32_t order_num = order_data_->GenerateUnprocessedOrderNumber();
+ TRACE_EVENT_WITH_FLOW0("gpu,toplevel.flow", "Scheduler::ScheduleTask",
+ GetTaskFlowId(sequence_id_.value(), order_num),
+ TRACE_EVENT_FLAG_FLOW_OUT);
tasks_.push_back({std::move(closure), order_num});
return order_num;
}
@@ -524,7 +539,6 @@ void Scheduler::RunNextTask() {
SchedulingState state = scheduling_queue_.back();
scheduling_queue_.pop_back();
- TRACE_EVENT1("gpu", "Scheduler::RunNextTask", "state", state.AsValue());
base::ElapsedTimer task_timer;
Sequence* sequence = GetSequence(state.sequence_id);
@@ -534,6 +548,10 @@ void Scheduler::RunNextTask() {
uint32_t order_num = sequence->BeginTask(&closure);
DCHECK_EQ(order_num, state.order_num);
+ TRACE_EVENT_WITH_FLOW1("gpu,toplevel.flow", "Scheduler::RunNextTask",
+ GetTaskFlowId(state.sequence_id.value(), order_num),
+ TRACE_EVENT_FLAG_FLOW_IN, "state", state.AsValue());
+
// Begin/FinishProcessingOrderNumber must be called with the lock released
// because they can renter the scheduler in Enable/DisableSequence.
scoped_refptr<SyncPointOrderData> order_data = sequence->order_data();
diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc
index 840dc83809d..ced86d49725 100644
--- a/chromium/gpu/command_buffer/service/service_utils.cc
+++ b/chromium/gpu/command_buffer/service/service_utils.cc
@@ -163,6 +163,8 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) {
command_line->HasSwitch(switches::kEnableUnsafeWebGPU);
gpu_preferences.enable_dawn_backend_validation =
command_line->HasSwitch(switches::kEnableDawnBackendValidation);
+ gpu_preferences.disable_dawn_robustness =
+ command_line->HasSwitch(switches::kDisableDawnRobustness);
gpu_preferences.gr_context_type = ParseGrContextType();
gpu_preferences.use_vulkan = ParseVulkanImplementationName(command_line);
gpu_preferences.disable_vulkan_surface =
@@ -183,14 +185,20 @@ GrContextType ParseGrContextType() {
return base::FeatureList::IsEnabled(features::kMetal) ? GrContextType::kMetal
: GrContextType::kGL;
#else
- return base::FeatureList::IsEnabled(features::kVulkan)
- ? GrContextType::kVulkan
- : GrContextType::kGL;
+ return features::IsUsingVulkan() ? GrContextType::kVulkan
+ : GrContextType::kGL;
#endif
}
VulkanImplementationName ParseVulkanImplementationName(
const base::CommandLine* command_line) {
+#if defined(OS_ANDROID)
+ if (command_line->HasSwitch(switches::kWebViewDrawFunctorUsesVulkan) &&
+ base::FeatureList::IsEnabled(features::kWebViewVulkan)) {
+ return VulkanImplementationName::kForcedNative;
+ }
+#endif
+
if (command_line->HasSwitch(switches::kUseVulkan)) {
auto value = command_line->GetSwitchValueASCII(switches::kUseVulkan);
if (value.empty() || value == switches::kVulkanImplementationNameNative) {
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc
index eaf0b3a4a0c..875211661fe 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/shared_context_state.cc
@@ -17,6 +17,7 @@
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/config/gpu_finch_features.h"
#include "gpu/config/skia_limits.h"
#include "gpu/vulkan/buildflags.h"
#include "skia/buildflags.h"
@@ -33,10 +34,6 @@
#include "gpu/vulkan/vulkan_device_queue.h"
#endif
-#if defined(OS_ANDROID)
-#include "gpu/config/gpu_finch_features.h"
-#endif
-
#if defined(OS_FUCHSIA)
#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
#endif
@@ -178,12 +175,10 @@ SharedContextState::SharedContextState(
case GrContextType::kVulkan:
if (vk_context_provider_) {
#if BUILDFLAG(ENABLE_VULKAN)
- gr_context_ = vk_context_provider_->GetGrContext();
external_semaphore_pool_ =
std::make_unique<ExternalSemaphorePool>(this);
#endif
use_virtualized_gl_contexts_ = false;
- DCHECK(gr_context_);
}
break;
case GrContextType::kMetal:
@@ -271,6 +266,16 @@ bool SharedContextState::InitializeGrContext(
DetermineGrCacheLimitsFromAvailableMemory(&max_resource_cache_bytes,
&glyph_cache_max_texture_bytes);
+ // If you make any changes to the GrContext::Options here that could
+ // affect text rendering, make sure to match the capabilities initialized
+ // in GetCapabilities and ensuring these are also used by the
+ // PaintOpBufferSerializer.
+ GrContextOptions options = GetDefaultGrContextOptions(gr_context_type_);
+ options.fPersistentCache = cache;
+ options.fShaderErrorHandler = this;
+ if (gpu_preferences.force_max_texture_size)
+ options.fMaxTextureSizeOverride = gpu_preferences.force_max_texture_size;
+
if (gr_context_type_ == GrContextType::kGL) {
DCHECK(context_->IsCurrent(nullptr));
bool use_version_es2 = false;
@@ -296,25 +301,32 @@ bool SharedContextState::InitializeGrContext(
glProgramBinary(program, binaryFormat, binary, length);
};
}
- // If you make any changes to the GrContext::Options here that could
- // affect text rendering, make sure to match the capabilities initialized
- // in GetCapabilities and ensuring these are also used by the
- // PaintOpBufferSerializer.
- GrContextOptions options = GetDefaultGrContextOptions(GrContextType::kGL);
options.fDriverBugWorkarounds =
GrDriverBugWorkarounds(workarounds.ToIntSet());
- options.fPersistentCache = cache;
options.fAvoidStencilBuffers = workarounds.avoid_stencil_buffers;
if (workarounds.disable_program_disk_cache) {
options.fShaderCacheStrategy =
GrContextOptions::ShaderCacheStrategy::kBackendSource;
}
- options.fShaderErrorHandler = this;
- if (gpu_preferences.force_max_texture_size)
- options.fMaxTextureSizeOverride = gpu_preferences.force_max_texture_size;
options.fPreferExternalImagesOverES3 = true;
owned_gr_context_ = GrDirectContext::MakeGL(std::move(interface), options);
gr_context_ = owned_gr_context_.get();
+ } else if (gr_context_type_ == GrContextType::kVulkan) {
+#if BUILDFLAG(ENABLE_VULKAN)
+ if (vk_context_provider_) {
+ // TODO(vasilyt): Remove this if there is no problem with caching.
+ if (!base::FeatureList::IsEnabled(
+ features::kEnableGrShaderCacheForVulkan))
+ options.fPersistentCache = nullptr;
+
+ if (!vk_context_provider_->InitializeGrContext(options)) {
+ LOG(ERROR) << "Failed to initialize GrContext for Vulkan.";
+ return false;
+ }
+ gr_context_ = vk_context_provider_->GetGrContext();
+ DCHECK(gr_context_);
+ }
+#endif
}
if (!gr_context_) {
diff --git a/chromium/gpu/command_buffer/service/shared_context_state_unittest.cc b/chromium/gpu/command_buffer/service/shared_context_state_unittest.cc
index d1cfaae8ad1..54d2dae812d 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_context_state_unittest.cc
@@ -9,7 +9,7 @@
#include <string>
#include <utility>
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/memory/ptr_util.h"
#include "gpu/command_buffer/common/gles2_cmd_utils.h"
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
index 45558ea3b6c..1aa3eabf53c 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
@@ -38,9 +38,9 @@
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/vulkan/vulkan_image.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
+#include "ui/gfx/android/android_surface_control_compat.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
-#include "ui/gl/android/android_surface_control_compat.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_fence_android_native_fence_sync.h"
#include "ui/gl/gl_gl_api_implementation.h"
@@ -227,7 +227,8 @@ class SharedImageRepresentationOverlayAHB
NOTREACHED();
}
- bool BeginReadAccess() override {
+ bool BeginReadAccess(std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) override {
gl_image_ = ahb_backing()->BeginOverlayAccess();
return !!gl_image_;
}
@@ -240,7 +241,6 @@ class SharedImageRepresentationOverlayAHB
}
gl::GLImage* GetGLImage() override { return gl_image_; }
- std::unique_ptr<gfx::GpuFence> GetReadFence() override { return nullptr; }
gl::GLImage* gl_image_ = nullptr;
};
@@ -442,6 +442,7 @@ void SharedImageBackingAHB::EndOverlayAccess() {
SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB(
const GpuDriverBugWorkarounds& workarounds,
const GpuFeatureInfo& gpu_feature_info) {
+ DCHECK(base::AndroidHardwareBufferCompat::IsSupportAvailable());
scoped_refptr<gles2::FeatureInfo> feature_info =
new gles2::FeatureInfo(workarounds, gpu_feature_info);
feature_info->Initialize(ContextType::CONTEXT_TYPE_OPENGLES2, false,
@@ -602,7 +603,7 @@ std::unique_ptr<SharedImageBacking> SharedImageBackingFactoryAHB::MakeBacking(
hwb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
if (usage & SHARED_IMAGE_USAGE_SCANOUT)
- hwb_desc.usage |= gl::SurfaceControl::RequiredUsage();
+ hwb_desc.usage |= gfx::SurfaceControl::RequiredUsage();
// Add WRITE usage as we'll it need to upload data
if (!pixel_data.empty())
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
index ba1ea3ff135..9abac17a812 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
@@ -6,7 +6,7 @@
#include "base/android/android_hardware_buffer_compat.h"
#include "base/android/scoped_hardware_buffer_fence_sync.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc
index 02d5d09af1c..b47d4bc0245 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.cc
@@ -269,6 +269,7 @@ SharedImageBackingFactoryD3D::CreateSwapChain(
nullptr /* d3d11_texture */, base::win::ScopedHandle());
if (!back_buffer_backing)
return {nullptr, nullptr};
+ back_buffer_backing->SetCleared();
auto front_buffer_backing = MakeBacking(
front_buffer_mailbox, format, size, color_space, surface_origin,
@@ -276,6 +277,7 @@ SharedImageBackingFactoryD3D::CreateSwapChain(
nullptr /* d3d11_texture */, base::win::ScopedHandle());
if (!front_buffer_backing)
return {nullptr, nullptr};
+ front_buffer_backing->SetCleared();
return {std::move(front_buffer_backing), std::move(back_buffer_backing)};
}
@@ -420,10 +422,14 @@ SharedImageBackingFactoryD3D::CreateSharedImage(
return nullptr;
}
- return MakeBacking(mailbox, viz::GetResourceFormat(format), size, color_space,
- surface_origin, alpha_type, usage, /*swap_chain=*/nullptr,
- /*buffer_index=*/0, std::move(d3d11_texture),
- std::move(handle.dxgi_handle));
+ auto backing =
+ MakeBacking(mailbox, viz::GetResourceFormat(format), size, color_space,
+ surface_origin, alpha_type, usage, /*swap_chain=*/nullptr,
+ /*buffer_index=*/0, std::move(d3d11_texture),
+ std::move(handle.dxgi_handle));
+ if (backing)
+ backing->SetCleared();
+ return backing;
}
// Returns true if the specified GpuMemoryBufferType can be imported using
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h
index 3a8f9b8608f..f5e2fcafbc4 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d.h
@@ -102,8 +102,9 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryD3D
private:
// Wraps the optional swap chain buffer (front buffer/back buffer) and texture
- // into GLimage and creates a GL texture and stores it as gles2::Texture or as
- // gles2::TexturePassthrough in the backing that is created.
+ // into GLimage and gles2::TexturePassthrough in the backing that is created.
+ // The backing isn't assumed to be cleared so it's the caller's responsibility
+ // to mark the backing as cleared using SetCleared()/SetClearedRect().
std::unique_ptr<SharedImageBacking> MakeBacking(
const Mailbox& mailbox,
viz::ResourceFormat format,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc
index 82025f30646..54682c56be9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_d3d_unittest.cc
@@ -7,7 +7,7 @@
#include <memory>
#include <utility>
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
@@ -178,8 +178,11 @@ TEST_F(SharedImageBackingFactoryD3DTestSwapChain, CreateAndPresentSwapChain) {
auto backings = shared_image_factory_->CreateSwapChain(
front_buffer_mailbox, back_buffer_mailbox, format, size, color_space,
surface_origin, alpha_type, usage);
- EXPECT_TRUE(backings.front_buffer);
- EXPECT_TRUE(backings.back_buffer);
+ ASSERT_TRUE(backings.front_buffer);
+ EXPECT_TRUE(backings.front_buffer->IsCleared());
+
+ ASSERT_TRUE(backings.back_buffer);
+ EXPECT_TRUE(backings.back_buffer->IsCleared());
std::unique_ptr<SharedImageRepresentationFactoryRef> back_factory_ref =
shared_image_manager_.Register(std::move(backings.back_buffer),
@@ -188,19 +191,16 @@ TEST_F(SharedImageBackingFactoryD3DTestSwapChain, CreateAndPresentSwapChain) {
shared_image_manager_.Register(std::move(backings.front_buffer),
memory_type_tracker_.get());
- GLuint back_texture_id, front_texture_id = 0u;
- gl::GLImageD3D *back_image, *front_image = 0u;
-
auto back_texture = shared_image_representation_factory_
->ProduceGLTexturePassthrough(back_buffer_mailbox)
->GetTexturePassthrough();
ASSERT_TRUE(back_texture);
EXPECT_EQ(back_texture->target(), static_cast<unsigned>(GL_TEXTURE_2D));
- back_texture_id = back_texture->service_id();
+ GLuint back_texture_id = back_texture->service_id();
EXPECT_NE(back_texture_id, 0u);
- back_image = gl::GLImageD3D::FromGLImage(
+ auto* back_image = gl::GLImageD3D::FromGLImage(
back_texture->GetLevelImage(GL_TEXTURE_2D, 0));
auto front_texture = shared_image_representation_factory_
@@ -209,10 +209,10 @@ TEST_F(SharedImageBackingFactoryD3DTestSwapChain, CreateAndPresentSwapChain) {
ASSERT_TRUE(front_texture);
EXPECT_EQ(front_texture->target(), static_cast<unsigned>(GL_TEXTURE_2D));
- front_texture_id = front_texture->service_id();
+ GLuint front_texture_id = front_texture->service_id();
EXPECT_NE(front_texture_id, 0u);
- front_image = gl::GLImageD3D::FromGLImage(
+ auto* front_image = gl::GLImageD3D::FromGLImage(
front_texture->GetLevelImage(GL_TEXTURE_2D, 0));
ASSERT_TRUE(back_image);
@@ -569,7 +569,7 @@ TEST_F(SharedImageBackingFactoryD3DTest, Dawn_SkiaGL) {
SharedImageRepresentation::AllowUnclearedAccess::kYes);
ASSERT_TRUE(scoped_access);
- wgpu::Texture texture = wgpu::Texture::Acquire(scoped_access->texture());
+ wgpu::Texture texture(scoped_access->texture());
wgpu::RenderPassColorAttachmentDescriptor color_desc;
color_desc.attachment = texture.CreateView();
@@ -686,8 +686,7 @@ TEST_F(SharedImageBackingFactoryD3DTest, GL_Dawn_Skia_UnclearTexture) {
SharedImageRepresentation::AllowUnclearedAccess::kYes);
ASSERT_TRUE(dawn_scoped_access);
- wgpu::Texture texture =
- wgpu::Texture::Acquire(dawn_scoped_access->texture());
+ wgpu::Texture texture(dawn_scoped_access->texture());
wgpu::RenderPassColorAttachmentDescriptor color_desc;
color_desc.attachment = texture.CreateView();
color_desc.resolveTarget = nullptr;
@@ -771,8 +770,7 @@ TEST_F(SharedImageBackingFactoryD3DTest, UnclearDawn_SkiaFails) {
SharedImageRepresentation::AllowUnclearedAccess::kYes);
ASSERT_TRUE(dawn_scoped_access);
- wgpu::Texture texture =
- wgpu::Texture::Acquire(dawn_scoped_access->texture());
+ wgpu::Texture texture(dawn_scoped_access->texture());
wgpu::RenderPassColorAttachmentDescriptor color_desc;
color_desc.attachment = texture.CreateView();
color_desc.resolveTarget = nullptr;
@@ -908,6 +906,7 @@ TEST_F(SharedImageBackingFactoryD3DTest, CreateSharedImageFromHandle) {
EXPECT_EQ(backing->surface_origin(), surface_origin);
EXPECT_EQ(backing->alpha_type(), alpha_type);
EXPECT_EQ(backing->mailbox(), mailbox);
+ EXPECT_TRUE(backing->IsCleared());
SharedImageBackingD3D* backing_d3d =
static_cast<SharedImageBackingD3D*>(backing.get());
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
index 276317a3f26..5ab338b8cb7 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
@@ -6,7 +6,7 @@
#include <thread>
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/optional.h"
#include "base/strings/stringprintf.h"
#include "build/build_config.h"
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
index 78ebc2f1ca7..d7ba9665274 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
@@ -122,18 +122,11 @@ class SharedImageRepresentationDawnIOSurface
dawn_native::metal::ExternalImageDescriptorIOSurface descriptor;
descriptor.cTextureDescriptor = &texture_descriptor;
- descriptor.isCleared = IsCleared();
+ descriptor.isInitialized = IsCleared();
descriptor.ioSurface = io_surface_.get();
descriptor.plane = 0;
texture_ = dawn_native::metal::WrapIOSurface(device_, &descriptor);
-
- if (texture_) {
- // Keep a reference to the texture so that it stays valid (its content
- // might be destroyed).
- dawn_procs_.textureReference(texture_);
- }
-
return texture_;
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
index 8fc0c73f8a1..e7f6833e042 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
@@ -7,7 +7,7 @@
#include <memory>
#include <utility>
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/mailbox_manager_impl.h"
#include "gpu/command_buffer/service/shared_context_state.h"
@@ -416,7 +416,7 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) {
WGPUTextureUsage_OutputAttachment,
SharedImageRepresentation::AllowUnclearedAccess::kYes);
ASSERT_TRUE(scoped_access);
- wgpu::Texture texture = wgpu::Texture::Acquire(scoped_access->texture());
+ wgpu::Texture texture(scoped_access->texture());
wgpu::RenderPassColorAttachmentDescriptor color_desc;
color_desc.attachment = texture.CreateView();
@@ -540,8 +540,7 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, GL_Dawn_Skia_UnclearTexture) {
SharedImageRepresentation::AllowUnclearedAccess::kYes);
ASSERT_TRUE(dawn_scoped_access);
- wgpu::Texture texture =
- wgpu::Texture::Acquire(dawn_scoped_access->texture());
+ wgpu::Texture texture(dawn_scoped_access->texture());
wgpu::RenderPassColorAttachmentDescriptor color_desc;
color_desc.attachment = texture.CreateView();
color_desc.resolveTarget = nullptr;
@@ -623,8 +622,7 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, UnclearDawn_SkiaFails) {
SharedImageRepresentation::AllowUnclearedAccess::kYes);
ASSERT_TRUE(dawn_scoped_access);
- wgpu::Texture texture =
- wgpu::Texture::Acquire(dawn_scoped_access->texture());
+ wgpu::Texture texture(dawn_scoped_access->texture());
wgpu::RenderPassColorAttachmentDescriptor color_desc;
color_desc.attachment = texture.CreateView();
color_desc.resolveTarget = nullptr;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc b/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc
index 0318fd9d783..e2e3041bad6 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.cc
@@ -248,7 +248,13 @@ SharedImageRepresentationOverlayImpl::SharedImageRepresentationOverlayImpl(
SharedImageRepresentationOverlayImpl::~SharedImageRepresentationOverlayImpl() =
default;
-bool SharedImageRepresentationOverlayImpl::BeginReadAccess() {
+bool SharedImageRepresentationOverlayImpl::BeginReadAccess(
+ std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) {
+ auto* gl_backing = static_cast<SharedImageBackingGLImage*>(backing());
+ std::unique_ptr<gfx::GpuFence> fence = gl_backing->GetLastWriteGpuFence();
+ if (fence)
+ acquire_fences->push_back(std::move(*fence));
return true;
}
@@ -258,12 +264,6 @@ gl::GLImage* SharedImageRepresentationOverlayImpl::GetGLImage() {
return gl_image_.get();
}
-std::unique_ptr<gfx::GpuFence>
-SharedImageRepresentationOverlayImpl::GetReadFence() {
- auto* gl_backing = static_cast<SharedImageBackingGLImage*>(backing());
- return gl_backing->GetLastWriteGpuFence();
-}
-
///////////////////////////////////////////////////////////////////////////////
// SharedImageBackingGLImage
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h b/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h
index 39a7b45a013..4142204de43 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_gl_image.h
@@ -131,10 +131,10 @@ class SharedImageRepresentationOverlayImpl
~SharedImageRepresentationOverlayImpl() override;
private:
- bool BeginReadAccess() override;
+ bool BeginReadAccess(std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) override;
void EndReadAccess() override;
gl::GLImage* GetGLImage() override;
- std::unique_ptr<gfx::GpuFence> GetReadFence() override;
scoped_refptr<gl::GLImage> gl_image_;
};
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc
index c671ee2f0b4..528b0b1e678 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc
@@ -9,6 +9,7 @@
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "build/build_config.h"
+#include "build/chromeos_buildflags.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
@@ -40,7 +41,7 @@
#include "gpu/vulkan/vulkan_device_queue.h"
#elif defined(OS_MAC)
#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h"
-#elif defined(OS_CHROMEOS)
+#elif BUILDFLAG(IS_ASH)
#include "gpu/command_buffer/service/shared_image_backing_factory_ozone.h"
#endif
@@ -56,6 +57,7 @@
#endif // defined(OS_FUCHSIA)
#if defined(OS_ANDROID)
+#include "base/android/android_hardware_buffer_compat.h"
#include "base/android/scoped_hardware_buffer_fence_sync.h"
#include "gpu/command_buffer/service/shared_image_backing_scoped_hardware_buffer_fence_sync.h"
#endif
@@ -136,7 +138,7 @@ SharedImageFactory::SharedImageFactory(
interop_backing_factory_ = std::make_unique<SharedImageBackingFactoryAHB>(
workarounds, gpu_feature_info);
}
- } else {
+ } else if (base::AndroidHardwareBufferCompat::IsSupportAvailable()) {
interop_backing_factory_ = std::make_unique<SharedImageBackingFactoryAHB>(
workarounds, gpu_feature_info);
}
@@ -144,7 +146,7 @@ SharedImageFactory::SharedImageFactory(
// OSX
DCHECK(gr_context_type_ == GrContextType::kGL ||
gr_context_type_ == GrContextType::kMetal);
-#elif defined(OS_CHROMEOS)
+#elif BUILDFLAG(IS_ASH)
if (gr_context_type_ == GrContextType::kVulkan) {
interop_backing_factory_ =
std::make_unique<SharedImageBackingFactoryOzone>(context_state);
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.cc b/chromium/gpu/command_buffer/service/shared_image_representation.cc
index e5ddefc7f4a..7d44ba8a821 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.cc
@@ -229,10 +229,12 @@ SharedImageRepresentationOverlay::ScopedReadAccess::ScopedReadAccess(
util::PassKey<SharedImageRepresentationOverlay> pass_key,
SharedImageRepresentationOverlay* representation,
gl::GLImage* gl_image,
- std::unique_ptr<gfx::GpuFence> fence)
+ std::vector<gfx::GpuFence> acquire_fences,
+ std::vector<gfx::GpuFence> release_fences)
: ScopedAccessBase(representation),
gl_image_(gl_image),
- fence_(std::move(fence)) {}
+ acquire_fences_(std::move(acquire_fences)),
+ release_fences_(std::move(release_fences)) {}
SharedImageRepresentationOverlay::ScopedReadAccess::~ScopedReadAccess() {
representation()->EndReadAccess();
@@ -245,14 +247,17 @@ SharedImageRepresentationOverlay::BeginScopedReadAccess(bool needs_gl_image) {
return nullptr;
}
- if (!BeginReadAccess())
+ std::vector<gfx::GpuFence> acquire_fences;
+ std::vector<gfx::GpuFence> release_fences;
+ if (!BeginReadAccess(&acquire_fences, &release_fences))
return nullptr;
backing()->OnReadSucceeded();
return std::make_unique<ScopedReadAccess>(
util::PassKey<SharedImageRepresentationOverlay>(), this,
- needs_gl_image ? GetGLImage() : nullptr, GetReadFence());
+ needs_gl_image ? GetGLImage() : nullptr, std::move(acquire_fences),
+ std::move(release_fences));
}
SharedImageRepresentationDawn::ScopedAccess::ScopedAccess(
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.h b/chromium/gpu/command_buffer/service/shared_image_representation.h
index 2ca5a057479..2caf5eeafa8 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.h
@@ -364,6 +364,8 @@ class GPU_GLES2_EXPORT SharedImageRepresentationDawn
WGPUTexture texture);
~ScopedAccess();
+ // Get the unowned texture handle. The caller should take a reference
+ // if necessary by doing wgpu::Texture texture(access->texture());
WGPUTexture texture() const { return texture_; }
private:
@@ -398,16 +400,23 @@ class GPU_GLES2_EXPORT SharedImageRepresentationOverlay
ScopedReadAccess(util::PassKey<SharedImageRepresentationOverlay> pass_key,
SharedImageRepresentationOverlay* representation,
gl::GLImage* gl_image,
- std::unique_ptr<gfx::GpuFence> fence);
+ std::vector<gfx::GpuFence> acquire_fences,
+ std::vector<gfx::GpuFence> release_fences);
~ScopedReadAccess();
gl::GLImage* gl_image() const { return gl_image_; }
- std::unique_ptr<gfx::GpuFence> TakeFence() { return std::move(fence_); }
+ std::vector<gfx::GpuFence> TakeAcquireFences() {
+ return std::move(acquire_fences_);
+ }
+ std::vector<gfx::GpuFence> TakeReleaseFences() {
+ return std::move(release_fences_);
+ }
private:
- gl::GLImage* gl_image_;
- std::unique_ptr<gfx::GpuFence> fence_;
+ gl::GLImage* const gl_image_;
+ std::vector<gfx::GpuFence> acquire_fences_;
+ std::vector<gfx::GpuFence> release_fences_;
};
#if defined(OS_ANDROID)
@@ -420,7 +429,16 @@ class GPU_GLES2_EXPORT SharedImageRepresentationOverlay
protected:
// TODO(weiliangc): Currently this only handles Android pre-SurfaceControl
// case. Add appropriate fence later.
- virtual bool BeginReadAccess() = 0;
+
+ // Notifies the backing that an access will start. Returns false if there is a
+ // conflict. Otherwise, returns true and:
+ // - Adds gpu fences to |acquire_fences| that should be waited on before the
+ // SharedImage is ready to be displayed. These fences are fired when the gpu
+ // has finished writing.
+ // - Adds gpu fences to |release_fences| that are signalled by the display
+ // after pixmap has been displayed and is ready for reuse.
+ virtual bool BeginReadAccess(std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) = 0;
virtual void EndReadAccess() = 0;
// TODO(weiliangc): Add API to backing AHardwareBuffer.
@@ -428,9 +446,6 @@ class GPU_GLES2_EXPORT SharedImageRepresentationOverlay
// TODO(penghuang): Refactor it to not depend on GL.
// Get the backing as GLImage for GLSurface::ScheduleOverlayPlane.
virtual gl::GLImage* GetGLImage() = 0;
- // Optionally returns a fence to synchronize writes on the SharedImage with
- // overlay presentation.
- virtual std::unique_ptr<gfx::GpuFence> GetReadFence() = 0;
};
// An interface that allows a SharedImageBacking to hold a reference to VA-API
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_d3d.cc b/chromium/gpu/command_buffer/service/shared_image_representation_d3d.cc
index afde8952d16..6fc39638f49 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_d3d.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_d3d.cc
@@ -90,7 +90,7 @@ WGPUTexture SharedImageRepresentationDawnD3D::BeginAccess(
dawn_native::d3d12::ExternalImageDescriptorDXGISharedHandle descriptor;
descriptor.cTextureDescriptor = &texture_descriptor;
- descriptor.isCleared = IsCleared();
+ descriptor.isInitialized = IsCleared();
descriptor.sharedHandle = shared_handle;
descriptor.acquireMutexKey = shared_mutex_acquire_key;
descriptor.isSwapChainTexture =
@@ -98,11 +98,7 @@ WGPUTexture SharedImageRepresentationDawnD3D::BeginAccess(
SHARED_IMAGE_USAGE_WEBGPU_SWAP_CHAIN_TEXTURE);
texture_ = dawn_native::d3d12::WrapSharedHandle(device_, &descriptor);
- if (texture_) {
- // Keep a reference to the texture so that it stays valid (its content
- // might be destroyed).
- dawn_procs_.textureReference(texture_);
- } else {
+ if (!texture_) {
d3d_image_backing->EndAccessD3D12();
}
@@ -138,7 +134,9 @@ SharedImageRepresentationOverlayD3D::SharedImageRepresentationOverlayD3D(
MemoryTypeTracker* tracker)
: SharedImageRepresentationOverlay(manager, backing, tracker) {}
-bool SharedImageRepresentationOverlayD3D::BeginReadAccess() {
+bool SharedImageRepresentationOverlayD3D::BeginReadAccess(
+ std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) {
// Note: only the DX11 video decoder uses this overlay and does not need to
// synchronize read access from different devices.
return true;
@@ -150,9 +148,4 @@ gl::GLImage* SharedImageRepresentationOverlayD3D::GetGLImage() {
return static_cast<SharedImageBackingD3D*>(backing())->GetGLImage();
}
-std::unique_ptr<gfx::GpuFence>
-SharedImageRepresentationOverlayD3D::GetReadFence() {
- return nullptr;
-}
-
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_d3d.h b/chromium/gpu/command_buffer/service/shared_image_representation_d3d.h
index 164b974431d..501e3047847 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_d3d.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_d3d.h
@@ -77,11 +77,11 @@ class SharedImageRepresentationOverlayD3D
~SharedImageRepresentationOverlayD3D() override = default;
private:
- bool BeginReadAccess() override;
+ bool BeginReadAccess(std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) override;
void EndReadAccess() override;
gl::GLImage* GetGLImage() override;
- std::unique_ptr<gfx::GpuFence> GetReadFence() override;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc
index 101b41e84cd..9199032ed80 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_dawn_ozone.cc
@@ -7,6 +7,7 @@
#include <dawn_native/VulkanBackend.h>
#include <vulkan/vulkan.h>
+#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_refptr.h"
#include "gpu/command_buffer/service/memory_tracking.h"
@@ -69,7 +70,7 @@ WGPUTexture SharedImageRepresentationDawnOzone::BeginAccess(
dawn_native::vulkan::ExternalImageDescriptorDmaBuf descriptor = {};
descriptor.cTextureDescriptor = &texture_descriptor;
- descriptor.isCleared = IsCleared();
+ descriptor.isInitialized = IsCleared();
// Import the dma-buf into Dawn via the Vulkan backend. As per the Vulkan
// documentation, importing memory from a file descriptor transfers
// ownership of the fd from the application to the Vulkan implementation.
@@ -82,11 +83,7 @@ WGPUTexture SharedImageRepresentationDawnOzone::BeginAccess(
descriptor.waitFDs = {};
texture_ = dawn_native::vulkan::WrapVulkanImage(device_, &descriptor);
- if (texture_) {
- // Keep a reference to the texture so that it stays valid (its content
- // might be destroyed).
- dawn_procs_->data.textureReference(texture_);
- } else {
+ if (!texture_) {
close(fd);
}
@@ -98,12 +95,19 @@ void SharedImageRepresentationDawnOzone::EndAccess() {
return;
}
- if (dawn_native::IsTextureSubresourceInitialized(texture_, 0, 1, 0, 1)) {
- SetCleared();
- }
+ // Grab the signal semaphore from dawn
+ dawn_native::vulkan::ExternalImageExportInfoOpaqueFD export_info;
+ if (!dawn_native::vulkan::ExportVulkanImage(
+ texture_, VK_IMAGE_LAYOUT_UNDEFINED, &export_info)) {
+ DLOG(ERROR) << "Failed to export Dawn Vulkan image.";
+ } else {
+ if (export_info.isInitialized) {
+ SetCleared();
+ }
- // TODO(hob): Synchronize access to the dma-buf by exporting the VkSemaphore
- // from the WebGPU texture.
+ // TODO(hob): Synchronize access to the dma-buf by waiting on
+ // |export_info.semaphoreHandles|
+ }
dawn_procs_->data.textureDestroy(texture_);
dawn_procs_->data.textureRelease(texture_);
texture_ = nullptr;
diff --git a/chromium/gpu/command_buffer/service/shared_image_video.cc b/chromium/gpu/command_buffer/service/shared_image_video.cc
index 9cb70be0488..d916cfb5a7e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_video.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_video.cc
@@ -414,7 +414,8 @@ class SharedImageRepresentationOverlayVideo
stream_image_(backing->stream_texture_sii_) {}
protected:
- bool BeginReadAccess() override {
+ bool BeginReadAccess(std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) override {
// A |CodecImage| is already in a SurfaceView, render content to the
// overlay.
if (!stream_image_->HasTextureOwner()) {
@@ -433,8 +434,6 @@ class SharedImageRepresentationOverlayVideo
return stream_image_.get();
}
- std::unique_ptr<gfx::GpuFence> GetReadFence() override { return nullptr; }
-
void NotifyOverlayPromotion(bool promotion,
const gfx::Rect& bounds) override {
stream_image_->NotifyOverlayPromotion(promotion, bounds);
diff --git a/chromium/gpu/command_buffer/service/skia_utils.cc b/chromium/gpu/command_buffer/service/skia_utils.cc
index aad43a38881..341de2c7de9 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.cc
+++ b/chromium/gpu/command_buffer/service/skia_utils.cc
@@ -4,11 +4,13 @@
#include "gpu/command_buffer/service/skia_utils.h"
+#include "base/command_line.h"
#include "base/logging.h"
#include "build/build_config.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/config/gpu_switches.h"
#include "gpu/config/skia_limits.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/gl/GrGLTypes.h"
@@ -61,7 +63,7 @@ void DeleteSkObject(SharedContextState* context_state, sk_sp<T> sk_object) {
auto* fence_helper =
context_state->vk_context_provider()->GetDeviceQueue()->GetFenceHelper();
fence_helper->EnqueueCleanupTaskForSubmittedWork(base::BindOnce(
- [](const sk_sp<GrContext>& gr_context, sk_sp<T> sk_object,
+ [](const sk_sp<GrDirectContext>& gr_context, sk_sp<T> sk_object,
gpu::VulkanDeviceQueue* device_queue, bool is_lost) {},
sk_ref_sp(context_state->gr_context()), std::move(sk_object)));
#endif
@@ -86,6 +88,11 @@ GrContextOptions GetDefaultGrContextOptions(GrContextType type) {
options.fInternalMultisampleCount = 0;
if (type == GrContextType::kMetal)
options.fRuntimeProgramCacheSize = 1024;
+
+ options.fSuppressMipmapSupport =
+ base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableMipmapGeneration);
+
return options;
}
@@ -180,8 +187,9 @@ void DeleteGrBackendTexture(SharedContextState* context_state,
auto* fence_helper =
context_state->vk_context_provider()->GetDeviceQueue()->GetFenceHelper();
fence_helper->EnqueueCleanupTaskForSubmittedWork(base::BindOnce(
- [](const sk_sp<GrContext>& gr_context, GrBackendTexture backend_texture,
- gpu::VulkanDeviceQueue* device_queue, bool is_lost) {
+ [](const sk_sp<GrDirectContext>& gr_context,
+ GrBackendTexture backend_texture, gpu::VulkanDeviceQueue* device_queue,
+ bool is_lost) {
if (!gr_context->abandoned())
gr_context->deleteBackendTexture(std::move(backend_texture));
},
@@ -218,6 +226,8 @@ GrVkImageInfo CreateGrVkImageInfo(VulkanImage* image) {
image_info.fImageTiling = image->image_tiling();
image_info.fImageLayout = image->image_layout();
image_info.fFormat = image->format();
+ image_info.fImageUsageFlags = image->usage();
+ image_info.fSampleCount = 1;
image_info.fLevelCount = 1;
image_info.fCurrentQueueFamily = image->queue_family_index();
image_info.fProtected = is_protected ? GrProtected::kYes : GrProtected::kNo;
@@ -283,4 +293,22 @@ GrVkYcbcrConversionInfo CreateGrVkYcbcrConversionInfo(
#endif // BUILDFLAG(ENABLE_VULKAN)
+bool ShouldVulkanSyncCpuForSkiaSubmit(
+ viz::VulkanContextProvider* context_provider) {
+#if BUILDFLAG(ENABLE_VULKAN)
+ if (context_provider) {
+ const base::Optional<uint32_t>& sync_cpu_memory_limit =
+ context_provider->GetSyncCpuMemoryLimit();
+ if (sync_cpu_memory_limit.has_value()) {
+ uint64_t total_allocated_bytes = gpu::vma::GetTotalAllocatedMemory(
+ context_provider->GetDeviceQueue()->vma_allocator());
+ if (total_allocated_bytes > sync_cpu_memory_limit.value()) {
+ return true;
+ }
+ }
+ }
+#endif
+ return false;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/skia_utils.h b/chromium/gpu/command_buffer/service/skia_utils.h
index e5661a0e2b2..49843126d23 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.h
+++ b/chromium/gpu/command_buffer/service/skia_utils.h
@@ -92,6 +92,12 @@ GPU_GLES2_EXPORT GrVkYcbcrConversionInfo CreateGrVkYcbcrConversionInfo(
const base::Optional<VulkanYCbCrInfo>& ycbcr_info);
#endif // BUILDFLAG(ENABLE_VULKAN)
+// Helper that returns true when Vulkan memory usage is high enough
+// that Skia submit calls should synchronize with the CPU in order
+// to free released memory immediately.
+GPU_GLES2_EXPORT bool ShouldVulkanSyncCpuForSkiaSubmit(
+ viz::VulkanContextProvider* context_provider);
+
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_SERVICE_SKIA_UTILS_H_
diff --git a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
index d51e7bd7f7b..3c98be2bcbf 100644
--- a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
@@ -6,6 +6,7 @@
#include "build/build_config.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
+#include "skia/ext/legacy_display_globals.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/mock/GrMockTypes.h"
@@ -72,7 +73,9 @@ class TestSharedImageRepresentationSkia : public SharedImageRepresentationSkia {
if (!static_cast<TestSharedImageBacking*>(backing())->can_access()) {
return nullptr;
}
- return SkSurface::MakeRasterN32Premul(size().width(), size().height());
+ SkSurfaceProps props = skia::LegacyDisplayGlobals::GetSkSurfaceProps();
+ return SkSurface::MakeRasterN32Premul(size().width(), size().height(),
+ &props);
}
void EndWriteAccess(sk_sp<SkSurface> surface) override {}
sk_sp<SkPromiseImageTexture> BeginReadAccess(
@@ -115,10 +118,12 @@ class TestSharedImageRepresentationOverlay
MemoryTypeTracker* tracker)
: SharedImageRepresentationOverlay(manager, backing, tracker) {}
- bool BeginReadAccess() override { return true; }
+ bool BeginReadAccess(std::vector<gfx::GpuFence>* acquire_fences,
+ std::vector<gfx::GpuFence>* release_fences) override {
+ return true;
+ }
void EndReadAccess() override {}
gl::GLImage* GetGLImage() override { return nullptr; }
- std::unique_ptr<gfx::GpuFence> GetReadFence() override { return nullptr; }
#if defined(OS_ANDROID)
void NotifyOverlayPromotion(bool promotion,
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index 129463a1f9a..528a13bd7c4 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -360,6 +360,8 @@ bool SizedFormatAvailable(const FeatureInfo* feature_info,
if ((feature_info->feature_flags().chromium_image_ycbcr_420v &&
internal_format == GL_RGB_YCBCR_420V_CHROMIUM) ||
+ (feature_info->feature_flags().chromium_image_ycbcr_p010 &&
+ internal_format == GL_RGB_YCBCR_P010_CHROMIUM) ||
(feature_info->feature_flags().chromium_image_ycbcr_422 &&
internal_format == GL_RGB_YCBCR_422_CHROMIUM)) {
return true;
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
index bacd6d560e7..3d3048e1a8b 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
@@ -39,11 +39,17 @@ constexpr size_t kMaxWireBufferSize =
std::min(IPC::Channel::kMaximumMessageSize,
static_cast<size_t>(1024 * 1024));
+constexpr size_t kDawnReturnCmdsOffset =
+ offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer);
+
+static_assert(kDawnReturnCmdsOffset < kMaxWireBufferSize, "");
+
class WireServerCommandSerializer : public dawn_wire::CommandSerializer {
public:
WireServerCommandSerializer(DecoderClient* client,
DawnDeviceClientID device_client_id);
~WireServerCommandSerializer() override = default;
+ size_t GetMaximumAllocationSize() const final;
void* GetCmdSpace(size_t size) final;
bool Flush() final;
@@ -68,37 +74,29 @@ WireServerCommandSerializer::WireServerCommandSerializer(
header->device_client_id = device_client_id;
}
-void* WireServerCommandSerializer::GetCmdSpace(size_t size) {
- // TODO(enga): Handle chunking commands if size +
- // offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer)>
- // kMaxWireBufferSize.
- size_t total_wire_buffer_size =
- (base::CheckedNumeric<size_t>(size) +
- base::CheckedNumeric<size_t>(
- offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer)))
- .ValueOrDie();
- if (total_wire_buffer_size > kMaxWireBufferSize) {
- NOTREACHED();
- return nullptr;
- }
+size_t WireServerCommandSerializer::GetMaximumAllocationSize() const {
+ return kMaxWireBufferSize - kDawnReturnCmdsOffset;
+}
- // |next_offset| should never be more than kMaxWireBufferSize +
- // kMaxWireBufferSize.
+void* WireServerCommandSerializer::GetCmdSpace(size_t size) {
+ // Note: Dawn will never call this function with |size| >
+ // GetMaximumAllocationSize().
DCHECK_LE(put_offset_, kMaxWireBufferSize);
- DCHECK_LE(size, kMaxWireBufferSize);
+ DCHECK_LE(size, GetMaximumAllocationSize());
+
+ // Statically check that kMaxWireBufferSize + kMaxWireBufferSize is
+ // a valid uint32_t. We can add put_offset_ and size without overflow.
static_assert(base::CheckAdd(kMaxWireBufferSize, kMaxWireBufferSize)
.IsValid<uint32_t>(),
"");
- uint32_t next_offset = put_offset_ + size;
-
+ uint32_t next_offset = put_offset_ + static_cast<uint32_t>(size);
if (next_offset > buffer_.size()) {
Flush();
// TODO(enga): Keep track of how much command space the application is using
// and adjust the buffer size accordingly.
- DCHECK_EQ(put_offset_,
- offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer));
- next_offset = put_offset_ + size;
+ DCHECK_EQ(put_offset_, kDawnReturnCmdsOffset);
+ next_offset = put_offset_ + static_cast<uint32_t>(size);
}
uint8_t* ptr = &buffer_[put_offset_];
@@ -107,8 +105,7 @@ void* WireServerCommandSerializer::GetCmdSpace(size_t size) {
}
bool WireServerCommandSerializer::Flush() {
- if (put_offset_ >
- offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer)) {
+ if (put_offset_ > kDawnReturnCmdsOffset) {
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
"WireServerCommandSerializer::Flush", "bytes", put_offset_);
@@ -117,7 +114,7 @@ bool WireServerCommandSerializer::Flush() {
"DawnReturnCommands", return_trace_id++);
client_->HandleReturnData(base::make_span(buffer_.data(), put_offset_));
- put_offset_ = offsetof(cmds::DawnReturnCommandsInfo, deserialized_buffer);
+ put_offset_ = kDawnReturnCmdsOffset;
}
return true;
}
@@ -148,6 +145,7 @@ class DawnDeviceAndWireServer {
~DawnDeviceAndWireServer();
WGPUDevice GetWGPUDevice() const;
+ bool HasPollingWork() const;
void PerformPollingWork();
error::Error HandleDawnCommands(const volatile char* dawn_commands,
size_t size);
@@ -179,6 +177,8 @@ class DawnDeviceAndWireServer {
base::flat_map<std::tuple<uint32_t, uint32_t>,
std::unique_ptr<SharedImageRepresentationAndAccess>>
associated_shared_image_map_;
+
+ bool has_polling_work_ = false;
};
DawnDeviceAndWireServer::DawnDeviceAndWireServer(
@@ -218,7 +218,7 @@ WGPUDevice DawnDeviceAndWireServer::GetWGPUDevice() const {
}
void DawnDeviceAndWireServer::PerformPollingWork() {
- dawn_procs_.deviceTick(wgpu_device_);
+ has_polling_work_ = dawn_native::DeviceTick(wgpu_device_);
wire_serializer_->Flush();
}
@@ -229,6 +229,7 @@ error::Error DawnDeviceAndWireServer::HandleDawnCommands(
NOTREACHED();
return error::kLostContext;
}
+ has_polling_work_ = dawn_native::DeviceTick(wgpu_device_);
wire_serializer_->Flush();
return error::kNoError;
}
@@ -308,6 +309,10 @@ error::Error DawnDeviceAndWireServer::DissociateMailbox(
return error::kNoError;
}
+bool DawnDeviceAndWireServer::HasPollingWork() const {
+ return has_polling_work_;
+}
+
} // namespace
class WebGPUDecoderImpl final : public WebGPUDecoder {
@@ -386,9 +391,14 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
bool HasMoreIdleWork() const override { return false; }
void PerformIdleWork() override {}
- // TODO(crbug.com/940985): Optimize so that this only returns true when
- // deviceTick is needed.
- bool HasPollingWork() const override { return true; }
+ bool HasPollingWork() const override {
+ for (auto& iter : dawn_device_and_wire_servers_) {
+ if (iter.second->HasPollingWork()) {
+ return true;
+ }
+ }
+ return false;
+ }
void PerformPollingWork() override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
@@ -555,10 +565,13 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
void SendAdapterProperties(DawnRequestAdapterSerial request_adapter_serial,
int32_t adapter_service_id,
- const dawn_native::Adapter& adapter);
+ const dawn_native::Adapter& adapter,
+ const char* error_message = nullptr);
void SendRequestedDeviceInfo(DawnDeviceClientID device_client_id,
bool is_request_device_success);
+ const GrContextType gr_context_type_;
+
std::unique_ptr<SharedImageRepresentationFactory>
shared_image_representation_factory_;
@@ -570,6 +583,8 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
std::unique_ptr<dawn_native::Instance> dawn_instance_;
std::vector<dawn_native::Adapter> dawn_adapters_;
+ bool disable_dawn_robustness_;
+
DISALLOW_COPY_AND_ASSIGN(WebGPUDecoderImpl);
};
@@ -605,13 +620,15 @@ WebGPUDecoderImpl::WebGPUDecoderImpl(
gles2::Outputter* outputter,
const GpuPreferences& gpu_preferences)
: WebGPUDecoder(client, command_buffer_service, outputter),
+ gr_context_type_(gpu_preferences.gr_context_type),
shared_image_representation_factory_(
std::make_unique<SharedImageRepresentationFactory>(
shared_image_manager,
memory_tracker)),
dawn_platform_(new DawnPlatform()),
memory_transfer_service_(new DawnServiceMemoryTransferService(this)),
- dawn_instance_(new dawn_native::Instance()) {
+ dawn_instance_(new dawn_native::Instance()),
+ disable_dawn_robustness_(gpu_preferences.disable_dawn_robustness) {
dawn_instance_->SetPlatform(dawn_platform_.get());
dawn_instance_->EnableBackendValidation(
gpu_preferences.enable_dawn_backend_validation);
@@ -656,6 +673,10 @@ error::Error WebGPUDecoderImpl::InitDawnDeviceAndSetWireServer(
device_descriptor.requiredExtensions.push_back("timestamp_query");
}
+ if (disable_dawn_robustness_) {
+ device_descriptor.forceEnabledToggles.push_back("disable_robustness");
+ }
+
WGPUDevice wgpu_device =
dawn_adapters_[requested_adapter_index].CreateDevice(&device_descriptor);
if (wgpu_device == nullptr) {
@@ -821,20 +842,28 @@ error::Error WebGPUDecoderImpl::DoCommands(unsigned int num_commands,
void WebGPUDecoderImpl::SendAdapterProperties(
DawnRequestAdapterSerial request_adapter_serial,
int32_t adapter_service_id,
- const dawn_native::Adapter& adapter) {
- WGPUDeviceProperties adapter_properties =
- (adapter) ? adapter.GetAdapterProperties() : WGPUDeviceProperties{};
-
- if (!adapter) {
+ const dawn_native::Adapter& adapter,
+ const char* error_message) {
+ WGPUDeviceProperties adapter_properties;
+ size_t serialized_adapter_properties_size = 0;
+
+ if (adapter) {
+ adapter_properties = adapter.GetAdapterProperties();
+ serialized_adapter_properties_size =
+ dawn_wire::SerializedWGPUDevicePropertiesSize(&adapter_properties);
+ } else {
// If there's no adapter, the adapter_service_id should be -1
DCHECK_EQ(adapter_service_id, -1);
}
- size_t serialized_adapter_properties_size =
- dawn_wire::SerializedWGPUDevicePropertiesSize(&adapter_properties);
+ size_t error_message_size =
+ error_message == nullptr ? 0 : strlen(error_message);
+
+ // Get serialization space for the return struct and variable-length data:
+ // The serialized adapter properties, the error message, and null-terminator.
std::vector<char> serialized_buffer(
offsetof(cmds::DawnReturnAdapterInfo, deserialized_buffer) +
- serialized_adapter_properties_size);
+ serialized_adapter_properties_size + error_message_size + 1);
cmds::DawnReturnAdapterInfo* return_adapter_info =
reinterpret_cast<cmds::DawnReturnAdapterInfo*>(serialized_buffer.data());
@@ -846,9 +875,28 @@ void WebGPUDecoderImpl::SendAdapterProperties(
return_adapter_info->header.request_adapter_serial = request_adapter_serial;
return_adapter_info->header.adapter_service_id = adapter_service_id;
- // Set serialized adapter properties
- dawn_wire::SerializeWGPUDeviceProperties(
- &adapter_properties, return_adapter_info->deserialized_buffer);
+ DCHECK(serialized_adapter_properties_size <=
+ std::numeric_limits<uint32_t>::max());
+
+ return_adapter_info->adapter_properties_size =
+ static_cast<uint32_t>(serialized_adapter_properties_size);
+
+ if (adapter) {
+ // Set serialized adapter properties
+ dawn_wire::SerializeWGPUDeviceProperties(
+ &adapter_properties, return_adapter_info->deserialized_buffer);
+ }
+
+ // Copy the error message
+ memcpy(return_adapter_info->deserialized_buffer +
+ serialized_adapter_properties_size,
+ error_message, error_message_size);
+
+ // Write the null-terminator.
+ // We don't copy (error_message_size + 1) above because |error_message| may
+ // be nullptr instead of zero-length.
+ return_adapter_info->deserialized_buffer[serialized_adapter_properties_size +
+ error_message_size] = '\0';
client()->HandleReturnData(base::make_span(
reinterpret_cast<const uint8_t*>(serialized_buffer.data()),
@@ -880,6 +928,15 @@ error::Error WebGPUDecoderImpl::HandleRequestAdapter(
DawnRequestAdapterSerial request_adapter_serial =
static_cast<DawnRequestAdapterSerial>(c.request_adapter_serial);
+ if (gr_context_type_ != GrContextType::kVulkan) {
+#if defined(OS_LINUX) || defined(OS_CHROMEOS)
+ SendAdapterProperties(request_adapter_serial, -1, nullptr,
+ "WebGPU on Linux requires command-line flag "
+ "--enable-features=Vulkan,UseSkiaRenderer");
+ return error::kNoError;
+#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
+ }
+
int32_t requested_adapter_index = GetPreferredAdapterIndex(power_preference);
if (requested_adapter_index < 0) {
// There are no adapters to return since webgpu is not supported here
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc
index e2dcad9734b..9072d38f14b 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc
@@ -6,24 +6,13 @@
#include "build/build_config.h"
#include "gpu/command_buffer/client/client_test_helper.h"
-#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/common/webgpu_cmd_format.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
-#include "gpu/command_buffer/service/mailbox_manager_impl.h"
-#include "gpu/command_buffer/service/shared_image_factory.h"
-#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "gpu/config/gpu_test_config.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_surface.h"
-#include "ui/gl/init/gl_factory.h"
-
-#if defined(OS_MAC)
-#include "gpu/ipc/service/gpu_memory_buffer_factory_io_surface.h"
-#endif
using ::testing::_;
using ::testing::Return;
@@ -40,34 +29,11 @@ class WebGPUDecoderTest : public ::testing::Test {
if (!WebGPUSupported()) {
return;
}
- // Shared image factories for some backends take a dependency on GL.
- // Failure to create a test context with a surface and making it current
- // will result in a "NoContext" context being current that asserts on all
- // GL calls.
- gl_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size(1, 1));
- ASSERT_NE(gl_surface_, nullptr);
-
- gl_context_ = gl::init::CreateGLContext(nullptr, gl_surface_.get(),
- gl::GLContextAttribs());
- ASSERT_NE(gl_context_, nullptr);
-
- gl_context_->MakeCurrent(gl_surface_.get());
-
-#if defined(OS_WIN)
- // D3D shared images are only supported with passthrough command decoder.
- gpu_preferences_.use_passthrough_cmd_decoder = true;
-#endif // OS_WIN
-
- ImageFactory* image_factory = nullptr;
-#if defined(OS_MAC)
- image_factory = &image_factory_;
-#endif
-
decoder_client_.reset(new FakeDecoderClient());
command_buffer_service_.reset(new FakeCommandBufferServiceBase());
decoder_.reset(WebGPUDecoder::Create(
- decoder_client_.get(), command_buffer_service_.get(),
- &shared_image_manager_, nullptr, &outputter_, gpu_preferences_));
+ decoder_client_.get(), command_buffer_service_.get(), nullptr, nullptr,
+ &outputter_, GpuPreferences()));
ASSERT_EQ(decoder_->Initialize(), ContextResult::kSuccess);
constexpr uint32_t kAdapterClientID = 0;
@@ -81,30 +47,11 @@ class WebGPUDecoderTest : public ::testing::Test {
cmds::RequestDevice requestDeviceCmd;
requestDeviceCmd.Init(kDeviceClientID, kAdapterServiceID, 0, 0, 0);
ASSERT_EQ(error::kNoError, ExecuteCmd(requestDeviceCmd));
-
- factory_ = std::make_unique<SharedImageFactory>(
- gpu_preferences_, GpuDriverBugWorkarounds(), GpuFeatureInfo(),
- /*context_state=*/nullptr, &mailbox_manager_, &shared_image_manager_,
- image_factory, /*tracker=*/nullptr,
- /*enable_wrapped_sk_image=*/false);
- }
-
- void TearDown() override {
- if (factory_) {
- factory_->DestroyAllSharedImages(true);
- factory_.reset();
- }
-
- gl_surface_.reset();
- gl_context_.reset();
}
bool WebGPUSupported() const {
// WebGPU does not work on Win7 because there is no D3D12 on Win7
- // Linux bots running Vulkan are not properly initializing the shared
- // image extensions.
- return !GPUTestBotConfig::CurrentConfigMatches("Win7") &&
- !GPUTestBotConfig::CurrentConfigMatches("Linux");
+ return !GPUTestBotConfig::CurrentConfigMatches("Win7");
}
template <typename T>
@@ -117,32 +64,11 @@ class WebGPUDecoderTest : public ::testing::Test {
&entries_processed);
}
- template <typename T>
- error::Error ExecuteImmediateCmd(const T& cmd, size_t data_size) {
- static_assert(T::kArgFlags == cmd::kAtLeastN,
- "T::kArgFlags should equal cmd::kAtLeastN");
- int entries_processed = 0;
- return decoder_->DoCommands(1, (const void*)&cmd,
- ComputeNumEntries(sizeof(cmd) + data_size),
- &entries_processed);
- }
-
protected:
- GpuPreferences gpu_preferences_;
std::unique_ptr<FakeCommandBufferServiceBase> command_buffer_service_;
std::unique_ptr<WebGPUDecoder> decoder_;
std::unique_ptr<FakeDecoderClient> decoder_client_;
gles2::TraceOutputter outputter_;
- SharedImageManager shared_image_manager_;
- std::unique_ptr<SharedImageFactory> factory_;
- gles2::MailboxManagerImpl mailbox_manager_;
-#if defined(OS_MAC)
- // SharedImages on macOS require a valid image factory.
- GpuMemoryBufferFactoryIOSurface image_factory_;
-#endif
- scoped_refptr<gl::GLSurface> gl_surface_;
- scoped_refptr<gl::GLContext> gl_context_;
-
static const DawnDeviceClientID kDeviceClientID = 0u;
};
@@ -157,158 +83,5 @@ TEST_F(WebGPUDecoderTest, DawnCommands) {
EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
}
-struct AssociateMailboxCmdStorage {
- cmds::AssociateMailboxImmediate cmd;
- GLbyte data[GL_MAILBOX_SIZE_CHROMIUM];
-};
-
-TEST_F(WebGPUDecoderTest, AssociateMailbox) {
- if (!WebGPUSupported()) {
- LOG(ERROR) << "Test skipped because WebGPU isn't supported";
- return;
- }
-
- gpu::Mailbox mailbox = Mailbox::GenerateForSharedImage();
- EXPECT_TRUE(factory_->CreateSharedImage(
- mailbox, viz::ResourceFormat::RGBA_8888, {1, 1},
- gfx::ColorSpace::CreateSRGB(), kTopLeft_GrSurfaceOrigin,
- kPremul_SkAlphaType, gfx::kNullAcceleratedWidget,
- SHARED_IMAGE_USAGE_WEBGPU));
-
- // Error case: invalid mailbox
- {
- gpu::Mailbox bad_mailbox;
- AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Sampled,
- bad_mailbox.name);
- EXPECT_EQ(error::kInvalidArguments,
- ExecuteImmediateCmd(cmd.cmd, sizeof(bad_mailbox.name)));
- }
-
- // Error case: device client id doesn't exist.
- {
- AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(kDeviceClientID + 1, 0, 1, 0, WGPUTextureUsage_Sampled,
- mailbox.name);
- EXPECT_EQ(error::kInvalidArguments,
- ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
- }
-
- // Error case: device generation is invalid.
- {
- AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(kDeviceClientID, 42, 1, 0, WGPUTextureUsage_Sampled,
- mailbox.name);
- EXPECT_EQ(error::kInvalidArguments,
- ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
- }
-
- // Error case: texture ID invalid for the wire server.
- {
- AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(kDeviceClientID, 0, 42, 42, WGPUTextureUsage_Sampled,
- mailbox.name);
- EXPECT_EQ(error::kInvalidArguments,
- ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
- }
-
- // Error case: invalid usage.
- {
- AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(kDeviceClientID, 0, 42, 42, WGPUTextureUsage_Sampled,
- mailbox.name);
- EXPECT_EQ(error::kInvalidArguments,
- ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
- }
-
- // Error case: invalid texture usage.
- {
- AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Force32,
- mailbox.name);
- EXPECT_EQ(error::kInvalidArguments,
- ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
- }
-
- // Control case: test a successful call to AssociateMailbox
- // (1, 0) is a valid texture ID on dawn_wire server start.
- // The control case is not put first because it modifies the internal state
- // of the Dawn wire server and would make calls with the same texture ID
- // and generation invalid.
- {
- AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Sampled,
- mailbox.name);
- EXPECT_EQ(error::kNoError,
- ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
- }
-
- // Error case: associated to an already associated texture.
- {
- AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Sampled,
- mailbox.name);
- EXPECT_EQ(error::kInvalidArguments,
- ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
- }
-
- // Dissociate the image from the control case to remove its reference.
- {
- cmds::DissociateMailbox cmd;
- cmd.Init(kDeviceClientID, 1, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- }
-}
-
-TEST_F(WebGPUDecoderTest, DissociateMailbox) {
- if (!WebGPUSupported()) {
- LOG(ERROR) << "Test skipped because WebGPU isn't supported";
- return;
- }
-
- gpu::Mailbox mailbox = Mailbox::GenerateForSharedImage();
- EXPECT_TRUE(factory_->CreateSharedImage(
- mailbox, viz::ResourceFormat::RGBA_8888, {1, 1},
- gfx::ColorSpace::CreateSRGB(), kTopLeft_GrSurfaceOrigin,
- kPremul_SkAlphaType, kNullSurfaceHandle, SHARED_IMAGE_USAGE_WEBGPU));
-
- // Associate a mailbox so we can later dissociate it.
- {
- AssociateMailboxCmdStorage cmd;
- cmd.cmd.Init(kDeviceClientID, 0, 1, 0, WGPUTextureUsage_Sampled,
- mailbox.name);
- EXPECT_EQ(error::kNoError,
- ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
- }
-
- // Error case: wrong texture ID
- {
- cmds::DissociateMailbox cmd;
- cmd.Init(kDeviceClientID, 42, 0);
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
- }
-
- // Error case: wrong texture generation
- {
- cmds::DissociateMailbox cmd;
- cmd.Init(kDeviceClientID, 1, 42);
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
- }
-
- // Error case: invalid client device ID
- {
- cmds::DissociateMailbox cmd;
- cmd.Init(kDeviceClientID + 1, 1, 0);
- EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
- }
-
- // Success case
- {
- cmds::DissociateMailbox cmd;
- cmd.Init(kDeviceClientID, 1, 0);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- }
-}
-
} // namespace webgpu
} // namespace gpu
diff --git a/chromium/gpu/config/BUILD.gn b/chromium/gpu/config/BUILD.gn
index 569657ae262..859cc506e4c 100644
--- a/chromium/gpu/config/BUILD.gn
+++ b/chromium/gpu/config/BUILD.gn
@@ -4,6 +4,8 @@
import("//build/config/chrome_build.gni")
import("//build/config/chromecast_build.gni")
+import("//build/config/chromeos/ui_mode.gni")
+import("//build/config/python.gni")
import("//build/config/ui.gni")
import("//gpu/vulkan/features.gni")
@@ -11,13 +13,6 @@ if (is_android) {
import("//build/config/android/rules.gni")
}
-# This file depends on the legacy global sources assignment filter. It should
-# be converted to check target platform before assigning source files to the
-# sources variable. Remove this import and set_sources_assignment_filter call
-# when the file has been converted. See https://crbug.com/1018739 for details.
-import("//build/config/deprecated_default_sources_assignment_filter.gni")
-set_sources_assignment_filter(deprecated_default_sources_assignment_filter)
-
group("config") {
if (is_component_build) {
public_deps = [ "//gpu" ]
@@ -37,7 +32,8 @@ process_json_outputs = [
"$target_gen_dir/software_rendering_list_exceptions_autogen.h",
]
-action("process_json") {
+# TODO(crbug.com/1112471): Get this to run cleanly under Python 3.
+python2_action("process_json") {
script = "process_json.py"
inputs = [
@@ -61,7 +57,7 @@ action("process_json") {
args += [ "macosx" ]
} else if (is_android) {
args += [ "android" ]
- } else if (is_chromeos) {
+ } else if (is_ash) {
args += [ "chromeos" ]
} else if (is_fuchsia) {
args += [ "fuchsia" ]
@@ -139,9 +135,6 @@ source_set("config_sources") {
"gpu_driver_bug_workaround_type.h",
"gpu_driver_bug_workarounds.cc",
"gpu_driver_bug_workarounds.h",
- "gpu_dx_diagnostics_win.cc",
- "gpu_extra_info.cc",
- "gpu_extra_info.h",
"gpu_feature_info.cc",
"gpu_feature_info.h",
"gpu_feature_type.h",
@@ -151,10 +144,6 @@ source_set("config_sources") {
"gpu_info.h",
"gpu_info_collector.cc",
"gpu_info_collector.h",
- "gpu_info_collector_android.cc",
- "gpu_info_collector_linux.cc",
- "gpu_info_collector_mac.mm",
- "gpu_info_collector_win.cc",
"gpu_mode.h",
"gpu_preferences.cc",
"gpu_preferences.h",
@@ -191,12 +180,14 @@ source_set("config_sources") {
"//base",
"//build:branding_buildflags",
"//build:chromecast_buildflags",
+ "//build:chromeos_buildflags",
"//gpu/command_buffer/common:common_sources",
"//gpu/ipc/common:gpu_preferences_interface",
"//gpu/vulkan:buildflags",
"//media:media_buildflags",
"//third_party/re2",
"//third_party/vulkan_headers",
+ "//ui/gfx",
"//ui/gl",
"//ui/gl:buildflags",
"//ui/gl/init",
@@ -210,7 +201,14 @@ source_set("config_sources") {
# Prefer mesa GL headers to system headers, which cause problems on Win.
include_dirs = [ "//third_party/mesa_headers" ]
+ if (is_android) {
+ sources += [ "gpu_info_collector_android.cc" ]
+ }
if (is_win) {
+ sources += [
+ "gpu_dx_diagnostics_win.cc",
+ "gpu_info_collector_win.cc",
+ ]
libs = [
"dxgi.lib",
"dxguid.lib",
@@ -224,8 +222,12 @@ source_set("config_sources") {
}
}
if (is_mac) {
+ sources += [ "gpu_info_collector_mac.mm" ]
frameworks = [ "OpenGL.framework" ]
}
+ if (is_linux || is_chromeos) {
+ sources += [ "gpu_info_collector_linux.cc" ]
+ }
if (is_linux || is_chromeos || is_mac) {
deps += [ "//third_party/angle:angle_gpu_info_util" ]
}
@@ -245,9 +247,19 @@ if (is_android) {
template = "android/java/src/org/chromium/gpu/config/GpuSwitches.java.tmpl"
}
+ java_cpp_features("java_features_srcjar") {
+ # External code should depend on ":config_java" instead.
+ visibility = [ ":*" ]
+ sources = [ "gpu_finch_features.cc" ]
+ template = "android/java/src/org/chromium/gpu/config/GpuFeatures.java.tmpl"
+ }
+
android_library("config_java") {
- # Right now, this only includes the Java switches. But if we need more Java
- # files, they should be added here as necessary.
- srcjar_deps = [ ":java_switches_srcjar" ]
+ # Right now, this only includes the Java switches/features. But if we need
+ # more Java files, they should be added here as necessary.
+ srcjar_deps = [
+ ":java_features_srcjar",
+ ":java_switches_srcjar",
+ ]
}
}
diff --git a/chromium/gpu/config/DIR_METADATA b/chromium/gpu/config/DIR_METADATA
new file mode 100644
index 00000000000..8e87cca679a
--- /dev/null
+++ b/chromium/gpu/config/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Internals>GPU>Internals"
+} \ No newline at end of file
diff --git a/chromium/gpu/config/OWNERS b/chromium/gpu/config/OWNERS
index ae61c99483f..84f82176d25 100644
--- a/chromium/gpu/config/OWNERS
+++ b/chromium/gpu/config/OWNERS
@@ -1,4 +1,2 @@
kbr@chromium.org
zmo@chromium.org
-
-# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/config/gpu_blocklist.cc b/chromium/gpu/config/gpu_blocklist.cc
index 694aa68bf62..31e41ffc606 100644
--- a/chromium/gpu/config/gpu_blocklist.cc
+++ b/chromium/gpu/config/gpu_blocklist.cc
@@ -29,10 +29,6 @@ std::unique_ptr<GpuBlocklist> GpuBlocklist::Create(
GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS);
list->AddSupportedFeature("accelerated_webgl",
GPU_FEATURE_TYPE_ACCELERATED_WEBGL);
- list->AddSupportedFeature("flash3d", GPU_FEATURE_TYPE_FLASH3D);
- list->AddSupportedFeature("flash_stage3d", GPU_FEATURE_TYPE_FLASH_STAGE3D);
- list->AddSupportedFeature("flash_stage3d_baseline",
- GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE);
list->AddSupportedFeature("accelerated_video_decode",
GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE);
list->AddSupportedFeature("gpu_rasterization",
diff --git a/chromium/gpu/config/gpu_blocklist_unittest.cc b/chromium/gpu/config/gpu_blocklist_unittest.cc
index f30a35b52ae..d6289ce0a83 100644
--- a/chromium/gpu/config/gpu_blocklist_unittest.cc
+++ b/chromium/gpu/config/gpu_blocklist_unittest.cc
@@ -87,13 +87,6 @@ GPU_BLOCKLIST_FEATURE_TEST(Accelerated2DCanvas,
GPU_BLOCKLIST_FEATURE_TEST(AcceleratedWebGL, GPU_FEATURE_TYPE_ACCELERATED_WEBGL)
-GPU_BLOCKLIST_FEATURE_TEST(Flash3D, GPU_FEATURE_TYPE_FLASH3D)
-
-GPU_BLOCKLIST_FEATURE_TEST(FlashStage3D, GPU_FEATURE_TYPE_FLASH_STAGE3D)
-
-GPU_BLOCKLIST_FEATURE_TEST(FlashStage3DBaseline,
- GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE)
-
GPU_BLOCKLIST_FEATURE_TEST(AcceleratedVideoDecode,
GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE)
diff --git a/chromium/gpu/config/gpu_control_list.cc b/chromium/gpu/config/gpu_control_list.cc
index a7e5a25f70e..b01b04ca07e 100644
--- a/chromium/gpu/config/gpu_control_list.cc
+++ b/chromium/gpu/config/gpu_control_list.cc
@@ -16,6 +16,7 @@
#include "base/system/sys_info.h"
#include "base/values.h"
#include "build/build_config.h"
+#include "build/chromeos_buildflags.h"
#include "gpu/config/gpu_util.h"
#include "third_party/re2/src/re2/re2.h"
@@ -273,7 +274,7 @@ bool GpuControlList::More::GLVersionInfoMismatch(
// static
GpuControlList::GLType GpuControlList::More::GetDefaultGLType() {
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
return kGLTypeGL;
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
return kGLTypeGL;
@@ -773,7 +774,7 @@ uint32_t GpuControlList::max_entry_id() const {
// static
GpuControlList::OsType GpuControlList::GetOsType() {
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
return kOsChromeOS;
#elif defined(OS_WIN)
return kOsWin;
diff --git a/chromium/gpu/config/gpu_driver_bug_list.json b/chromium/gpu/config/gpu_driver_bug_list.json
index b8330f2d246..e2b5aada7a2 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.json
+++ b/chromium/gpu/config/gpu_driver_bug_list.json
@@ -60,9 +60,9 @@
{
"id": 19,
"comment": "Corresponds to software rendering list #140",
- "description": "Disable depth textures on older Qualcomm GPUs (legacy blocklist entry, original problem unclear)",
- "cr_bugs": [682075],
- "gl_renderer": "Adreno \\(TM\\) [23].*",
+ "description": "Disable depth textures on older Adreno 2xx Qualcomm GPUs (legacy blocklist entry, original problem unclear)",
+ "cr_bugs": [682075, 1042214],
+ "gl_renderer": "Adreno \\(TM\\) 2.*",
"features": [
"disable_depth_texture"
],
@@ -947,19 +947,6 @@
]
},
{
- "id": 129,
- "comment": "TODO(dshwang): Fix ANGLE crash. crbug.com/518889",
- "description": "ANGLE crash on glReadPixels from incomplete cube map texture",
- "cr_bugs": [518889],
- "os": {
- "type": "win"
- },
- "gl_renderer": "ANGLE.*",
- "features": [
- "force_cube_complete"
- ]
- },
- {
"id": 130,
"description": "NVIDIA fails glReadPixels from incomplete cube map texture",
"cr_bugs": [518889],
@@ -1096,17 +1083,6 @@
"disabled_extensions": ["GL_EXT_sRGB"]
},
{
- "id": 141,
- "cr_bugs": [570897],
- "description": "Framebuffer discarding can hurt performance on non-tilers",
- "os": {
- "type": "win"
- },
- "features": [
- "disable_discard_framebuffer"
- ]
- },
- {
"id": 142,
"cr_bugs": [563714],
"description": "Pack parameters work incorrectly with pack buffer bound",
@@ -1884,11 +1860,18 @@
"comment": [
"Corresponds to software rendering list #140",
"Mysteriously, the first workaround won't work without the second. crbug.com/698197#c10",
- "MSAA workaround shouldn't be needed beyond Adreno 3xx. crbug.com/682075#c17"
+ "MSAA workaround shouldn't be needed beyond Adreno 3xx and Android 9. crbug.com/682075#c17, crbug.com/1042214"
],
"description": "Some Adreno 3xx don't setup scissor state correctly when FBO0 is bound, nor support MSAA properly.",
- "cr_bugs": [670607, 682075, 696627, 698197, 707839],
+ "cr_bugs": [670607, 682075, 696627, 698197, 707839, 1042214],
"gl_renderer": "Adreno \\(TM\\) 3.*",
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "<",
+ "value": "9.0"
+ }
+ },
"features": [
"force_update_scissor_state_when_binding_fbo0",
"disable_chromium_framebuffer_multisample"
@@ -2089,22 +2072,6 @@
]
},
{
- "id": 229,
- "description": "Overlay sizes bigger than screen aren't accelerated on some Intel drivers",
- "cr_bugs": [720059],
- "os": {
- "type": "win"
- },
- "vendor_id": "0x8086",
- "driver_version": {
- "op": "<",
- "value": "21.20.16.4542"
- },
- "features": [
- "disable_larger_than_screen_overlays"
- ]
- },
- {
"id": 231,
"description": "Multisampled color renderbuffers can't be resized on Qualcomm 4xx/5xx",
"cr_bugs": [696126],
@@ -3438,6 +3405,9 @@
"id": 343,
"description": "Disable using GPU backed resource for imageBitmap from video on d3d9",
"cr_bugs": [1098445, 1105923],
+ "os": {
+ "type": "win"
+ },
"gl_renderer": ".*Direct3D9.*",
"features": [
"disable_imagebitmap_from_video_using_gpu"
@@ -3482,7 +3452,8 @@
"type": "win"
},
"intel_gpu_series": [
- "icelake"
+ "icelake",
+ "tigerlake"
],
"features": [
"force_rgb10a2_overlay_support_flags"
@@ -3642,6 +3613,38 @@
]
},
{
+ "id": 359,
+ "comment": "Corresponds to software rendering list #140",
+ "description": "Disable depth textures on Adreno 3xx Qualcomm GPUs pre-Android 9 (legacy blocklist entry, original problem unclear)",
+ "cr_bugs": [682075, 1042214],
+ "gl_renderer": "Adreno \\(TM\\) 3.*",
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "<",
+ "value": "9.0"
+ }
+ },
+ "features": [
+ "disable_depth_texture"
+ ],
+ "disabled_extensions": [
+ "GL_OES_depth_texture"
+ ]
+ },
+ {
+ "id": 360,
+ "description": "8x MSAA for WebGL contexts is slow on Win Intel",
+ "cr_bugs": [1145793],
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x8086",
+ "features": [
+ "max_msaa_sample_count_4"
+ ]
+ },
+ {
"id": 361,
"description": "VP9 Profile 2 decoding doesn't work on ChromeOS AMDs",
"cr_bugs": [1147466],
@@ -3653,6 +3656,22 @@
"features": [
"disable_accelerated_vp9_profile2_decode"
]
+ },
+ {
+ "id": 363,
+ "description": "VP9 decoding is too slow on Intel Broadwell, Skylake, and CherryTrail",
+ "cr_bugs": [616318, 1163111],
+ "os": {
+ "type": "macosx"
+ },
+ "intel_gpu_series": [
+ "broadwell",
+ "skylake",
+ "cherrytrail"
+ ],
+ "features": [
+ "disable_accelerated_vp9_decode"
+ ]
}
]
}
diff --git a/chromium/gpu/config/gpu_driver_bug_workarounds.cc b/chromium/gpu/config/gpu_driver_bug_workarounds.cc
index edd956b392d..707e5f65e39 100644
--- a/chromium/gpu/config/gpu_driver_bug_workarounds.cc
+++ b/chromium/gpu/config/gpu_driver_bug_workarounds.cc
@@ -31,8 +31,6 @@ void IntSetToWorkarounds(const std::vector<int32_t>& enabled_workarounds,
if (workarounds->max_copy_texture_chromium_size_1048576)
workarounds->max_copy_texture_chromium_size = 1048576;
- if (workarounds->max_copy_texture_chromium_size_262144)
- workarounds->max_copy_texture_chromium_size = 262144;
if (workarounds->max_3d_array_texture_size_1024)
workarounds->max_3d_array_texture_size = 1024;
diff --git a/chromium/gpu/config/gpu_extra_info.cc b/chromium/gpu/config/gpu_extra_info.cc
deleted file mode 100644
index 103f5de6688..00000000000
--- a/chromium/gpu/config/gpu_extra_info.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/config/gpu_extra_info.h"
-
-namespace gpu {
-
-ANGLEFeature::ANGLEFeature() = default;
-ANGLEFeature::ANGLEFeature(const ANGLEFeature& other) = default;
-ANGLEFeature::ANGLEFeature(ANGLEFeature&& other) = default;
-ANGLEFeature::~ANGLEFeature() = default;
-ANGLEFeature& ANGLEFeature::operator=(const ANGLEFeature& other) = default;
-ANGLEFeature& ANGLEFeature::operator=(ANGLEFeature&& other) = default;
-
-GpuExtraInfo::GpuExtraInfo() = default;
-GpuExtraInfo::GpuExtraInfo(const GpuExtraInfo&) = default;
-GpuExtraInfo::GpuExtraInfo(GpuExtraInfo&&) = default;
-GpuExtraInfo::~GpuExtraInfo() = default;
-GpuExtraInfo& GpuExtraInfo::operator=(const GpuExtraInfo&) = default;
-GpuExtraInfo& GpuExtraInfo::operator=(GpuExtraInfo&&) = default;
-
-} // namespace gpu
diff --git a/chromium/gpu/config/gpu_extra_info.h b/chromium/gpu/config/gpu_extra_info.h
deleted file mode 100644
index 24d45fdb127..00000000000
--- a/chromium/gpu/config/gpu_extra_info.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright (c) 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_CONFIG_GPU_EXTRA_INFO_H_
-#define GPU_CONFIG_GPU_EXTRA_INFO_H_
-
-#include <string>
-#include <vector>
-
-#include "gpu/gpu_export.h"
-#include "ui/gfx/buffer_types.h"
-
-#if defined(USE_X11)
-typedef unsigned long VisualID;
-#endif
-
-namespace gpu {
-
-// Specification of a feature that can be enabled/disable in ANGLE
-struct GPU_EXPORT ANGLEFeature {
- ANGLEFeature();
- ANGLEFeature(const ANGLEFeature& other);
- ANGLEFeature(ANGLEFeature&& other);
- ~ANGLEFeature();
- ANGLEFeature& operator=(const ANGLEFeature& other);
- ANGLEFeature& operator=(ANGLEFeature&& other);
-
- // Name of the feature in camel_case.
- std::string name;
-
- // Name of the category that the feature belongs to.
- std::string category;
-
- // One sentence description of the feature, why it's available.
- std::string description;
-
- // Full link to cr/angle bug if applicable.
- std::string bug;
-
- // Status, can be "enabled" or "disabled".
- std::string status;
-
- // Condition, contains the condition that set 'status'.
- std::string condition;
-};
-using ANGLEFeatures = std::vector<ANGLEFeature>;
-
-struct GPU_EXPORT GpuExtraInfo {
- GpuExtraInfo();
- GpuExtraInfo(const GpuExtraInfo&);
- GpuExtraInfo(GpuExtraInfo&&);
- ~GpuExtraInfo();
- GpuExtraInfo& operator=(const GpuExtraInfo&);
- GpuExtraInfo& operator=(GpuExtraInfo&&);
-
- // List of the currently available ANGLE features. May be empty if not
- // applicable.
- ANGLEFeatures angle_features;
-
-#if defined(USE_X11)
- VisualID system_visual = 0;
- VisualID rgba_visual = 0;
-
- std::vector<gfx::BufferUsageAndFormat> gpu_memory_buffer_support_x11;
-#endif
-};
-
-} // namespace gpu
-
-#endif // GPU_CONFIG_GPU_EXTRA_INFO_H_
diff --git a/chromium/gpu/config/gpu_feature_type.h b/chromium/gpu/config/gpu_feature_type.h
index da342c70914..08ae2b89c4b 100644
--- a/chromium/gpu/config/gpu_feature_type.h
+++ b/chromium/gpu/config/gpu_feature_type.h
@@ -13,10 +13,7 @@ namespace gpu {
enum GpuFeatureType {
GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS = 0,
GPU_FEATURE_TYPE_ACCELERATED_WEBGL,
- GPU_FEATURE_TYPE_FLASH3D,
- GPU_FEATURE_TYPE_FLASH_STAGE3D,
GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE,
- GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE,
GPU_FEATURE_TYPE_GPU_RASTERIZATION,
GPU_FEATURE_TYPE_ACCELERATED_WEBGL2,
GPU_FEATURE_TYPE_PROTECTED_VIDEO_DECODE,
diff --git a/chromium/gpu/config/gpu_finch_features.cc b/chromium/gpu/config/gpu_finch_features.cc
index 688b2911ad8..734dcc0dd19 100644
--- a/chromium/gpu/config/gpu_finch_features.cc
+++ b/chromium/gpu/config/gpu_finch_features.cc
@@ -1,17 +1,41 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
#include "gpu/config/gpu_finch_features.h"
+#include "base/command_line.h"
+#include "build/chromeos_buildflags.h"
+#include "gpu/config/gpu_switches.h"
+
#if defined(OS_ANDROID)
#include "base/android/android_image_reader_compat.h"
#include "base/android/build_info.h"
#include "base/metrics/field_trial_params.h"
+#include "base/strings/pattern.h"
#include "base/strings/string_split.h"
-#include "ui/gl/android/android_surface_control_compat.h"
+#include "base/strings/string_util.h"
+#include "ui/gfx/android/android_surface_control_compat.h"
#endif
namespace features {
+namespace {
+
+#if defined(OS_ANDROID)
+bool FieldIsInBlocklist(const char* current_value, std::string blocklist_str) {
+ std::vector<std::string> blocklist = base::SplitString(
+ blocklist_str, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+ for (const std::string& blocklisted_value : blocklist) {
+ if (base::StartsWith(current_value, blocklisted_value,
+ base::CompareCase::INSENSITIVE_ASCII)) {
+ return true;
+ }
+ }
+ return false;
+}
+#endif
+
+} // namespace
#if defined(OS_ANDROID)
// Used to limit GL version to 2.0 for skia raster on Android.
@@ -24,57 +48,78 @@ const base::Feature kUseGles2ForOopR{"UseGles2ForOopR",
const base::Feature kAndroidSurfaceControl{"AndroidSurfaceControl",
base::FEATURE_ENABLED_BY_DEFAULT};
+// https://crbug.com/1176185 List of devices on which SurfaceControl should be
+// disabled.
+const base::FeatureParam<std::string> kAndroidSurfaceControlBlocklist{
+ &kAndroidSurfaceControl, "AndroidSurfaceControlBlocklist", "capri|caprip"};
+
+// Use AImageReader for MediaCodec and MediaPlayer on android.
const base::Feature kAImageReader{"AImageReader",
base::FEATURE_ENABLED_BY_DEFAULT};
+
+// If webview-draw-functor-uses-vulkan is set, use vulkan for composite and
+// raster.
+const base::Feature kWebViewVulkan{"WebViewVulkan",
+ base::FEATURE_ENABLED_BY_DEFAULT};
+
+// Used to enable/disable zero copy video path on webview for MCVD.
+const base::Feature kWebViewZeroCopyVideo{"WebViewZeroCopyVideo",
+ base::FEATURE_DISABLED_BY_DEFAULT};
+
+// List of devices on which WebViewZeroCopyVideo should be disabled.
+const base::FeatureParam<std::string> kWebViewZeroCopyVideoBlocklist{
+ &kWebViewZeroCopyVideo, "WebViewZeroCopyVideoBlocklist", ""};
+
+// Used to limit AImageReader max queue size to 1 since many devices especially
+// android Tv devices do not support more than 1 images.
+const base::Feature kLimitAImageReaderMaxSizeToOne{
+ "LimitAImageReaderMaxSizeToOne", base::FEATURE_ENABLED_BY_DEFAULT};
+
+// List of devices on which to limit AImageReader max queue size to 1.
+const base::FeatureParam<std::string> kLimitAImageReaderMaxSizeToOneBlocklist{
+ &kLimitAImageReaderMaxSizeToOne, "LimitAImageReaderMaxSizeToOneBlocklist",
+ "MIBOX"};
#endif
// Enable GPU Rasterization by default. This can still be overridden by
-// --force-gpu-rasterization or --disable-gpu-rasterization.
-#if defined(OS_MAC) || defined(OS_WIN) || defined(OS_CHROMEOS) || \
- defined(OS_ANDROID) || defined(OS_FUCHSIA)
+// --enable-gpu-rasterization or --disable-gpu-rasterization.
// DefaultEnableGpuRasterization has launched on Mac, Windows, ChromeOS, and
// Android.
const base::Feature kDefaultEnableGpuRasterization{
- "DefaultEnableGpuRasterization", base::FEATURE_ENABLED_BY_DEFAULT};
+ "DefaultEnableGpuRasterization",
+#if defined(OS_MAC) || defined(OS_WIN) || BUILDFLAG(IS_ASH) || \
+ defined(OS_ANDROID) || defined(OS_FUCHSIA)
+ base::FEATURE_ENABLED_BY_DEFAULT
#else
-const base::Feature kDefaultEnableGpuRasterization{
- "DefaultEnableGpuRasterization", base::FEATURE_DISABLED_BY_DEFAULT};
+ base::FEATURE_DISABLED_BY_DEFAULT
#endif
+};
// Enable out of process rasterization by default. This can still be overridden
-// by --enable-oop-rasterization or --disable-oop-rasterization.
-#if defined(OS_ANDROID) || defined(OS_CHROMEOS) || defined(OS_MAC) || \
- defined(OS_WIN) || defined(OS_FUCHSIA)
+// by --disable-oop-rasterization.
const base::Feature kDefaultEnableOopRasterization{
"DefaultEnableOopRasterization", base::FEATURE_ENABLED_BY_DEFAULT};
-#else
-const base::Feature kDefaultEnableOopRasterization{
- "DefaultEnableOopRasterization", base::FEATURE_DISABLED_BY_DEFAULT};
-#endif
#if defined(OS_WIN)
// Use a high priority for GPU process on Windows.
const base::Feature kGpuProcessHighPriorityWin{
"GpuProcessHighPriorityWin", base::FEATURE_ENABLED_BY_DEFAULT};
+
+// Compute the root damage rect from the surface damage list for overlays on
+// Windows.
+const base::Feature kDirectCompositionUseOverlayDamageList{
+ "DirectCompositionUseOverlayDamageList", base::FEATURE_ENABLED_BY_DEFAULT};
#endif
// Use ThreadPriority::DISPLAY for GPU main, viz compositor and IO threads.
-#if defined(OS_ANDROID) || defined(OS_CHROMEOS) || defined(OS_WIN)
const base::Feature kGpuUseDisplayThreadPriority{
- "GpuUseDisplayThreadPriority", base::FEATURE_ENABLED_BY_DEFAULT};
+ "GpuUseDisplayThreadPriority",
+#if defined(OS_ANDROID) || BUILDFLAG(IS_ASH) || defined(OS_WIN)
+ base::FEATURE_ENABLED_BY_DEFAULT
#else
-const base::Feature kGpuUseDisplayThreadPriority{
- "GpuUseDisplayThreadPriority", base::FEATURE_DISABLED_BY_DEFAULT};
+ base::FEATURE_DISABLED_BY_DEFAULT
#endif
-
-// Gpu watchdog V2 to simplify the logic and reduce GPU hangs
-const base::Feature kGpuWatchdogV2{"GpuWatchdogV2",
- base::FEATURE_ENABLED_BY_DEFAULT};
-
-// Use a different set of watchdog timeouts on V1
-const base::Feature kGpuWatchdogV1NewTimeout{"GpuWatchdogV1NewTimeout",
- base::FEATURE_ENABLED_BY_DEFAULT};
+};
// Use a different set of watchdog timeouts on V2
const base::Feature kGpuWatchdogV2NewTimeout{"GpuWatchdogV2NewTimeout",
@@ -112,6 +157,7 @@ const base::Feature kVaapiWebPImageDecodeAcceleration{
// Enable Vulkan graphics backend for compositing and rasterization. Defaults to
// native implementation if --use-vulkan flag is not used. Otherwise
// --use-vulkan will be followed.
+// Note Android WebView uses kWebViewVulkan instead of this.
const base::Feature kVulkan{"Vulkan", base::FEATURE_DISABLED_BY_DEFAULT};
// Enable SkiaRenderer Dawn graphics backend. On Windows this will use D3D12,
@@ -123,6 +169,20 @@ const base::Feature kSkiaDawn{"SkiaDawn", base::FEATURE_DISABLED_BY_DEFAULT};
const base::Feature kEnableSharedImageForWebview{
"EnableSharedImageForWebview", base::FEATURE_ENABLED_BY_DEFAULT};
+// Enable GrShaderCache to use with Vulkan backend.
+const base::Feature kEnableGrShaderCacheForVulkan{
+ "EnableGrShaderCacheForVulkan", base::FEATURE_ENABLED_BY_DEFAULT};
+
+bool IsUsingVulkan() {
+ bool enable = base::FeatureList::IsEnabled(kVulkan);
+#if defined(OS_ANDROID)
+ enable = enable || (base::CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kWebViewDrawFunctorUsesVulkan) &&
+ base::FeatureList::IsEnabled(kWebViewVulkan));
+#endif
+ return enable;
+}
+
#if defined(OS_ANDROID)
bool IsAImageReaderEnabled() {
return base::FeatureList::IsEnabled(kAImageReader) &&
@@ -130,10 +190,44 @@ bool IsAImageReaderEnabled() {
}
bool IsAndroidSurfaceControlEnabled() {
+ const auto* build_info = base::android::BuildInfo::GetInstance();
+ auto disable_patterns =
+ base::SplitString(kAndroidSurfaceControlBlocklist.Get(), "|",
+ base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
+ for (const auto& disable_pattern : disable_patterns) {
+ if (base::MatchPattern(build_info->device(), disable_pattern))
+ return false;
+ }
+
return IsAImageReaderEnabled() &&
base::FeatureList::IsEnabled(kAndroidSurfaceControl) &&
- gl::SurfaceControl::IsSupported();
+ gfx::SurfaceControl::IsSupported();
}
+
+// Many devices do not support more than 1 image to be acquired from the
+// AImageReader.(crbug.com/1051705). This method returns true for those
+// devices. Currently the list of device model names are sent from server side
+// via a finch config file. There is a known device MIBOX for which max size
+// should be 1 irrespective of the feature LimitAImageReaderMaxSizeToOne
+// enabled or not. Get() returns default value even if the feature is disabled.
+bool LimitAImageReaderMaxSizeToOne() {
+ return (FieldIsInBlocklist(base::android::BuildInfo::GetInstance()->model(),
+ kLimitAImageReaderMaxSizeToOneBlocklist.Get()));
+}
+
+// Zero copy is disabled if device can not support 3 max images.
+bool IsWebViewZeroCopyVideoEnabled() {
+ const bool limit_max_size_to_one = LimitAImageReaderMaxSizeToOne();
+ if (!IsAImageReaderEnabled() || limit_max_size_to_one)
+ return false;
+
+ if (!base::FeatureList::IsEnabled(kWebViewZeroCopyVideo))
+ return false;
+
+ return !(FieldIsInBlocklist(base::android::BuildInfo::GetInstance()->model(),
+ kWebViewZeroCopyVideoBlocklist.Get()));
+}
+
#endif
} // namespace features
diff --git a/chromium/gpu/config/gpu_finch_features.h b/chromium/gpu/config/gpu_finch_features.h
index 1fac4603a43..ea902ff8d01 100644
--- a/chromium/gpu/config/gpu_finch_features.h
+++ b/chromium/gpu/config/gpu_finch_features.h
@@ -20,6 +20,9 @@ namespace features {
GPU_EXPORT extern const base::Feature kUseGles2ForOopR;
GPU_EXPORT extern const base::Feature kAndroidSurfaceControl;
GPU_EXPORT extern const base::Feature kAImageReader;
+GPU_EXPORT extern const base::Feature kWebViewVulkan;
+GPU_EXPORT extern const base::Feature kLimitAImageReaderMaxSizeToOne;
+GPU_EXPORT extern const base::Feature kWebViewZeroCopyVideo;
#endif // defined(OS_ANDROID)
GPU_EXPORT extern const base::Feature kDefaultEnableGpuRasterization;
@@ -28,14 +31,12 @@ GPU_EXPORT extern const base::Feature kDefaultEnableOopRasterization;
#if defined(OS_WIN)
GPU_EXPORT extern const base::Feature kGpuProcessHighPriorityWin;
+
+GPU_EXPORT extern const base::Feature kDirectCompositionUseOverlayDamageList;
#endif
GPU_EXPORT extern const base::Feature kGpuUseDisplayThreadPriority;
-GPU_EXPORT extern const base::Feature kGpuWatchdogV2;
-
-GPU_EXPORT extern const base::Feature kGpuWatchdogV1NewTimeout;
-
GPU_EXPORT extern const base::Feature kGpuWatchdogV2NewTimeout;
#if defined(OS_MAC)
@@ -56,9 +57,14 @@ GPU_EXPORT extern const base::Feature kSkiaDawn;
GPU_EXPORT extern const base::Feature kEnableSharedImageForWebview;
+GPU_EXPORT extern const base::Feature kEnableGrShaderCacheForVulkan;
+
+GPU_EXPORT bool IsUsingVulkan();
#if defined(OS_ANDROID)
GPU_EXPORT bool IsAImageReaderEnabled();
GPU_EXPORT bool IsAndroidSurfaceControlEnabled();
+GPU_EXPORT bool LimitAImageReaderMaxSizeToOne();
+GPU_EXPORT bool IsWebViewZeroCopyVideoEnabled();
#endif
} // namespace features
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index ffe6c6556bd..f77dcfb081b 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -35,11 +35,14 @@
#include "ui/gl/init/create_gr_gl_interface.h"
#include "ui/gl/init/gl_factory.h"
+#if defined(USE_OZONE)
+#include "ui/base/ui_base_features.h" // nogncheck
+#include "ui/ozone/public/ozone_platform.h" // nogncheck
+#include "ui/ozone/public/platform_gl_egl_utility.h" // nogncheck
+#endif
+
#if defined(USE_X11)
-#include "ui/base/ui_base_features.h" // nogncheck
-#include "ui/gfx/linux/gpu_memory_buffer_support_x11.h"
-#include "ui/gfx/switches.h"
-#include "ui/gl/gl_visual_picker_glx.h"
+#include "ui/gl/gl_utils.h"
#endif
namespace {
@@ -476,7 +479,7 @@ void CollectGraphicsInfoForTesting(GPUInfo* gpu_info) {
#endif // OS_ANDROID
}
-bool CollectGpuExtraInfo(GpuExtraInfo* gpu_extra_info,
+bool CollectGpuExtraInfo(gfx::GpuExtraInfo* gpu_extra_info,
const GpuPreferences& prefs) {
// Populate the list of ANGLE features by querying the functions exposed by
// EGL_ANGLE_feature_control if it's available.
@@ -502,43 +505,20 @@ bool CollectGpuExtraInfo(GpuExtraInfo* gpu_extra_info,
}
}
-#if defined(USE_X11)
- if (features::IsUsingOzonePlatform())
+#if defined(USE_OZONE)
+ if (features::IsUsingOzonePlatform()) {
+ const auto* const egl_utility =
+ ui::OzonePlatform::GetInstance()->GetPlatformGLEGLUtility();
+ if (egl_utility)
+ egl_utility->CollectGpuExtraInfo(prefs.enable_native_gpu_memory_buffers,
+ *gpu_extra_info);
return true;
- // Create the GLVisualPickerGLX singleton now while the GbmSupportX11
- // singleton is busy being created on another thread.
- gl::GLVisualPickerGLX* visual_picker;
- if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL)
- visual_picker = gl::GLVisualPickerGLX::GetInstance();
-
- // TODO(https://crbug.com/1031269): Enable by default.
- if (prefs.enable_native_gpu_memory_buffers) {
- gpu_extra_info->gpu_memory_buffer_support_x11 =
- ui::GpuMemoryBufferSupportX11::GetInstance()->supported_configs();
- }
-
- if (gl::GetGLImplementation() == gl::kGLImplementationDesktopGL) {
- gpu_extra_info->system_visual =
- static_cast<uint32_t>(visual_picker->system_visual());
- gpu_extra_info->rgba_visual =
- static_cast<uint32_t>(visual_picker->rgba_visual());
-
- // With GLX, only BGR(A) buffer formats are supported. EGL does not have
- // this restriction.
- gpu_extra_info->gpu_memory_buffer_support_x11.erase(
- std::remove_if(gpu_extra_info->gpu_memory_buffer_support_x11.begin(),
- gpu_extra_info->gpu_memory_buffer_support_x11.end(),
- [&](gfx::BufferUsageAndFormat usage_and_format) {
- return visual_picker->GetFbConfigForFormat(
- usage_and_format.format) ==
- x11::Glx::FbConfig{};
- }),
- gpu_extra_info->gpu_memory_buffer_support_x11.end());
- } else if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE) {
- // ANGLE does not yet support EGL_EXT_image_dma_buf_import[_modifiers].
- gpu_extra_info->gpu_memory_buffer_support_x11.clear();
}
#endif
+#if defined(USE_X11)
+ gl::CollectX11GpuExtraInfo(prefs.enable_native_gpu_memory_buffers,
+ *gpu_extra_info);
+#endif
return true;
}
diff --git a/chromium/gpu/config/gpu_info_collector.h b/chromium/gpu/config/gpu_info_collector.h
index dfa92445ebb..5895beaf391 100644
--- a/chromium/gpu/config/gpu_info_collector.h
+++ b/chromium/gpu/config/gpu_info_collector.h
@@ -8,10 +8,10 @@
#include <stdint.h>
#include "build/build_config.h"
-#include "gpu/config/gpu_extra_info.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/gpu_export.h"
+#include "ui/gfx/gpu_extra_info.h"
#if defined(OS_WIN)
#include <d3dcommon.h>
@@ -84,7 +84,7 @@ void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
GPU_EXPORT void CollectGraphicsInfoForTesting(GPUInfo* gpu_info);
// Collect Graphics info related to the current process
-GPU_EXPORT bool CollectGpuExtraInfo(GpuExtraInfo* gpu_extra_info,
+GPU_EXPORT bool CollectGpuExtraInfo(gfx::GpuExtraInfo* gpu_extra_info,
const GpuPreferences& prefs);
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_lists_version.h b/chromium/gpu/config/gpu_lists_version.h
index 3621fab713a..f6ec72dfec5 100644
--- a/chromium/gpu/config/gpu_lists_version.h
+++ b/chromium/gpu/config/gpu_lists_version.h
@@ -3,6 +3,6 @@
#ifndef GPU_CONFIG_GPU_LISTS_VERSION_H_
#define GPU_CONFIG_GPU_LISTS_VERSION_H_
-#define GPU_LISTS_VERSION "38a74c624ca48a6acb2a6f427998be599b504eed"
+#define GPU_LISTS_VERSION "1884b7e8b607ede8072319ce1bf841d628aa79bd"
#endif // GPU_CONFIG_GPU_LISTS_VERSION_H_
diff --git a/chromium/gpu/config/gpu_preferences.h b/chromium/gpu/config/gpu_preferences.h
index 6b8587151d6..5e96dd115cd 100644
--- a/chromium/gpu/config/gpu_preferences.h
+++ b/chromium/gpu/config/gpu_preferences.h
@@ -11,6 +11,7 @@
#include "base/macros.h"
#include "build/build_config.h"
+#include "build/chromeos_buildflags.h"
#include "gpu/gpu_export.h"
#include "media/media_buildflags.h"
#include "ui/gfx/buffer_types.h"
@@ -214,6 +215,9 @@ struct GPU_EXPORT GpuPreferences {
// Use Vulkan for rasterization and display compositing.
VulkanImplementationName use_vulkan = VulkanImplementationName::kNone;
+ // Enable using vulkan protected memory.
+ bool enable_vulkan_protected_memory = false;
+
// Enforce using vulkan protected memory.
bool enforce_vulkan_protected_memory = false;
@@ -224,6 +228,15 @@ struct GPU_EXPORT GpuPreferences {
// testing in order to detect regressions which crash Vulkan.
bool disable_vulkan_fallback_to_gl_for_testing = false;
+ // Heap memory limit for Vulkan. Allocations will fail when this limit is
+ // reached for a heap.
+ uint32_t vulkan_heap_memory_limit = 0u;
+
+ // Sync CPU memory limit for Vulkan. Submission of GPU work will be
+ // synchronized with the CPU in order to free released memory immediately
+ // when this limit is reached.
+ uint32_t vulkan_sync_cpu_memory_limit = 0u;
+
// Use Metal for rasterization and Skia-based display compositing. Note that
// this is compatible with GL-based display compositing.
bool enable_metal = false;
@@ -239,6 +252,11 @@ struct GPU_EXPORT GpuPreferences {
// Enable validation layers in Dawn backends.
bool enable_dawn_backend_validation = false;
+ // Enable the toggle Toggle::DisableRobustness when creating Dawn device for
+ // the investigation of the performance issues related to the implementation
+ // of robustness in Dawn.
+ bool disable_dawn_robustness = false;
+
// Enable measuring blocked time on GPU Main thread
bool enable_gpu_blocked_time_metric = false;
@@ -260,7 +278,7 @@ struct GPU_EXPORT GpuPreferences {
// ===================================
// Settings from //media/base/media_switches.h
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
// The direct VideoDecoder is disallowed in this particular SoC/platform. This
// flag is a reflection of whatever ChromeOS command line builder says.
bool platform_disallows_chromeos_direct_video_decoder = false;
diff --git a/chromium/gpu/config/gpu_preferences_unittest.cc b/chromium/gpu/config/gpu_preferences_unittest.cc
index 63cd4d9dc14..21f3fd4aa8a 100644
--- a/chromium/gpu/config/gpu_preferences_unittest.cc
+++ b/chromium/gpu/config/gpu_preferences_unittest.cc
@@ -8,6 +8,7 @@
#include "base/command_line.h"
#include "base/message_loop/message_pump_type.h"
#include "build/build_config.h"
+#include "build/chromeos_buildflags.h"
#include "gpu/config/gpu_switches.h"
#include "gpu/ipc/common/gpu_preferences.mojom.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -74,11 +75,17 @@ void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
right.watchdog_starts_backgrounded);
EXPECT_EQ(left.gr_context_type, right.gr_context_type);
EXPECT_EQ(left.use_vulkan, right.use_vulkan);
+ EXPECT_EQ(left.enable_vulkan_protected_memory,
+ right.enable_vulkan_protected_memory);
+ EXPECT_EQ(left.vulkan_heap_memory_limit, right.vulkan_heap_memory_limit);
+ EXPECT_EQ(left.vulkan_sync_cpu_memory_limit,
+ right.vulkan_sync_cpu_memory_limit);
EXPECT_EQ(left.enable_gpu_benchmarking_extension,
right.enable_gpu_benchmarking_extension);
EXPECT_EQ(left.enable_webgpu, right.enable_webgpu);
EXPECT_EQ(left.enable_dawn_backend_validation,
right.enable_dawn_backend_validation);
+ EXPECT_EQ(left.disable_dawn_robustness, right.disable_dawn_robustness);
EXPECT_EQ(left.enable_gpu_blocked_time_metric,
right.enable_gpu_blocked_time_metric);
EXPECT_EQ(left.enable_perf_data_collection,
@@ -88,7 +95,7 @@ void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
#endif
EXPECT_EQ(left.enable_native_gpu_memory_buffers,
right.enable_native_gpu_memory_buffers);
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
EXPECT_EQ(left.platform_disallows_chromeos_direct_video_decoder,
right.platform_disallows_chromeos_direct_video_decoder);
#endif
@@ -113,12 +120,6 @@ TEST(GpuPreferencesTest, EncodeDecode) {
GpuPreferences default_prefs;
mojom::GpuPreferences prefs_mojom;
- // Make sure all fields are included in mojo struct.
- // TODO(zmo): This test isn't perfect. If a field isn't included in
- // mojom::GpuPreferences, the two struct sizes might still be equal due to
- // alignment.
- EXPECT_EQ(sizeof(default_prefs), sizeof(prefs_mojom));
-
#define GPU_PREFERENCES_FIELD(name, value) \
input_prefs.name = value; \
EXPECT_NE(default_prefs.name, input_prefs.name); \
@@ -174,6 +175,8 @@ TEST(GpuPreferencesTest, EncodeDecode) {
mojom::GrContextType::kVulkan)
GPU_PREFERENCES_FIELD_ENUM(use_vulkan, VulkanImplementationName::kNative,
mojom::VulkanImplementationName::kNative)
+ GPU_PREFERENCES_FIELD(vulkan_heap_memory_limit, 1);
+ GPU_PREFERENCES_FIELD(vulkan_sync_cpu_memory_limit, 1);
GPU_PREFERENCES_FIELD(enable_gpu_benchmarking_extension, true)
GPU_PREFERENCES_FIELD(enable_webgpu, true)
GPU_PREFERENCES_FIELD(enable_dawn_backend_validation, true)
@@ -184,7 +187,7 @@ TEST(GpuPreferencesTest, EncodeDecode) {
base::MessagePumpType::UI)
#endif
GPU_PREFERENCES_FIELD(enable_native_gpu_memory_buffers, true);
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
GPU_PREFERENCES_FIELD(platform_disallows_chromeos_direct_video_decoder,
true);
#endif
@@ -269,6 +272,8 @@ TEST(GpuPreferencesTest, DISABLED_DecodePreferences) {
PRINT_BOOL(watchdog_starts_backgrounded);
PRINT_INT(gr_context_type);
PRINT_INT(use_vulkan);
+ PRINT_INT(vulkan_heap_memory_limit);
+ PRINT_INT(vulkan_sync_cpu_memory_limit);
PRINT_BOOL(enable_gpu_benchmarking_extension);
PRINT_BOOL(enable_webgpu);
PRINT_BOOL(enable_dawn_backend_validation);
@@ -278,7 +283,7 @@ TEST(GpuPreferencesTest, DISABLED_DecodePreferences) {
PRINT_INT(message_pump_type);
#endif
PRINT_BOOL(enable_native_gpu_memory_buffers);
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
PRINT_BOOL(platform_disallows_chromeos_direct_video_decoder);
#endif
printf("}\n");
diff --git a/chromium/gpu/config/gpu_switches.cc b/chromium/gpu/config/gpu_switches.cc
index 76af74cfffb..4387ce4e457 100644
--- a/chromium/gpu/config/gpu_switches.cc
+++ b/chromium/gpu/config/gpu_switches.cc
@@ -10,6 +10,10 @@ namespace switches {
// Overrides the kEnableGpuRasterization flag.
const char kDisableGpuRasterization[] = "disable-gpu-rasterization";
+// Disables mipmap generation in Skia. Used as a workaround for select low memory
+// devices, see https://crbug.com/1138979 for details.
+const char kDisableMipmapGeneration[] = "disable-mipmap-generation";
+
// Allow heuristics to determine when a layer tile should be drawn with the
// Skia GPU backend. Only valid with GPU accelerated compositing.
const char kEnableGpuRasterization[] = "enable-gpu-rasterization";
@@ -46,6 +50,9 @@ const char kEnableUnsafeWebGPU[] = "enable-unsafe-webgpu";
// Enable validation layers in Dawn backends.
const char kEnableDawnBackendValidation[] = "enable-dawn-backend-validation";
+// Enable the toggle Toggle::DisableRobustness when creating Dawn device.
+const char kDisableDawnRobustness[] = "disable-dawn-robustness";
+
// Increases the priority (to REALTIME_AUDIO) of gpu process and compositor
// thread.
// This is only to be used for perf tests on macOS for more reliable values.
@@ -81,4 +88,27 @@ const char kGpuRevision[] = "gpu-revision";
// collection GPU process.
const char kGpuDriverVersion[] = "gpu-driver-version";
+// Indicate that this is being used by Android WebView and its draw functor
+// is using vulkan.
+const char kWebViewDrawFunctorUsesVulkan[] = "webview-draw-functor-uses-vulkan";
+
+// Enables using protected memory for vulkan resources.
+const char kEnableVulkanProtectedMemory[] = "enable-vulkan-protected-memory";
+
+// Forces vulkan resources to use protected memory for vulkan compositing.
+const char kEnforceVulkanProtectedMemory[] = "enforce-vulkan-protected-memory";
+
+// Disables falling back to GL based hardware rendering if initializing Vulkan
+// fails. This is to allow tests to catch regressions in Vulkan.
+const char kDisableVulkanFallbackToGLForTesting[] =
+ "disable-vulkan-fallback-to-gl-for-testing";
+
+// Specifies the heap limit for Vulkan memory.
+// TODO(crbug/1158000): Remove this switch.
+const char kVulkanHeapMemoryLimitMb[] = "vulkan-heap-memory-limit-mb";
+
+// Specifies the sync CPU limit for total Vulkan memory.
+// TODO(crbug/1158000): Remove this switch.
+const char kVulkanSyncCpuMemoryLimitMb[] = "vulkan-sync-cpu-memory-limit-mb";
+
} // namespace switches
diff --git a/chromium/gpu/config/gpu_switches.h b/chromium/gpu/config/gpu_switches.h
index 8d2f036f57d..eb91450b35f 100644
--- a/chromium/gpu/config/gpu_switches.h
+++ b/chromium/gpu/config/gpu_switches.h
@@ -10,6 +10,7 @@
namespace switches {
GPU_EXPORT extern const char kDisableGpuRasterization[];
+GPU_EXPORT extern const char kDisableMipmapGeneration[];
GPU_EXPORT extern const char kEnableGpuRasterization[];
GPU_EXPORT extern const char kGpuBlocklistTestGroup[];
GPU_EXPORT extern const char kGpuDriverBugListTestGroup[];
@@ -20,6 +21,7 @@ GPU_EXPORT extern const char kShaderDiskCacheSizeKB[];
GPU_EXPORT extern const char kDisableGpuProcessForDX12InfoCollection[];
GPU_EXPORT extern const char kEnableUnsafeWebGPU[];
GPU_EXPORT extern const char kEnableDawnBackendValidation[];
+GPU_EXPORT extern const char kDisableDawnRobustness[];
GPU_EXPORT extern const char kUseHighGPUThreadPriorityForPerfTests[];
GPU_EXPORT extern const char kNoDelayForDX12VulkanInfoCollection[];
GPU_EXPORT extern const char kEnableGpuBlockedTime[];
@@ -28,6 +30,12 @@ GPU_EXPORT extern const char kGpuDeviceId[];
GPU_EXPORT extern const char kGpuSubSystemId[];
GPU_EXPORT extern const char kGpuRevision[];
GPU_EXPORT extern const char kGpuDriverVersion[];
+GPU_EXPORT extern const char kWebViewDrawFunctorUsesVulkan[];
+GPU_EXPORT extern const char kEnableVulkanProtectedMemory[];
+GPU_EXPORT extern const char kEnforceVulkanProtectedMemory[];
+GPU_EXPORT extern const char kDisableVulkanFallbackToGLForTesting[];
+GPU_EXPORT extern const char kVulkanHeapMemoryLimitMb[];
+GPU_EXPORT extern const char kVulkanSyncCpuMemoryLimitMb[];
} // namespace switches
diff --git a/chromium/gpu/config/gpu_switching.cc b/chromium/gpu/config/gpu_switching.cc
index 0a9970e2372..403c30a11d0 100644
--- a/chromium/gpu/config/gpu_switching.cc
+++ b/chromium/gpu/config/gpu_switching.cc
@@ -18,6 +18,10 @@
#include "ui/gl/gl_switches.h"
#include "ui/gl/gpu_preference.h"
+#if defined(OS_MAC)
+#include "base/mac/mac_util.h"
+#endif // OS_MAC
+
namespace gpu {
namespace {
@@ -60,14 +64,24 @@ bool SwitchableGPUsSupported(const GPUInfo& gpu_info,
gl::kGLImplementationANGLEName)) {
return false;
}
+ // Always allow offline renderers on ARM-based macs.
+ // https://crbug.com/1131312
+ switch (base::mac::GetCPUType()) {
+ case base::mac::CPUType::kArm:
+ case base::mac::CPUType::kTranslatedIntel:
+ return true;
+ default:
+ break;
+ }
if (gpu_info.secondary_gpus.size() != 1) {
return false;
}
- // Only advertise that we have two GPUs to the rest of
- // Chrome's code if we find an Intel GPU and some other
- // vendor's GPU. Otherwise we don't understand the
- // configuration and don't deal well with it (an example being
- // the dual AMD GPUs in recent Mac Pros).
+
+ // Only advertise that we have two GPUs to the rest of Chrome's code if we
+ // find an Intel GPU and some other vendor's GPU. Otherwise we don't
+ // understand the configuration and don't deal well with it (an example being
+ // the dual AMD GPUs in recent Mac Pros). Motivation is explained in:
+ // http://crbug.com/380026#c70.
const uint32_t kVendorIntel = 0x8086;
return ((gpu_info.gpu.vendor_id == kVendorIntel &&
gpu_info.secondary_gpus[0].vendor_id != kVendorIntel) ||
diff --git a/chromium/gpu/config/gpu_test_config.cc b/chromium/gpu/config/gpu_test_config.cc
index 49219e450c7..6077861ce54 100644
--- a/chromium/gpu/config/gpu_test_config.cc
+++ b/chromium/gpu/config/gpu_test_config.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/system/sys_info.h"
#include "build/build_config.h"
+#include "build/chromeos_buildflags.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_info_collector.h"
#include "gpu/config/gpu_test_expectations_parser.h"
@@ -25,7 +26,7 @@ namespace gpu {
namespace {
GPUTestConfig::OS GetCurrentOS() {
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
return GPUTestConfig::kOsChromeOS;
#elif defined(OS_LINUX) || defined(OS_OPENBSD)
return GPUTestConfig::kOsLinux;
diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc
index 105f4895154..674cd223a9d 100644
--- a/chromium/gpu/config/gpu_util.cc
+++ b/chromium/gpu/config/gpu_util.cc
@@ -47,7 +47,7 @@
#if defined(OS_ANDROID)
#include "base/no_destructor.h"
#include "base/synchronization/lock.h"
-#include "ui/gl/android/android_surface_control_compat.h"
+#include "ui/gfx/android/android_surface_control_compat.h"
#include "ui/gl/gl_surface_egl.h"
#include "ui/gl/init/gl_factory.h"
#endif // OS_ANDROID
@@ -121,7 +121,7 @@ GpuFeatureStatus GetAndroidSurfaceControlFeatureStatus(
if (!gl::GLSurfaceEGL::IsAndroidNativeFenceSyncSupported())
return kGpuFeatureStatusDisabled;
- DCHECK(gl::SurfaceControl::IsSupported());
+ DCHECK(gfx::SurfaceControl::IsSupported());
return kGpuFeatureStatusEnabled;
#endif
}
@@ -173,7 +173,7 @@ GpuFeatureStatus GetGpuRasterizationFeatureStatus(
// Enable gpu rasterization for vulkan, unless it is overridden by
// commandline.
- if (base::FeatureList::IsEnabled(features::kVulkan) &&
+ if (features::IsUsingVulkan() &&
!base::FeatureList::GetInstance()->IsFeatureOverriddenFromCommandLine(
features::kDefaultEnableGpuRasterization.name,
base::FeatureList::OVERRIDE_DISABLE_FEATURE)) {
@@ -193,13 +193,6 @@ GpuFeatureStatus GetOopRasterizationFeatureStatus(
const base::CommandLine& command_line,
const GpuPreferences& gpu_preferences,
const GPUInfo& gpu_info) {
-#if defined(OS_WIN)
- // On Windows, using the validating decoder causes a lot of errors. This
- // could be fixed independently, but validating decoder is going away.
- // See: http://crbug.com/949773.
- if (!gpu_info.passthrough_cmd_decoder)
- return kGpuFeatureStatusDisabled;
-#endif
// OOP rasterization requires GPU rasterization, so if blocklisted or
// disabled, report the same.
auto status =
@@ -219,7 +212,7 @@ GpuFeatureStatus GetOopRasterizationFeatureStatus(
// Enable OOP rasterization for vulkan, unless it is overridden by
// commandline.
- if (base::FeatureList::IsEnabled(features::kVulkan) &&
+ if (features::IsUsingVulkan() &&
!base::FeatureList::GetInstance()->IsFeatureOverriddenFromCommandLine(
features::kDefaultEnableOopRasterization.name,
base::FeatureList::OVERRIDE_DISABLE_FEATURE)) {
@@ -267,46 +260,6 @@ GpuFeatureStatus Get2DCanvasFeatureStatus(
return kGpuFeatureStatusEnabled;
}
-GpuFeatureStatus GetFlash3DFeatureStatus(
- const std::set<int>& blocklisted_features,
- bool use_swift_shader) {
- if (use_swift_shader) {
- // This is for testing only. Chrome should exercise the GPU accelerated
- // path on top of SwiftShader driver.
- return kGpuFeatureStatusEnabled;
- }
- if (blocklisted_features.count(GPU_FEATURE_TYPE_FLASH3D))
- return kGpuFeatureStatusBlocklisted;
- return kGpuFeatureStatusEnabled;
-}
-
-GpuFeatureStatus GetFlashStage3DFeatureStatus(
- const std::set<int>& blocklisted_features,
- bool use_swift_shader) {
- if (use_swift_shader) {
- // This is for testing only. Chrome should exercise the GPU accelerated
- // path on top of SwiftShader driver.
- return kGpuFeatureStatusEnabled;
- }
- if (blocklisted_features.count(GPU_FEATURE_TYPE_FLASH_STAGE3D))
- return kGpuFeatureStatusBlocklisted;
- return kGpuFeatureStatusEnabled;
-}
-
-GpuFeatureStatus GetFlashStage3DBaselineFeatureStatus(
- const std::set<int>& blocklisted_features,
- bool use_swift_shader) {
- if (use_swift_shader) {
- // This is for testing only. Chrome should exercise the GPU accelerated
- // path on top of SwiftShader driver.
- return kGpuFeatureStatusEnabled;
- }
- if (blocklisted_features.count(GPU_FEATURE_TYPE_FLASH_STAGE3D) ||
- blocklisted_features.count(GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE))
- return kGpuFeatureStatusBlocklisted;
- return kGpuFeatureStatusEnabled;
-}
-
GpuFeatureStatus GetAcceleratedVideoDecodeFeatureStatus(
const std::set<int>& blocklisted_features,
bool use_swift_shader) {
@@ -436,14 +389,8 @@ GpuFeatureInfo ComputeGpuFeatureInfoWithHardwareAccelerationDisabled() {
kGpuFeatureStatusSoftware;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] =
kGpuFeatureStatusSoftware;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH3D] =
- kGpuFeatureStatusDisabled;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D] =
- kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE] =
kGpuFeatureStatusDisabled;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE] =
- kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] =
kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL2] =
@@ -474,14 +421,8 @@ GpuFeatureInfo ComputeGpuFeatureInfoWithNoGpu() {
kGpuFeatureStatusSoftware;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] =
kGpuFeatureStatusDisabled;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH3D] =
- kGpuFeatureStatusDisabled;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D] =
- kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE] =
kGpuFeatureStatusDisabled;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE] =
- kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] =
kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL2] =
@@ -512,14 +453,8 @@ GpuFeatureInfo ComputeGpuFeatureInfoForSwiftShader() {
kGpuFeatureStatusSoftware;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] =
kGpuFeatureStatusSoftware;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH3D] =
- kGpuFeatureStatusDisabled;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D] =
- kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE] =
kGpuFeatureStatusDisabled;
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE] =
- kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_GPU_RASTERIZATION] =
kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL2] =
@@ -601,13 +536,6 @@ GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info,
GetWebGL2FeatureStatus(blocklisted_features, use_swift_shader);
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS] =
Get2DCanvasFeatureStatus(blocklisted_features, use_swift_shader);
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH3D] =
- GetFlash3DFeatureStatus(blocklisted_features, use_swift_shader);
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D] =
- GetFlashStage3DFeatureStatus(blocklisted_features, use_swift_shader);
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_FLASH_STAGE3D_BASELINE] =
- GetFlashStage3DBaselineFeatureStatus(blocklisted_features,
- use_swift_shader);
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE] =
GetAcceleratedVideoDecodeFeatureStatus(blocklisted_features,
use_swift_shader);
diff --git a/chromium/gpu/config/gpu_workaround_list.txt b/chromium/gpu/config/gpu_workaround_list.txt
index f5735871a9f..0e2328e5591 100644
--- a/chromium/gpu/config/gpu_workaround_list.txt
+++ b/chromium/gpu/config/gpu_workaround_list.txt
@@ -38,7 +38,6 @@ disable_ext_draw_buffers
disable_gl_rgb_format
disable_half_float_for_gmb
disable_imagebitmap_from_video_using_gpu
-disable_larger_than_screen_overlays
disable_mediafoundation_async_h264_encoding
disable_multisampling_color_mask_usage
disable_nv12_dxgi_video
@@ -49,7 +48,6 @@ disable_program_cache
disable_program_caching_for_transform_feedback
disable_program_disk_cache
disable_software_to_accelerated_canvas_upgrade
-disable_texture_cube_map_seamless
disable_texture_storage
disable_timestamp_queries
disable_vp_scaling
@@ -80,7 +78,6 @@ force_rgb10a2_overlay_support_flags
force_update_scissor_state_when_binding_fbo0
get_frag_data_info_bug
gl_clear_broken
-ignore_egl_sync_failures
init_gl_position_in_vertex_shader
init_one_cube_map_level_before_copyteximage
init_texture_max_anisotropy
@@ -89,7 +86,6 @@ init_vertex_attributes
limit_d3d11_video_decoder_to_11_0
max_3d_array_texture_size_1024
max_copy_texture_chromium_size_1048576
-max_copy_texture_chromium_size_262144
max_msaa_sample_count_2
max_msaa_sample_count_4
max_texture_size_limit_4096
@@ -126,7 +122,6 @@ use_client_side_arrays_for_stream_buffers
use_copyteximage2d_instead_of_readpixels_on_multisampled_textures
use_empty_video_hdr_metadata
use_eqaa_storage_samples_2
-use_es2_for_oopr
use_gpu_driver_workaround_for_testing
use_intermediary_for_copy_texture_image
use_non_zero_size_for_client_side_stream_buffers
diff --git a/chromium/gpu/config/skia_limits.cc b/chromium/gpu/config/skia_limits.cc
index 82fb1dc9a58..5293e4314ea 100644
--- a/chromium/gpu/config/skia_limits.cc
+++ b/chromium/gpu/config/skia_limits.cc
@@ -25,12 +25,7 @@ void DetermineGrCacheLimitsFromAvailableMemory(
#if !defined(OS_NACL)
// The limit of the bytes allocated toward GPU resources in the GrContext's
// GPU cache.
-#if defined(OS_FUCHSIA)
- // Reduce protected budget on fuchsia due to https://fxb/36620.
- constexpr size_t kMaxLowEndGaneshResourceCacheBytes = 24 * 1024 * 1024;
-#else
constexpr size_t kMaxLowEndGaneshResourceCacheBytes = 48 * 1024 * 1024;
-#endif // defined(OS_FUCHSIA)
constexpr size_t kMaxHighEndGaneshResourceCacheBytes = 256 * 1024 * 1024;
// Limits for glyph cache textures.
constexpr size_t kMaxLowEndGlyphCacheTextureBytes = 1024 * 512 * 4;
diff --git a/chromium/gpu/config/software_rendering_list.json b/chromium/gpu/config/software_rendering_list.json
index 1df09bc1c4e..09ee17d06ec 100644
--- a/chromium/gpu/config/software_rendering_list.json
+++ b/chromium/gpu/config/software_rendering_list.json
@@ -13,19 +13,17 @@
"multi_gpu_category": "any",
"features": [
"accelerated_webgl",
- "flash3d",
- "flash_stage3d",
"gpu_rasterization"
]
},
{
"id": 3,
"description": "GL driver is software rendered. GPU acceleration is disabled",
- "cr_bugs": [59302, 315217],
+ "cr_bugs": [59302, 315217, 1155974],
"os": {
"type": "linux"
},
- "gl_renderer": "(?i).*software.*",
+ "gl_renderer": "(?i).*(software|llvmpipe|softpipe).*",
"features": [
"all"
]
@@ -38,8 +36,6 @@
"device_id": ["0x27AE", "0x27A2"],
"features": [
"accelerated_webgl",
- "flash3d",
- "flash_stage3d",
"accelerated_2d_canvas"
]
},
@@ -100,8 +96,6 @@
"multi_gpu_category": "any",
"features": [
"accelerated_webgl",
- "flash3d",
- "flash_stage3d",
"gpu_rasterization"
]
},
@@ -337,7 +331,7 @@
},
{
"id": 50,
- "description": "Disable VMware software renderer on older Mesa",
+ "description": "Disable VMware virtualized renderer on older Mesa",
"cr_bugs": [145531, 332596, 571899, 629434],
"os": {
"type": "linux"
@@ -358,16 +352,6 @@
]
},
{
- "id": 53,
- "description": "The Intel GMA500 is too slow for Stage3D",
- "cr_bugs": [152096],
- "vendor_id": "0x8086",
- "device_id": ["0x8108", "0x8109"],
- "features": [
- "flash_stage3d"
- ]
- },
- {
"id": 56,
"description": "NVIDIA linux drivers are unstable when using multiple Open GL contexts and with low memory",
"cr_bugs": [145600],
@@ -381,9 +365,7 @@
"value": "331.38"
},
"features": [
- "accelerated_video_decode",
- "flash3d",
- "flash_stage3d"
+ "accelerated_video_decode"
]
},
{
@@ -439,57 +421,6 @@
]
},
{
- "id": 69,
- "description": "NVIDIA driver 196.21 is buggy with Stage3D baseline mode",
- "cr_bugs": [172771],
- "os": {
- "type": "win"
- },
- "vendor_id": "0x10de",
- "driver_version": {
- "comment": "INF_version: 8.17.11.9621; date: 01/11/2010",
- "op": "=",
- "value": "8.17.11.9621"
- },
- "features": [
- "flash_stage3d_baseline"
- ]
- },
- {
- "id": 70,
- "description": "NVIDIA driver 182.67 is buggy with Stage3D baseline mode",
- "cr_bugs": [172771],
- "os": {
- "type": "win"
- },
- "vendor_id": "0x10de",
- "driver_version": {
- "comment": "INF_version: 7.15.11.8267; date: 05/12/2009",
- "op": "=",
- "value": "7.15.11.8267"
- },
- "features": [
- "flash_stage3d_baseline"
- ]
- },
- {
- "id": 71,
- "description": "All Intel drivers before 8.15.10.2021 are buggy with Stage3D baseline mode",
- "cr_bugs": [172771],
- "os": {
- "type": "win"
- },
- "vendor_id": "0x8086",
- "driver_version": {
- "comment": "INF_version: 8.15.10.2021; date: 12/14/2009",
- "op": "<",
- "value": "8.15.10.2021"
- },
- "features": [
- "flash_stage3d_baseline"
- ]
- },
- {
"id": 72,
"description": "NVIDIA GeForce 6200 LE is buggy with WebGL",
"cr_bugs": [232529],
@@ -1247,10 +1178,17 @@
},
{
"id": 140,
- "comment": "Corresponds to GPU driver bugs #19, #214",
- "description": "MSAA and depth texture buggy on Adreno 3xx, also disable WebGL2",
- "cr_bugs": [449116, 698197],
+ "comment": "Corresponds to GPU driver bugs #19, #214, #359",
+ "description": "MSAA and depth texture buggy on Adreno 3xx prior to Android 9, also disable WebGL2",
+ "cr_bugs": [449116, 698197, 1042214],
"gl_renderer": "Adreno \\(TM\\) 3.*",
+ "os": {
+ "type": "android",
+ "version": {
+ "op": "<",
+ "value": "9.0"
+ }
+ },
"features": [
"accelerated_webgl2"
]
@@ -1632,8 +1570,6 @@
},
"features": [
"accelerated_webgl",
- "flash3d",
- "flash_stage3d",
"accelerated_2d_canvas"
]
},
@@ -1651,8 +1587,6 @@
},
"features": [
"accelerated_webgl",
- "flash3d",
- "flash_stage3d",
"accelerated_2d_canvas"
]
},
@@ -1685,9 +1619,33 @@
"op": "=",
"value": "8.17.10"
},
+ "exceptions": [
+ {
+ "driver_version": {
+ "op": ">=",
+ "value": "8.17.10.1433"
+ }
+ }
+ ],
"features": [
"all"
]
+ },
+ {
+ "id": 167,
+      "description": "8.17.10.1433 AMD drivers trigger hangs in video stack",
+ "cr_bugs": [1160623],
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x1002",
+ "driver_version": {
+ "op": "=",
+ "value": "8.17.10.1433"
+ },
+ "features": [
+ "accelerated_video_decode"
+ ]
}
]
}
diff --git a/chromium/gpu/gles2_conform_support/egl/context.cc b/chromium/gpu/gles2_conform_support/egl/context.cc
index 2e5c0300ab4..a39ef35be00 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.cc
+++ b/chromium/gpu/gles2_conform_support/egl/context.cc
@@ -5,7 +5,7 @@
#include "gpu/gles2_conform_support/egl/context.h"
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/command_buffer/client/gles2_lib.h"
diff --git a/chromium/gpu/ipc/BUILD.gn b/chromium/gpu/ipc/BUILD.gn
index 98fbe84595f..c418f423f54 100644
--- a/chromium/gpu/ipc/BUILD.gn
+++ b/chromium/gpu/ipc/BUILD.gn
@@ -8,6 +8,8 @@ component("gl_in_process_context") {
sources = [
"command_buffer_task_executor.cc",
"command_buffer_task_executor.h",
+ "display_compositor_memory_and_task_controller_on_gpu.cc",
+ "display_compositor_memory_and_task_controller_on_gpu.h",
"gl_in_process_context.cc",
"gl_in_process_context.h",
"gl_in_process_context_export.h",
diff --git a/chromium/gpu/ipc/client/gpu_context_tests.h b/chromium/gpu/ipc/client/gpu_context_tests.h
index a45ae619fc4..1ffa5db3146 100644
--- a/chromium/gpu/ipc/client/gpu_context_tests.h
+++ b/chromium/gpu/ipc/client/gpu_context_tests.h
@@ -135,7 +135,7 @@ CONTEXT_TEST_F(SignalTest, InvalidSignalQueryUnboundTest) {
// due to inconsistent initialization of InProcessCommandBuffer which
// isn't used on that platform. Restrict it to Android for now.
-#if defined(OS_ANDROID)
+#if defined(OS_ANDROID) || defined(OS_WIN)
class GpuFenceTest : public ContextTestBase {
public:
@@ -189,7 +189,7 @@ CONTEXT_TEST_F(GpuFenceTest, BasicGpuFenceTest) {
gl_->DestroyGpuFenceCHROMIUM(id2);
}
-#endif // defined(OS_ANDROID)
+#endif // defined(OS_ANDROID) || defined(OS_WIN)
} // namespace
diff --git a/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc b/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
index f289a6e3786..7723fb472d6 100644
--- a/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
+++ b/chromium/gpu/ipc/client/gpu_in_process_context_tests.cc
@@ -41,7 +41,9 @@ class ContextTestBase : public testing::Test {
/*surface=*/nullptr, /*offscreen=*/true,
/*window=*/gpu::kNullSurfaceHandle, attributes,
gpu::SharedMemoryLimits(), gpu_memory_buffer_manager_.get(),
- /*image_factory=*/nullptr, base::ThreadTaskRunnerHandle::Get());
+ /*image_factory=*/nullptr, /*gpu_task_runner_helper=*/nullptr,
+ /*display_compositor_memory_and_task_contoller_on_gpu=*/nullptr,
+ base::ThreadTaskRunnerHandle::Get());
DCHECK_EQ(result, gpu::ContextResult::kSuccess);
return context;
}
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index 964fb5b76d4..ffa0fc321a6 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -210,7 +210,6 @@ mojom("interfaces") {
"context_result.mojom",
"device_perf_info.mojom",
"dx_diag_node.mojom",
- "gpu_extra_info.mojom",
"gpu_feature_info.mojom",
"gpu_info.mojom",
"gpu_peak_memory.mojom",
@@ -354,24 +353,6 @@ mojom("interfaces") {
{
types = [
{
- mojom = "gpu.mojom.GpuExtraInfo"
- cpp = "::gpu::GpuExtraInfo"
- },
- {
- mojom = "gpu.mojom.ANGLEFeature"
- cpp = "::gpu::ANGLEFeature"
- },
- ]
- traits_sources = [ "gpu_extra_info_mojom_traits.cc" ]
- traits_headers = [ "gpu_extra_info_mojom_traits.h" ]
- traits_public_deps = [
- "//gpu/config",
- "//ui/gfx/geometry/mojom",
- ]
- },
- {
- types = [
- {
mojom = "gpu.mojom.GpuFeatureStatus"
cpp = "::gpu::GpuFeatureStatus"
},
diff --git a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
index e2138c149a4..1466177dffb 100644
--- a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
+++ b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
@@ -127,6 +127,7 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::Capabilities)
IPC_STRUCT_TRAITS_MEMBER(image_ycbcr_420v_disabled_for_video_frames)
IPC_STRUCT_TRAITS_MEMBER(image_ar30)
IPC_STRUCT_TRAITS_MEMBER(image_ab30)
+ IPC_STRUCT_TRAITS_MEMBER(image_ycbcr_p010)
IPC_STRUCT_TRAITS_MEMBER(render_buffer_format_bgra8888)
IPC_STRUCT_TRAITS_MEMBER(occlusion_query)
IPC_STRUCT_TRAITS_MEMBER(occlusion_query_boolean)
diff --git a/chromium/gpu/ipc/common/gpu_extra_info.mojom b/chromium/gpu/ipc/common/gpu_extra_info.mojom
deleted file mode 100644
index 556fa80e4b6..00000000000
--- a/chromium/gpu/ipc/common/gpu_extra_info.mojom
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// gpu/config/gpu_extra_info.h
-module gpu.mojom;
-
-import "ui/gfx/mojom/buffer_types.mojom";
-
-// gpu::ANGLEFeature
-struct ANGLEFeature {
- string name;
- string category;
- string description;
- string bug;
- string status;
- string condition;
-};
-
-// gpu:GpuExtraInfo
-struct GpuExtraInfo {
- // List of features queried from ANGLE
- array<ANGLEFeature> angle_features;
-
- [EnableIf=use_x11]
- uint64 system_visual;
- [EnableIf=use_x11]
- uint64 rgba_visual;
- [EnableIf=use_x11]
- array<gfx.mojom.BufferUsageAndFormat> gpu_memory_buffer_support_x11;
-};
diff --git a/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.cc b/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.cc
deleted file mode 100644
index fe765a71caa..00000000000
--- a/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.cc
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/common/gpu_extra_info_mojom_traits.h"
-
-#include "build/build_config.h"
-#include "ui/gfx/mojom/buffer_types_mojom_traits.h"
-
-namespace mojo {
-
-// static
-bool StructTraits<gpu::mojom::ANGLEFeatureDataView, gpu::ANGLEFeature>::Read(
- gpu::mojom::ANGLEFeatureDataView data,
- gpu::ANGLEFeature* out) {
- return data.ReadName(&out->name) && data.ReadCategory(&out->category) &&
- data.ReadDescription(&out->description) && data.ReadBug(&out->bug) &&
- data.ReadStatus(&out->status) && data.ReadCondition(&out->condition);
-}
-
-// static
-bool StructTraits<gpu::mojom::GpuExtraInfoDataView, gpu::GpuExtraInfo>::Read(
- gpu::mojom::GpuExtraInfoDataView data,
- gpu::GpuExtraInfo* out) {
- if (!data.ReadAngleFeatures(&out->angle_features))
- return false;
-#if defined(USE_X11)
- out->system_visual = data.system_visual();
- out->rgba_visual = data.rgba_visual();
- if (!data.ReadGpuMemoryBufferSupportX11(&out->gpu_memory_buffer_support_x11))
- return false;
-#endif
- return true;
-}
-
-} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.h b/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.h
deleted file mode 100644
index cc80a9c35c1..00000000000
--- a/chromium/gpu/ipc/common/gpu_extra_info_mojom_traits.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_COMMON_GPU_EXTRA_INFO_MOJOM_TRAITS_H_
-#define GPU_IPC_COMMON_GPU_EXTRA_INFO_MOJOM_TRAITS_H_
-
-#include "gpu/config/gpu_extra_info.h"
-#include "gpu/ipc/common/gpu_extra_info.mojom.h"
-#include "ui/gfx/mojom/buffer_types_mojom_traits.h"
-
-namespace mojo {
-
-template <>
-struct StructTraits<gpu::mojom::ANGLEFeatureDataView, gpu::ANGLEFeature> {
- static bool Read(gpu::mojom::ANGLEFeatureDataView data,
- gpu::ANGLEFeature* out);
-
- static const std::string& name(const gpu::ANGLEFeature& input) {
- return input.name;
- }
-
- static const std::string& category(const gpu::ANGLEFeature& input) {
- return input.category;
- }
-
- static const std::string& description(const gpu::ANGLEFeature& input) {
- return input.description;
- }
-
- static const std::string& bug(const gpu::ANGLEFeature& input) {
- return input.bug;
- }
-
- static const std::string& status(const gpu::ANGLEFeature& input) {
- return input.status;
- }
-
- static const std::string& condition(const gpu::ANGLEFeature& input) {
- return input.condition;
- }
-};
-
-template <>
-struct StructTraits<gpu::mojom::GpuExtraInfoDataView, gpu::GpuExtraInfo> {
- static bool Read(gpu::mojom::GpuExtraInfoDataView data,
- gpu::GpuExtraInfo* out);
-
- static const std::vector<gpu::ANGLEFeature>& angle_features(
- const gpu::GpuExtraInfo& input) {
- return input.angle_features;
- }
-
-#if defined(USE_X11)
- static uint64_t system_visual(const gpu::GpuExtraInfo& input) {
- return input.system_visual;
- }
-
- static uint64_t rgba_visual(const gpu::GpuExtraInfo& input) {
- return input.rgba_visual;
- }
-
- static const std::vector<gfx::BufferUsageAndFormat>&
- gpu_memory_buffer_support_x11(const gpu::GpuExtraInfo& input) {
- return input.gpu_memory_buffer_support_x11;
- }
-#endif
-};
-
-} // namespace mojo
-
-#endif // GPU_IPC_COMMON_GPU_EXTRA_INFO_MOJOM_TRAITS_H_
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc
index 1fd21b68e83..cd90a79c6d6 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.cc
@@ -8,12 +8,12 @@
#include "base/android/android_hardware_buffer_compat.h"
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
+#include "ui/gfx/android/android_surface_control_compat.h"
#include "ui/gfx/geometry/size.h"
-#include "ui/gl/android/android_surface_control_compat.h"
namespace gpu {
@@ -55,7 +55,7 @@ AHardwareBuffer_Desc GetBufferDescription(const gfx::Size& size,
desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
if (usage == gfx::BufferUsage::SCANOUT)
- desc.usage |= gl::SurfaceControl::RequiredUsage();
+ desc.usage |= gfx::SurfaceControl::RequiredUsage();
break;
default:
NOTREACHED();
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
index 798479aab9e..af73c5761cb 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
@@ -5,7 +5,7 @@
#include <wrl.h>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc
index 46b10a7452f..65c4aeea41e 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_io_surface.cc
@@ -5,7 +5,7 @@
#include "gpu/ipc/common/gpu_memory_buffer_impl_io_surface.h"
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/debug/dump_without_crashing.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
@@ -18,19 +18,22 @@ namespace {
// The maximum number of times to dump before throttling (to avoid sending
// thousands of crash dumps).
+
const int kMaxCrashDumps = 10;
uint32_t LockFlags(gfx::BufferUsage usage) {
switch (usage) {
case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
- return kIOSurfaceLockAvoidSync;
case gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE:
+ // The AvoidSync call has the property that it will not preserve the
+ // previous contents of the buffer if those contents were written by a
+ // GPU.
+ return kIOSurfaceLockAvoidSync;
+ case gfx::BufferUsage::SCANOUT_VEA_CPU_READ:
// This constant is used for buffers used by video capture. On macOS,
// these buffers are only ever written to in the capture process,
- // directly as IOSurfaces. Once they are sent to other processes, no CPU
- // writes are performed.
- // TODO(https://crbug.com/1130101): A more accurate usage constant may be
- // appropriate.
+ // directly as IOSurfaces.
+ // Once they are sent to other processes, no CPU writes are performed.
return kIOSurfaceLockReadOnly;
case gfx::BufferUsage::GPU_READ:
case gfx::BufferUsage::SCANOUT:
@@ -38,6 +41,7 @@ uint32_t LockFlags(gfx::BufferUsage usage) {
case gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE:
case gfx::BufferUsage::SCANOUT_CPU_READ_WRITE:
case gfx::BufferUsage::SCANOUT_VDA_WRITE:
+ case gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE:
return 0;
}
NOTREACHED();
@@ -67,13 +71,12 @@ GpuMemoryBufferImplIOSurface::CreateFromHandle(
gfx::BufferFormat format,
gfx::BufferUsage usage,
DestructionCallback callback) {
- if (!handle.mach_port) {
- LOG(ERROR) << "Invalid IOSurface mach port returned to client.";
+ if (!handle.io_surface) {
+ LOG(ERROR) << "Invalid IOSurface returned to client.";
return nullptr;
}
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
- IOSurfaceLookupFromMachPort(handle.mach_port.get()));
+ gfx::ScopedIOSurface io_surface = handle.io_surface;
if (!io_surface) {
LOG(ERROR) << "Failed to open IOSurface via mach port returned to client.";
static int dump_counter = kMaxCrashDumps;
@@ -101,13 +104,11 @@ base::OnceClosure GpuMemoryBufferImplIOSurface::AllocateForTesting(
gfx::BufferFormat format,
gfx::BufferUsage usage,
gfx::GpuMemoryBufferHandle* handle) {
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
- gfx::CreateIOSurface(size, format));
- DCHECK(io_surface);
gfx::GpuMemoryBufferId kBufferId(1);
handle->type = gfx::IO_SURFACE_BUFFER;
handle->id = kBufferId;
- handle->mach_port.reset(IOSurfaceCreateMachPort(io_surface));
+ handle->io_surface.reset(gfx::CreateIOSurface(size, format));
+ DCHECK(handle->io_surface);
return base::DoNothing();
}
@@ -152,7 +153,7 @@ gfx::GpuMemoryBufferHandle GpuMemoryBufferImplIOSurface::CloneHandle() const {
gfx::GpuMemoryBufferHandle handle;
handle.type = gfx::IO_SURFACE_BUFFER;
handle.id = id_;
- handle.mach_port.reset(IOSurfaceCreateMachPort(io_surface_));
+ handle.io_surface = io_surface_;
return handle;
}
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc
index 2d1d14dbf17..fd0c7ab97d1 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_shared_memory.cc
@@ -9,7 +9,7 @@
#include <utility>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/format_macros.h"
#include "base/memory/ptr_util.h"
#include "base/numerics/safe_math.h"
@@ -163,6 +163,8 @@ bool GpuMemoryBufferImplSharedMemory::IsUsageSupported(gfx::BufferUsage usage) {
case gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE:
case gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE:
case gfx::BufferUsage::SCANOUT_VDA_WRITE:
+ case gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE:
+ case gfx::BufferUsage::SCANOUT_VEA_CPU_READ:
case gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE:
return false;
}
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
index 94d4a6c5cbd..e16be0d1538 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_test_template.h
@@ -98,7 +98,9 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, CreateFromHandle) {
gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_VDA_WRITE,
+ gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
+ gfx::BufferUsage::SCANOUT_VEA_CPU_READ,
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
};
for (auto usage : usages) {
@@ -139,7 +141,9 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, CreateFromHandleSmallBuffer) {
gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_VDA_WRITE,
+ gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
+ gfx::BufferUsage::SCANOUT_VEA_CPU_READ,
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
};
for (auto usage : usages) {
@@ -316,7 +320,9 @@ TYPED_TEST_P(GpuMemoryBufferImplTest, SerializeAndDeserialize) {
gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_VDA_WRITE,
+ gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
+ gfx::BufferUsage::SCANOUT_VEA_CPU_READ,
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
};
for (auto usage : usages) {
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
index 504fad4e2ff..2df43ca9891 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_support.cc
@@ -92,10 +92,13 @@ bool GpuMemoryBufferSupport::IsNativeGpuMemoryBufferConfigurationSupported(
format == gfx::BufferFormat::R_8 ||
format == gfx::BufferFormat::RGBA_F16 ||
format == gfx::BufferFormat::BGRA_1010102 ||
- format == gfx::BufferFormat::YUV_420_BIPLANAR;
+ format == gfx::BufferFormat::YUV_420_BIPLANAR ||
+ format == gfx::BufferFormat::P010;
case gfx::BufferUsage::SCANOUT_VDA_WRITE:
+ case gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE:
case gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE:
case gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE:
+ case gfx::BufferUsage::SCANOUT_VEA_CPU_READ:
case gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE:
return false;
}
@@ -114,8 +117,10 @@ bool GpuMemoryBufferSupport::IsNativeGpuMemoryBufferConfigurationSupported(
case gfx::BufferUsage::SCANOUT_CPU_READ_WRITE:
case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
case gfx::BufferUsage::SCANOUT_VDA_WRITE:
+ case gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE:
case gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE:
case gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE:
+ case gfx::BufferUsage::SCANOUT_VEA_CPU_READ:
case gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE:
return false;
}
@@ -143,8 +148,10 @@ bool GpuMemoryBufferSupport::IsNativeGpuMemoryBufferConfigurationSupported(
case gfx::BufferUsage::SCANOUT_CPU_READ_WRITE:
case gfx::BufferUsage::GPU_READ_CPU_READ_WRITE:
case gfx::BufferUsage::SCANOUT_VDA_WRITE:
+ case gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE:
case gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE:
case gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE:
+ case gfx::BufferUsage::SCANOUT_VEA_CPU_READ:
case gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE:
return false;
}
diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom
index d17f511bfee..f6c9b477e07 100644
--- a/chromium/gpu/ipc/common/gpu_preferences.mojom
+++ b/chromium/gpu/ipc/common/gpu_preferences.mojom
@@ -77,13 +77,17 @@ struct GpuPreferences {
bool watchdog_starts_backgrounded;
GrContextType gr_context_type;
VulkanImplementationName use_vulkan;
+ bool enable_vulkan_protected_memory;
bool enforce_vulkan_protected_memory;
bool disable_vulkan_surface;
bool disable_vulkan_fallback_to_gl_for_testing;
+ uint32 vulkan_heap_memory_limit;
+ uint32 vulkan_sync_cpu_memory_limit;
bool enable_metal;
bool enable_gpu_benchmarking_extension;
bool enable_webgpu;
bool enable_dawn_backend_validation;
+ bool disable_dawn_robustness;
bool enable_gpu_blocked_time_metric;
bool enable_perf_data_collection;
diff --git a/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h b/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h
index e68ee0e09ba..c65c6e6bb74 100644
--- a/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h
+++ b/chromium/gpu/ipc/common/gpu_preferences_mojom_traits.h
@@ -6,6 +6,7 @@
#define GPU_IPC_COMMON_GPU_PREFERENCES_MOJOM_TRAITS_H_
#include <vector>
+#include "build/chromeos_buildflags.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/ipc/common/gpu_preferences.mojom-shared.h"
@@ -162,17 +163,22 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
return false;
if (!prefs.ReadUseVulkan(&out->use_vulkan))
return false;
+ out->enable_vulkan_protected_memory =
+ prefs.enable_vulkan_protected_memory();
out->enforce_vulkan_protected_memory =
prefs.enforce_vulkan_protected_memory();
out->disable_vulkan_surface = prefs.disable_vulkan_surface();
out->disable_vulkan_fallback_to_gl_for_testing =
prefs.disable_vulkan_fallback_to_gl_for_testing();
+ out->vulkan_heap_memory_limit = prefs.vulkan_heap_memory_limit();
+ out->vulkan_sync_cpu_memory_limit = prefs.vulkan_sync_cpu_memory_limit();
out->enable_metal = prefs.enable_metal();
out->enable_gpu_benchmarking_extension =
prefs.enable_gpu_benchmarking_extension();
out->enable_webgpu = prefs.enable_webgpu();
out->enable_dawn_backend_validation =
prefs.enable_dawn_backend_validation();
+ out->disable_dawn_robustness = prefs.disable_dawn_robustness();
out->enable_gpu_blocked_time_metric =
prefs.enable_gpu_blocked_time_metric();
out->enable_perf_data_collection = prefs.enable_perf_data_collection();
@@ -185,7 +191,7 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->enable_native_gpu_memory_buffers =
prefs.enable_native_gpu_memory_buffers();
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
out->platform_disallows_chromeos_direct_video_decoder =
prefs.platform_disallows_chromeos_direct_video_decoder();
#endif
@@ -329,6 +335,9 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
const gpu::GpuPreferences& prefs) {
return prefs.use_vulkan;
}
+ static bool enable_vulkan_protected_memory(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_vulkan_protected_memory;
+ }
static bool enforce_vulkan_protected_memory(
const gpu::GpuPreferences& prefs) {
return prefs.enforce_vulkan_protected_memory;
@@ -340,6 +349,13 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
const gpu::GpuPreferences& prefs) {
return prefs.disable_vulkan_fallback_to_gl_for_testing;
}
+ static uint32_t vulkan_heap_memory_limit(const gpu::GpuPreferences& prefs) {
+ return prefs.vulkan_heap_memory_limit;
+ }
+ static uint32_t vulkan_sync_cpu_memory_limit(
+ const gpu::GpuPreferences& prefs) {
+ return prefs.vulkan_sync_cpu_memory_limit;
+ }
static bool enable_metal(const gpu::GpuPreferences& prefs) {
return prefs.enable_metal;
}
@@ -353,6 +369,9 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool enable_dawn_backend_validation(const gpu::GpuPreferences& prefs) {
return prefs.enable_dawn_backend_validation;
}
+ static bool disable_dawn_robustness(const gpu::GpuPreferences& prefs) {
+ return prefs.disable_dawn_robustness;
+ }
static bool enable_gpu_blocked_time_metric(const gpu::GpuPreferences& prefs) {
return prefs.enable_gpu_blocked_time_metric;
}
@@ -369,7 +388,7 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
const gpu::GpuPreferences& prefs) {
return prefs.enable_native_gpu_memory_buffers;
}
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
static bool platform_disallows_chromeos_direct_video_decoder(
const gpu::GpuPreferences& prefs) {
return prefs.platform_disallows_chromeos_direct_video_decoder;
diff --git a/chromium/gpu/ipc/common/gpu_surface_tracker.cc b/chromium/gpu/ipc/common/gpu_surface_tracker.cc
index 35453fa7707..fa701dd5c4f 100644
--- a/chromium/gpu/ipc/common/gpu_surface_tracker.cc
+++ b/chromium/gpu/ipc/common/gpu_surface_tracker.cc
@@ -17,14 +17,14 @@ namespace gpu {
#if defined(OS_ANDROID)
GpuSurfaceTracker::SurfaceRecord::SurfaceRecord(
gfx::AcceleratedWidget widget,
- jobject j_surface,
+ const base::android::JavaRef<jobject>& j_surface,
bool can_be_used_with_surface_control)
: widget(widget),
can_be_used_with_surface_control(can_be_used_with_surface_control) {
- // TODO(liberato): It would be nice to assert |surface != nullptr|, but we
+ // TODO(liberato): It would be nice to assert |surface|, but we
// can't. in_process_context_factory.cc (for tests) actually calls us without
// a Surface from java. Presumably, nobody uses it. crbug.com/712717 .
- if (j_surface != nullptr)
+ if (j_surface)
surface = gl::ScopedJavaSurface::AcquireExternalSurface(j_surface);
}
#else // defined(OS_ANDROID)
@@ -98,8 +98,7 @@ gl::ScopedJavaSurface GpuSurfaceTracker::AcquireJavaSurface(
*can_be_used_with_surface_control =
it->second.can_be_used_with_surface_control;
- return gl::ScopedJavaSurface::AcquireExternalSurface(
- j_surface.j_surface().obj());
+ return gl::ScopedJavaSurface::AcquireExternalSurface(j_surface.j_surface());
}
#endif
diff --git a/chromium/gpu/ipc/common/gpu_surface_tracker.h b/chromium/gpu/ipc/common/gpu_surface_tracker.h
index 9542b22198d..a79ff8423c4 100644
--- a/chromium/gpu/ipc/common/gpu_surface_tracker.h
+++ b/chromium/gpu/ipc/common/gpu_surface_tracker.h
@@ -12,11 +12,16 @@
#include "base/macros.h"
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
+#include "build/build_config.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/common/gpu_surface_lookup.h"
#include "gpu/ipc/common/surface_handle.h"
#include "ui/gfx/native_widget_types.h"
+#if defined(OS_ANDROID)
+#include "base/android/scoped_java_ref.h"
+#endif
+
namespace gpu {
// This class is used on Android and Mac, and is responsible for tracking native
@@ -37,7 +42,7 @@ class GPU_EXPORT GpuSurfaceTracker : public gpu::GpuSurfaceLookup {
struct SurfaceRecord {
#if defined(OS_ANDROID)
SurfaceRecord(gfx::AcceleratedWidget widget,
- jobject j_surface,
+ const base::android::JavaRef<jobject>& j_surface,
bool can_be_used_with_surface_control);
#else // defined(OS_ANDROID)
explicit SurfaceRecord(gfx::AcceleratedWidget widget);
diff --git a/chromium/gpu/ipc/common/luid_mojom_traits.h b/chromium/gpu/ipc/common/luid_mojom_traits.h
index e736c53c455..a08fbe4d7b4 100644
--- a/chromium/gpu/ipc/common/luid_mojom_traits.h
+++ b/chromium/gpu/ipc/common/luid_mojom_traits.h
@@ -5,6 +5,8 @@
#ifndef GPU_IPC_COMMON_LUID_MOJOM_TRAITS_H_
#define GPU_IPC_COMMON_LUID_MOJOM_TRAITS_H_
+#include <windows.h>
+
#include "gpu/ipc/common/luid.mojom-shared.h"
namespace mojo {
diff --git a/chromium/gpu/ipc/common/mojom_traits_unittest.cc b/chromium/gpu/ipc/common/mojom_traits_unittest.cc
index 3151e3ae04e..1942558e8fe 100644
--- a/chromium/gpu/ipc/common/mojom_traits_unittest.cc
+++ b/chromium/gpu/ipc/common/mojom_traits_unittest.cc
@@ -443,7 +443,7 @@ TEST_F(StructTraitsTest, GpuPreferences) {
TEST_F(StructTraitsTest, GpuFeatureInfo) {
GpuFeatureInfo input;
- input.status_values[GPU_FEATURE_TYPE_FLASH3D] =
+ input.status_values[GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS] =
gpu::kGpuFeatureStatusBlocklisted;
input.status_values[GPU_FEATURE_TYPE_ACCELERATED_WEBGL] =
gpu::kGpuFeatureStatusUndefined;
diff --git a/chromium/gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.cc b/chromium/gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.cc
new file mode 100644
index 00000000000..abfb11cfce5
--- /dev/null
+++ b/chromium/gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.cc
@@ -0,0 +1,104 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.h"
+
+#include "base/atomic_sequence_num.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "gpu/command_buffer/service/gpu_command_buffer_memory_tracker.h"
+#include "gpu/ipc/command_buffer_task_executor.h"
+#include "gpu/ipc/common/gpu_client_ids.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
+
+namespace gpu {
+
+namespace {
+// For generating command buffer id.
+base::AtomicSequenceNumber g_next_shared_route_id;
+
+CommandBufferId GenNextCommandBufferId() {
+ return CommandBufferIdFromChannelAndRoute(
+ kDisplayCompositorClientId, g_next_shared_route_id.GetNext() + 1);
+}
+} // namespace
+
+// Used for SkiaRenderer.
+DisplayCompositorMemoryAndTaskControllerOnGpu::
+ DisplayCompositorMemoryAndTaskControllerOnGpu(
+ scoped_refptr<SharedContextState> shared_context_state,
+ MailboxManager* mailbox_manager,
+ ImageFactory* image_factory,
+ SharedImageManager* shared_image_manager,
+ SyncPointManager* sync_point_manager,
+ const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& gpu_driver_bug_workarounds,
+ const GpuFeatureInfo& gpu_feature_info)
+ : shared_context_state_(std::move(shared_context_state)),
+ command_buffer_id_(g_next_shared_route_id.GetNext() + 1),
+ mailbox_manager_(mailbox_manager),
+ image_factory_(image_factory),
+ shared_image_manager_(shared_image_manager),
+ sync_point_manager_(sync_point_manager),
+ gpu_preferences_(gpu_preferences),
+ gpu_driver_bug_workarounds_(gpu_driver_bug_workarounds),
+ gpu_feature_info_(gpu_feature_info),
+ should_have_memory_tracker_(true) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+}
+
+// Used for InProcessCommandBuffer.
+DisplayCompositorMemoryAndTaskControllerOnGpu::
+ DisplayCompositorMemoryAndTaskControllerOnGpu(
+ CommandBufferTaskExecutor* task_executor,
+ ImageFactory* image_factory)
+ : shared_context_state_(task_executor->GetSharedContextState()),
+ command_buffer_id_(GenNextCommandBufferId()),
+ mailbox_manager_(task_executor->mailbox_manager()),
+ image_factory_(image_factory),
+ shared_image_manager_(task_executor->shared_image_manager()),
+ sync_point_manager_(task_executor->sync_point_manager()),
+ gpu_preferences_(task_executor->gpu_preferences()),
+ gpu_driver_bug_workarounds_(
+ GpuDriverBugWorkarounds(task_executor->gpu_feature_info()
+ .enabled_gpu_driver_bug_workarounds)),
+ gpu_feature_info_(task_executor->gpu_feature_info()) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+
+ // Android WebView won't have a memory tracker.
+ if (task_executor->ShouldCreateMemoryTracker()) {
+ should_have_memory_tracker_ = true;
+ memory_tracker_ = std::make_unique<GpuCommandBufferMemoryTracker>(
+ command_buffer_id_,
+ base::trace_event::MemoryDumpManager::GetInstance()
+ ->GetTracingProcessId(),
+ base::ThreadTaskRunnerHandle::Get(),
+ /* obserer=*/nullptr);
+ } else {
+ should_have_memory_tracker_ = false;
+ }
+}
+
+DisplayCompositorMemoryAndTaskControllerOnGpu::
+ ~DisplayCompositorMemoryAndTaskControllerOnGpu() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+}
+
+MemoryTracker* DisplayCompositorMemoryAndTaskControllerOnGpu::memory_tracker()
+ const {
+ if (!should_have_memory_tracker_)
+ return nullptr;
+
+ if (memory_tracker_)
+ return memory_tracker_.get();
+ else
+ return shared_context_state_->memory_tracker();
+}
+
+// Static
+CommandBufferId
+DisplayCompositorMemoryAndTaskControllerOnGpu::NextCommandBufferId() {
+ return GenNextCommandBufferId();
+}
+} // namespace gpu
diff --git a/chromium/gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.h b/chromium/gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.h
new file mode 100644
index 00000000000..798940e5587
--- /dev/null
+++ b/chromium/gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.h
@@ -0,0 +1,98 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_DISPLAY_COMPOSITOR_MEMORY_AND_TASK_CONTROLLER_ON_GPU_H_
+#define GPU_IPC_DISPLAY_COMPOSITOR_MEMORY_AND_TASK_CONTROLLER_ON_GPU_H_
+
+#include "base/macros.h"
+#include "base/sequence_checker.h"
+#include "gpu/command_buffer/common/context_creation_attribs.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/command_buffer/service/sequence_id.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/config/gpu_driver_bug_workarounds.h"
+#include "gpu/ipc/common/command_buffer_id.h"
+#include "gpu/ipc/gl_in_process_context_export.h"
+
+namespace gpu {
+class CommandBufferTaskExecutor;
+class ImageFactory;
+class MailboxManager;
+class SyncPointManager;
+class SharedImageManager;
+struct GpuFeatureInfo;
+struct GpuPreferences;
+
+// This class holds ownership of data structure that is only used on the gpu
+// thread. This class is expected to be 1:1 relationship with the display
+// compositor.
+class GL_IN_PROCESS_CONTEXT_EXPORT
+ DisplayCompositorMemoryAndTaskControllerOnGpu {
+ public:
+ // Used for SkiaRenderer.
+ DisplayCompositorMemoryAndTaskControllerOnGpu(
+ scoped_refptr<SharedContextState> shared_context_state,
+ MailboxManager* mailbox_manager,
+ ImageFactory* image_factory,
+ SharedImageManager* shared_image_manager,
+ SyncPointManager* sync_point_manager,
+ const GpuPreferences& gpu_preferences,
+ const GpuDriverBugWorkarounds& gpu_driver_bug_workarounds,
+ const GpuFeatureInfo& gpu_feature_info);
+ // Used for InProcessCommandBuffer.
+ DisplayCompositorMemoryAndTaskControllerOnGpu(
+ CommandBufferTaskExecutor* task_executor,
+ ImageFactory* image_factory);
+ DisplayCompositorMemoryAndTaskControllerOnGpu(
+ const DisplayCompositorMemoryAndTaskControllerOnGpu&) = delete;
+ DisplayCompositorMemoryAndTaskControllerOnGpu& operator=(
+ const DisplayCompositorMemoryAndTaskControllerOnGpu&) = delete;
+ ~DisplayCompositorMemoryAndTaskControllerOnGpu();
+
+ SharedContextState* shared_context_state() const {
+ return shared_context_state_.get();
+ }
+ MemoryTracker* memory_tracker() const;
+ CommandBufferId command_buffer_id() const { return command_buffer_id_; }
+ // Used to create SharedImageInterface. Only used for in process command
+ // buffer and shared image channels created for the display compositor in the
+ // GPU process. Not Used for cross process shared image stub.
+ static gpu::CommandBufferId NextCommandBufferId();
+
+ MailboxManager* mailbox_manager() const { return mailbox_manager_; }
+ ImageFactory* image_factory() const { return image_factory_; }
+ SharedImageManager* shared_image_manager() const {
+ return shared_image_manager_;
+ }
+ SyncPointManager* sync_point_manager() const { return sync_point_manager_; }
+ const GpuPreferences& gpu_preferences() const { return gpu_preferences_; }
+ const GpuDriverBugWorkarounds& gpu_driver_bug_workarounds() const {
+ return gpu_driver_bug_workarounds_;
+ }
+ const GpuFeatureInfo& gpu_feature_info() const { return gpu_feature_info_; }
+
+ private:
+ scoped_refptr<SharedContextState> shared_context_state_;
+
+ const CommandBufferId command_buffer_id_;
+
+ // Used for creating SharedImageFactory.
+ MailboxManager* mailbox_manager_;
+ ImageFactory* image_factory_;
+ SharedImageManager* shared_image_manager_;
+ SyncPointManager* sync_point_manager_;
+ const GpuPreferences& gpu_preferences_;
+ GpuDriverBugWorkarounds gpu_driver_bug_workarounds_;
+ const GpuFeatureInfo& gpu_feature_info_;
+
+ // Only needed for InProcessCommandBuffer.
+ bool should_have_memory_tracker_ = false;
+ std::unique_ptr<MemoryTracker> memory_tracker_;
+
+ SEQUENCE_CHECKER(gpu_sequence_checker_);
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_DISPLAY_COMPOSITOR_MEMORY_AND_TASK_CONTROLLER_ON_GPU_H_
diff --git a/chromium/gpu/ipc/gl_in_process_context.cc b/chromium/gpu/ipc/gl_in_process_context.cc
index a710c6a808a..7f46c821062 100644
--- a/chromium/gpu/ipc/gl_in_process_context.cc
+++ b/chromium/gpu/ipc/gl_in_process_context.cc
@@ -22,6 +22,7 @@
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/config/gpu_feature_info.h"
+#include "gpu/ipc/gpu_task_scheduler_helper.h"
#include "gpu/skia_bindings/gles2_implementation_with_grcontext_support.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gl/gl_image.h"
@@ -61,6 +62,8 @@ ContextResult GLInProcessContext::Initialize(
const SharedMemoryLimits& mem_limits,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
ImageFactory* image_factory,
+ GpuTaskSchedulerHelper* gpu_task_scheduler,
+ DisplayCompositorMemoryAndTaskControllerOnGpu* display_controller_on_gpu,
scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
// If a surface is provided, we are running in a webview and should not have
// a task runner. We must have a task runner in all other cases.
@@ -79,7 +82,10 @@ ContextResult GLInProcessContext::Initialize(
surface, is_offscreen, window, attribs, gpu_memory_buffer_manager,
image_factory,
/*gpu_channel_manager_delegate=*/nullptr, std::move(task_runner),
- /*task_sequence=*/nullptr, nullptr, nullptr);
+ /*task_sequence=*/
+ gpu_task_scheduler ? gpu_task_scheduler->GetTaskSequence() : nullptr,
+ /*display_compositor_memory_and_task_controller_on_gpu=*/
+ display_controller_on_gpu, nullptr, nullptr);
if (result != ContextResult::kSuccess) {
DLOG(ERROR) << "Failed to initialize InProcessCommmandBuffer";
return result;
@@ -93,6 +99,8 @@ ContextResult GLInProcessContext::Initialize(
LOG(ERROR) << "Failed to initialize GLES2CmdHelper";
return result;
}
+ if (gpu_task_scheduler)
+ gpu_task_scheduler->Initialize(gles2_helper_.get());
// Create a transfer buffer.
transfer_buffer_ = std::make_unique<TransferBuffer>(gles2_helper_.get());
diff --git a/chromium/gpu/ipc/gl_in_process_context.h b/chromium/gpu/ipc/gl_in_process_context.h
index 20c71ba84c3..5f0a0354426 100644
--- a/chromium/gpu/ipc/gl_in_process_context.h
+++ b/chromium/gpu/ipc/gl_in_process_context.h
@@ -16,6 +16,7 @@
#include "ui/gl/gl_surface.h"
namespace gpu {
+class GpuTaskSchedulerHelper;
class SharedImageInterface;
class TransferBuffer;
struct GpuFeatureInfo;
@@ -50,6 +51,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GLInProcessContext {
const SharedMemoryLimits& memory_limits,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
ImageFactory* image_factory,
+ GpuTaskSchedulerHelper* gpu_task_scheduler,
+ DisplayCompositorMemoryAndTaskControllerOnGpu* display_controller_on_gpu,
scoped_refptr<base::SingleThreadTaskRunner> task_runner);
const Capabilities& GetCapabilities() const;
diff --git a/chromium/gpu/ipc/gpu_task_scheduler_helper.cc b/chromium/gpu/ipc/gpu_task_scheduler_helper.cc
index b322404ca57..cf4fbc4a78a 100644
--- a/chromium/gpu/ipc/gpu_task_scheduler_helper.cc
+++ b/chromium/gpu/ipc/gpu_task_scheduler_helper.cc
@@ -6,6 +6,7 @@
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/ipc/command_buffer_task_executor.h"
+#include "gpu/ipc/scheduler_sequence.h"
#include "gpu/ipc/single_task_sequence.h"
namespace gpu {
@@ -63,7 +64,8 @@ SequenceId GpuTaskSchedulerHelper::GetSequenceId() {
}
gpu::SingleTaskSequence* GpuTaskSchedulerHelper::GetTaskSequence() const {
- DCHECK(using_command_buffer_);
+ // The are two places this function is called: inside command buffer or during
+ // start up or tear down.
return task_sequence_.get();
}
diff --git a/chromium/gpu/ipc/gpu_task_scheduler_helper.h b/chromium/gpu/ipc/gpu_task_scheduler_helper.h
index ff4ca477ec7..b12c77965f6 100644
--- a/chromium/gpu/ipc/gpu_task_scheduler_helper.h
+++ b/chromium/gpu/ipc/gpu_task_scheduler_helper.h
@@ -12,11 +12,13 @@
namespace viz {
class VizProcessContextProvider;
+class DisplayCompositorMemoryAndTaskController;
}
namespace gpu {
class CommandBufferTaskExecutor;
class CommandBufferHelper;
+class GLInProcessContext;
class SingleTaskSequence;
class InProcessCommandBuffer;
@@ -31,8 +33,7 @@ class InProcessCommandBuffer;
// it is created on VizProcessContextProvider. When this is used with
// SkiaRenderer, it is created on SkiaOutputSurfaceImpl. Each user of this class
// would hold a reference.
-class GL_IN_PROCESS_CONTEXT_EXPORT GpuTaskSchedulerHelper
- : public base::RefCounted<GpuTaskSchedulerHelper> {
+class GL_IN_PROCESS_CONTEXT_EXPORT GpuTaskSchedulerHelper {
public:
// This constructor is only used for SkiaOutputSurface.
explicit GpuTaskSchedulerHelper(
@@ -40,6 +41,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GpuTaskSchedulerHelper
// This constructor is used for command buffer GLOutputSurface.
explicit GpuTaskSchedulerHelper(
CommandBufferTaskExecutor* command_buffer_task_executor);
+ ~GpuTaskSchedulerHelper();
// This function sets up the |command_buffer_helper| which flushes the command
// buffer when a user outside of the command buffer shares the same
@@ -62,16 +64,15 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GpuTaskSchedulerHelper
SequenceId GetSequenceId();
private:
- friend class base::RefCounted<GpuTaskSchedulerHelper>;
- ~GpuTaskSchedulerHelper();
-
// If |using_command_buffer_| is true, we are using this class with
// GLOutputSurface. Otherwise we are using this class with
// SkiaOutputSurface.
bool using_command_buffer_;
+ friend class gpu::GLInProcessContext;
friend class gpu::InProcessCommandBuffer;
friend class viz::VizProcessContextProvider;
+ friend class viz::DisplayCompositorMemoryAndTaskController;
// Only used for inside CommandBuffer implementation.
SingleTaskSequence* GetTaskSequence() const;
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
index 2300025563b..17a438f1172 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
@@ -39,7 +39,9 @@ GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations(
gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_VDA_WRITE,
+ gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
+ gfx::BufferUsage::SCANOUT_VEA_CPU_READ,
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
};
diff --git a/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
index 6f53c16859d..e0456b9d2f7 100644
--- a/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
+++ b/chromium/gpu/ipc/host/shader_disk_cache_unittest.cc
@@ -6,7 +6,7 @@
#include "base/files/scoped_temp_dir.h"
#include "base/macros.h"
-#include "base/test/bind_test_util.h"
+#include "base/test/bind.h"
#include "base/test/task_environment.h"
#include "net/base/test_completion_callback.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index 43659617af4..de5c85df5b1 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -12,7 +12,6 @@
#include "base/atomic_sequence_num.h"
#include "base/bind.h"
-#include "base/bind_helpers.h"
#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/containers/queue.h"
@@ -61,7 +60,6 @@
#include "gpu/config/gpu_preferences.h"
#include "gpu/config/gpu_switches.h"
#include "gpu/ipc/command_buffer_task_executor.h"
-#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_client_ids.h"
#include "gpu/ipc/host/gpu_memory_buffer_support.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
@@ -96,14 +94,8 @@ namespace gpu {
namespace {
-base::AtomicSequenceNumber g_next_route_id;
base::AtomicSequenceNumber g_next_image_id;
-CommandBufferId NextCommandBufferId() {
- return CommandBufferIdFromChannelAndRoute(kDisplayCompositorClientId,
- g_next_route_id.GetNext() + 1);
-}
-
template <typename T>
base::OnceClosure WrapTaskWithResult(base::OnceCallback<T(void)> task,
T* result,
@@ -171,8 +163,7 @@ bool InProcessCommandBuffer::SharedImageInterfaceHelper::EnableWrappedSkImage()
InProcessCommandBuffer::InProcessCommandBuffer(
CommandBufferTaskExecutor* task_executor,
const GURL& active_url)
- : command_buffer_id_(NextCommandBufferId()),
- active_url_(active_url),
+ : active_url_(active_url),
flush_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
base::WaitableEvent::InitialState::NOT_SIGNALED),
task_executor_(task_executor),
@@ -201,6 +192,10 @@ int InProcessCommandBuffer::GetRasterDecoderIdForTest() const {
->DecoderIdForTest();
}
+webgpu::WebGPUDecoder* InProcessCommandBuffer::GetWebGPUDecoderForTest() const {
+ return static_cast<webgpu::WebGPUDecoder*>(decoder_.get());
+}
+
gpu::SharedImageInterface* InProcessCommandBuffer::GetSharedImageInterface()
const {
return shared_image_interface_.get();
@@ -261,6 +256,7 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
GpuChannelManagerDelegate* gpu_channel_manager_delegate,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
SingleTaskSequence* task_sequence,
+ DisplayCompositorMemoryAndTaskControllerOnGpu* gpu_dependency,
gpu::raster::GrShaderCache* gr_shader_cache,
GpuProcessActivityFlags* activity_flags) {
DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
@@ -289,8 +285,8 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
Capabilities capabilities;
InitializeOnGpuThreadParams params(surface_handle, attribs, &capabilities,
- image_factory, gr_shader_cache,
- activity_flags);
+ image_factory, gpu_dependency,
+ gr_shader_cache, activity_flags);
base::OnceCallback<gpu::ContextResult(void)> init_task =
base::BindOnce(&InProcessCommandBuffer::InitializeOnGpuThread,
@@ -304,7 +300,7 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
task_sequence_ = task_sequence;
} else {
task_scheduler_holder_ =
- base::MakeRefCounted<gpu::GpuTaskSchedulerHelper>(task_executor_);
+ std::make_unique<gpu::GpuTaskSchedulerHelper>(task_executor_);
task_sequence_ = task_scheduler_holder_->GetTaskSequence();
}
@@ -322,9 +318,7 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
if (result == gpu::ContextResult::kSuccess) {
capabilities_ = capabilities;
shared_image_interface_ = std::make_unique<SharedImageInterfaceInProcess>(
- task_executor_, task_sequence_, NextCommandBufferId(),
- context_group_->mailbox_manager(), image_factory_,
- context_group_->memory_tracker(),
+ task_sequence_, gpu_dependency_,
std::make_unique<SharedImageInterfaceHelper>(this));
}
@@ -337,6 +331,15 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
TRACE_EVENT0("gpu", "InProcessCommandBuffer::InitializeOnGpuThread")
UpdateActiveUrl();
+ if (params.gpu_dependency) {
+ gpu_dependency_ = params.gpu_dependency;
+ } else {
+ gpu_dependency_holder_ =
+ std::make_unique<DisplayCompositorMemoryAndTaskControllerOnGpu>(
+ task_executor_, params.image_factory);
+ gpu_dependency_ = gpu_dependency_holder_.get();
+ }
+
if (gpu_channel_manager_delegate_ &&
gpu_channel_manager_delegate_->IsExiting()) {
LOG(ERROR) << "ContextResult::kTransientFailure: trying to create command "
@@ -354,7 +357,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
base::trace_event::MemoryDumpManager::GetInstance()
->GetTracingProcessId();
memory_tracker = std::make_unique<GpuCommandBufferMemoryTracker>(
- command_buffer_id_, client_tracing_id, params.attribs.context_type,
+ gpu_dependency_->command_buffer_id(), client_tracing_id,
base::ThreadTaskRunnerHandle::Get(), /* obserer=*/nullptr);
}
@@ -398,7 +401,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
: "0");
command_buffer_ = std::make_unique<CommandBufferService>(
- this, context_group_->memory_tracker());
+ this, gpu_dependency_->memory_tracker());
context_state_ = task_executor_->GetSharedContextState();
@@ -496,7 +499,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
std::unique_ptr<webgpu::WebGPUDecoder> webgpu_decoder(
webgpu::WebGPUDecoder::Create(
this, command_buffer_.get(), task_executor_->shared_image_manager(),
- context_group_->memory_tracker(), task_executor_->outputter(),
+ gpu_dependency_->memory_tracker(), task_executor_->outputter(),
task_executor_->gpu_preferences()));
gpu::ContextResult result = webgpu_decoder->Initialize();
if (result != gpu::ContextResult::kSuccess) {
@@ -571,7 +574,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
decoder_.reset(raster::RasterDecoder::Create(
this, command_buffer_.get(), task_executor_->outputter(),
task_executor_->gpu_feature_info(), task_executor_->gpu_preferences(),
- context_group_->memory_tracker(),
+ gpu_dependency_->memory_tracker(),
task_executor_->shared_image_manager(), context_state_,
true /*is_privileged*/));
} else {
@@ -717,6 +720,9 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
if (context_state_)
context_state_->MakeCurrent(nullptr);
context_state_ = nullptr;
+
+ if (gpu_dependency_holder_)
+ gpu_dependency_holder_.reset();
return true;
}
@@ -1370,7 +1376,7 @@ CommandBufferNamespace InProcessCommandBuffer::GetNamespaceID() const {
}
CommandBufferId InProcessCommandBuffer::GetCommandBufferID() const {
- return command_buffer_id_;
+ return gpu_dependency_->command_buffer_id();
}
void InProcessCommandBuffer::FlushPendingWork() {
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index 8910250b095..82d4dc1c726 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -41,6 +41,7 @@
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/ipc/command_buffer_task_executor.h"
+#include "gpu/ipc/display_compositor_memory_and_task_controller_on_gpu.h"
#include "gpu/ipc/gl_in_process_context_export.h"
#include "gpu/ipc/gpu_task_scheduler_helper.h"
#include "gpu/ipc/service/context_url.h"
@@ -81,6 +82,10 @@ class SyncPointClientState;
struct ContextCreationAttribs;
struct SwapBuffersCompleteParams;
+namespace webgpu {
+class WebGPUDecoder;
+}
+
namespace raster {
class GrShaderCache;
}
@@ -118,6 +123,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
GpuChannelManagerDelegate* gpu_channel_manager_delegate,
scoped_refptr<base::SingleThreadTaskRunner> task_runner,
SingleTaskSequence* task_sequence,
+ DisplayCompositorMemoryAndTaskControllerOnGpu* gpu_dependency,
gpu::raster::GrShaderCache* gr_shader_cache,
GpuProcessActivityFlags* activity_flags);
@@ -209,6 +215,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
gpu::ServiceTransferCache* GetTransferCacheForTest() const;
int GetRasterDecoderIdForTest() const;
+ webgpu::WebGPUDecoder* GetWebGPUDecoderForTest() const;
CommandBufferTaskExecutor* service_for_testing() const {
return task_executor_;
@@ -240,35 +247,29 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
// and |surface_handle| provided in Initialize outlive this callback.
base::ScopedClosureRunner GetCacheBackBufferCb();
- gpu::SharedImageManager* GetSharedImageManager() {
- return task_executor_->shared_image_manager();
- }
-
- gpu::MemoryTracker* GetMemoryTracker() {
- // Should only be called after initialization.
- DCHECK(context_group_);
- return context_group_->memory_tracker();
- }
-
private:
struct InitializeOnGpuThreadParams {
SurfaceHandle surface_handle;
const ContextCreationAttribs& attribs;
Capabilities* capabilities; // Ouptut.
ImageFactory* image_factory;
+ DisplayCompositorMemoryAndTaskControllerOnGpu* gpu_dependency;
gpu::raster::GrShaderCache* gr_shader_cache;
GpuProcessActivityFlags* activity_flags;
- InitializeOnGpuThreadParams(SurfaceHandle surface_handle,
- const ContextCreationAttribs& attribs,
- Capabilities* capabilities,
- ImageFactory* image_factory,
- gpu::raster::GrShaderCache* gr_shader_cache,
- GpuProcessActivityFlags* activity_flags)
+ InitializeOnGpuThreadParams(
+ SurfaceHandle surface_handle,
+ const ContextCreationAttribs& attribs,
+ Capabilities* capabilities,
+ ImageFactory* image_factory,
+ DisplayCompositorMemoryAndTaskControllerOnGpu* gpu_dependency,
+ gpu::raster::GrShaderCache* gr_shader_cache,
+ GpuProcessActivityFlags* activity_flags)
: surface_handle(surface_handle),
attribs(attribs),
capabilities(capabilities),
image_factory(image_factory),
+ gpu_dependency(gpu_dependency),
gr_shader_cache(gr_shader_cache),
activity_flags(activity_flags) {}
};
@@ -348,7 +349,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
void HandleGpuVSyncOnOriginThread(base::TimeTicks vsync_time,
base::TimeDelta vsync_interval);
- const CommandBufferId command_buffer_id_;
const ContextUrl active_url_;
bool is_offscreen_ = false;
@@ -360,6 +360,9 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
// Members accessed on the gpu thread (possibly with the exception of
// creation):
+ DisplayCompositorMemoryAndTaskControllerOnGpu* gpu_dependency_;
+ std::unique_ptr<DisplayCompositorMemoryAndTaskControllerOnGpu>
+ gpu_dependency_holder_;
bool use_virtualized_gl_context_ = false;
raster::GrShaderCache* gr_shader_cache_ = nullptr;
scoped_refptr<base::SingleThreadTaskRunner> origin_task_runner_;
@@ -399,7 +402,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
CommandBufferTaskExecutor* const task_executor_;
// If no SingleTaskSequence is passed in, create our own.
- scoped_refptr<GpuTaskSchedulerHelper> task_scheduler_holder_;
+ std::unique_ptr<GpuTaskSchedulerHelper> task_scheduler_holder_;
// Pointer to the SingleTaskSequence that actually does the scheduling.
SingleTaskSequence* task_sequence_;
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
index 300d8b9b22d..43ae12b7cd6 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
@@ -5,7 +5,7 @@
#include "gpu/ipc/in_process_gpu_thread_holder.h"
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
diff --git a/chromium/gpu/ipc/raster_in_process_context.cc b/chromium/gpu/ipc/raster_in_process_context.cc
index 86e091926c2..204ee69dfac 100644
--- a/chromium/gpu/ipc/raster_in_process_context.cc
+++ b/chromium/gpu/ipc/raster_in_process_context.cc
@@ -67,7 +67,9 @@ ContextResult RasterInProcessContext::Initialize(
nullptr /* surface */, true /* is_offscreen */, kNullSurfaceHandle,
attribs, gpu_memory_buffer_manager, image_factory,
gpu_channel_manager_delegate, client_task_runner_,
- nullptr /* task_sequence */, gr_shader_cache, activity_flags);
+ nullptr /* task_sequence */,
+ nullptr /*display_compositor_memory_and_task_controller_on_gpu */,
+ gr_shader_cache, activity_flags);
if (result != ContextResult::kSuccess) {
DLOG(ERROR) << "Failed to initialize InProcessCommmandBuffer";
return result;
diff --git a/chromium/gpu/ipc/scheduler_sequence.h b/chromium/gpu/ipc/scheduler_sequence.h
index 15ae245cfe1..b8f56251eb9 100644
--- a/chromium/gpu/ipc/scheduler_sequence.h
+++ b/chromium/gpu/ipc/scheduler_sequence.h
@@ -18,6 +18,7 @@
namespace viz {
class Display;
+class DisplayCompositorMemoryAndTaskController;
class ScopedAllowGpuAccessForDisplayResourceProvider;
class OutputSurfaceProviderImpl;
class OverlayProcessorAndroid;
@@ -36,6 +37,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT ScopedAllowScheduleGpuTask {
// Only add more friend declarations for classes that Android WebView is
// guaranteed to be able to support. Talk to boliu@ if in doubt.
friend class viz::Display;
+ friend class viz::DisplayCompositorMemoryAndTaskController;
friend class viz::ScopedAllowGpuAccessForDisplayResourceProvider;
friend class viz::OutputSurfaceProviderImpl;
// Overlay is not supported for WebView. However the initialization and
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index 4c943d09aec..6ee0b6469ae 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -37,8 +37,6 @@ component("service") {
"gpu_memory_buffer_factory.h",
"gpu_watchdog_thread.cc",
"gpu_watchdog_thread.h",
- "gpu_watchdog_thread_v2.cc",
- "gpu_watchdog_thread_v2.h",
"image_decode_accelerator_stub.cc",
"image_decode_accelerator_stub.h",
"image_decode_accelerator_worker.h",
@@ -74,6 +72,7 @@ component("service") {
]
deps = [
"//base/third_party/dynamic_annotations",
+ "//build:chromeos_buildflags",
# crbug.com/799267: crash_key needs to be added explicitly for Windows and
# Mac even though it's not directly referenced, because it's being
diff --git a/chromium/gpu/ipc/service/command_buffer_stub.cc b/chromium/gpu/ipc/service/command_buffer_stub.cc
index dea796c946d..323f9a7cd41 100644
--- a/chromium/gpu/ipc/service/command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/command_buffer_stub.cc
@@ -7,7 +7,7 @@
#include <utility>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/hash/hash.h"
#include "base/json/json_writer.h"
#include "base/macros.h"
@@ -649,15 +649,14 @@ void CommandBufferStub::RemoveDestructionObserver(
destruction_observers_.RemoveObserver(observer);
}
-std::unique_ptr<MemoryTracker> CommandBufferStub::CreateMemoryTracker(
- const GPUCreateCommandBufferConfig& init_params) const {
+std::unique_ptr<MemoryTracker> CommandBufferStub::CreateMemoryTracker() const {
MemoryTrackerFactory current_factory = GetMemoryTrackerFactory();
if (current_factory)
- return current_factory.Run(init_params);
+ return current_factory.Run();
return std::make_unique<GpuCommandBufferMemoryTracker>(
command_buffer_id_, channel_->client_tracing_id(),
- init_params.attribs.context_type, channel_->task_runner(),
+ channel_->task_runner(),
channel_->gpu_channel_manager()->peak_memory_monitor());
}
@@ -667,6 +666,10 @@ void CommandBufferStub::SetMemoryTrackerFactoryForTesting(
SetOrGetMemoryTrackerFactory(factory);
}
+MemoryTracker* CommandBufferStub::GetMemoryTracker() const {
+ return memory_tracker_.get();
+}
+
scoped_refptr<Buffer> CommandBufferStub::GetTransferBuffer(int32_t id) {
return command_buffer_->GetTransferBuffer(id);
}
diff --git a/chromium/gpu/ipc/service/command_buffer_stub.h b/chromium/gpu/ipc/service/command_buffer_stub.h
index 641306f773d..ce899fa04a4 100644
--- a/chromium/gpu/ipc/service/command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/command_buffer_stub.h
@@ -84,7 +84,8 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
const GPUCreateCommandBufferConfig& init_params,
base::UnsafeSharedMemoryRegion shared_state_shm) = 0;
- virtual MemoryTracker* GetMemoryTracker() const = 0;
+ MemoryTracker* GetMemoryTracker() const;
+ virtual MemoryTracker* GetContextGroupMemoryTracker() const = 0;
// IPC::Listener implementation:
bool OnMessageReceived(const IPC::Message& message) override;
@@ -106,8 +107,7 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
void HandleReturnData(base::span<const uint8_t> data) override;
using MemoryTrackerFactory =
- base::RepeatingCallback<std::unique_ptr<MemoryTracker>(
- const GPUCreateCommandBufferConfig&)>;
+ base::RepeatingCallback<std::unique_ptr<MemoryTracker>()>;
// Overrides the way CreateMemoryTracker() uses to create a MemoryTracker.
// This is intended for mocking the MemoryTracker in tests.
@@ -146,8 +146,7 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
protected:
virtual bool HandleMessage(const IPC::Message& message) = 0;
- std::unique_ptr<MemoryTracker> CreateMemoryTracker(
- const GPUCreateCommandBufferConfig& init_params) const;
+ std::unique_ptr<MemoryTracker> CreateMemoryTracker() const;
// Must be called during Initialize(). Takes ownership to co-ordinate
// teardown in Destroy().
@@ -173,6 +172,12 @@ class GPU_IPC_SERVICE_EXPORT CommandBufferStub
std::unique_ptr<CommandBufferService> command_buffer_;
+ // Have an ownership of the memory tracker used in children class. This is to
+ // ensure that the memory tracker outlives the objects that uses it, for
+ // example the ContextGroup referenced both in the Decoder and the
+ // CommandBufferStub.
+ std::unique_ptr<gpu::MemoryTracker> memory_tracker_;
+
scoped_refptr<gl::GLSurface> surface_;
scoped_refptr<SyncPointClientState> sync_point_client_state_;
scoped_refptr<gl::GLShareGroup> share_group_;
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
index 9ccf7efcef6..025815ab5d1 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
@@ -8,7 +8,7 @@
#include <utility>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/hash/hash.h"
#include "base/json/json_writer.h"
#include "base/macros.h"
@@ -85,6 +85,7 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
GpuChannelManager* manager = channel_->gpu_channel_manager();
DCHECK(manager);
+ memory_tracker_ = CreateMemoryTracker();
if (share_command_buffer_stub) {
context_group_ =
@@ -107,7 +108,7 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
manager->gpu_memory_buffer_factory();
context_group_ = new gles2::ContextGroup(
manager->gpu_preferences(), gles2::PassthroughCommandDecoderSupported(),
- manager->mailbox_manager(), CreateMemoryTracker(init_params),
+ manager->mailbox_manager(), CreateMemoryTracker(),
manager->shader_translator_cache(),
manager->framebuffer_completeness_cache(), feature_info,
init_params.attribs.bind_generates_resource, channel_->image_manager(),
@@ -432,7 +433,7 @@ base::TimeDelta GLES2CommandBufferStub::GetGpuBlockedTimeSinceLastSwap() {
return channel_->scheduler()->TakeTotalBlockingTime();
}
-MemoryTracker* GLES2CommandBufferStub::GetMemoryTracker() const {
+MemoryTracker* GLES2CommandBufferStub::GetContextGroupMemoryTracker() const {
return context_group_->memory_tracker();
}
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
index fbcde76644e..5eca68fd8c6 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
@@ -37,7 +37,7 @@ class GPU_IPC_SERVICE_EXPORT GLES2CommandBufferStub
CommandBufferStub* share_group,
const GPUCreateCommandBufferConfig& init_params,
base::UnsafeSharedMemoryRegion shared_state_shm) override;
- MemoryTracker* GetMemoryTracker() const override;
+ MemoryTracker* GetContextGroupMemoryTracker() const override;
// DecoderClient implementation.
void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override;
diff --git a/chromium/gpu/ipc/service/gpu_channel.cc b/chromium/gpu/ipc/service/gpu_channel.cc
index f50e5aeb020..9b2e01091ff 100644
--- a/chromium/gpu/ipc/service/gpu_channel.cc
+++ b/chromium/gpu/ipc/service/gpu_channel.cc
@@ -836,8 +836,9 @@ uint64_t GpuChannel::GetMemoryUsage() const {
unique_memory_trackers.reserve(stubs_.size());
uint64_t size = 0;
for (const auto& kv : stubs_) {
- MemoryTracker* tracker = kv.second->GetMemoryTracker();
- if (!unique_memory_trackers.insert(tracker).second) {
+ size += kv.second->GetMemoryTracker()->GetSize();
+ MemoryTracker* tracker = kv.second->GetContextGroupMemoryTracker();
+ if (!tracker || !unique_memory_trackers.insert(tracker).second) {
// We already counted that tracker.
continue;
}
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index 4d3aa8eb3c3..1471ceae556 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -16,6 +16,7 @@
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
+#include "build/chromeos_buildflags.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/command_buffer/service/gpu_switches.h"
#include "gpu/command_buffer/service/service_utils.h"
@@ -27,7 +28,6 @@
#include "gpu/config/gpu_switching.h"
#include "gpu/config/gpu_util.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
-#include "gpu/ipc/service/gpu_watchdog_thread_v2.h"
#include "ui/base/ui_base_features.h"
#include "ui/gfx/switches.h"
#include "ui/gl/buildflags.h"
@@ -55,7 +55,7 @@
#if defined(OS_ANDROID)
#include "base/android/android_image_reader_compat.h"
-#include "ui/gl/android/android_surface_control_compat.h"
+#include "ui/gfx/android/android_surface_control_compat.h"
#endif
#if BUILDFLAG(ENABLE_VULKAN)
@@ -103,11 +103,11 @@ void InitializePlatformOverlaySettings(GPUInfo* gpu_info,
CollectHardwareOverlayInfo(&gpu_info->overlay_info);
#elif defined(OS_ANDROID)
if (gpu_info->gpu.vendor_string == "Qualcomm")
- gl::SurfaceControl::EnableQualcommUBWC();
+ gfx::SurfaceControl::EnableQualcommUBWC();
#endif
}
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && !BUILDFLAG(IS_CHROMECAST)
+#if BUILDFLAG(IS_LACROS) || (defined(OS_LINUX) && !BUILDFLAG(IS_CHROMECAST))
bool CanAccessNvidiaDeviceFile() {
bool res = true;
base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
@@ -118,7 +118,8 @@ bool CanAccessNvidiaDeviceFile() {
}
return res;
}
-#endif // OS_LINUX && !OS_CHROMEOS && !BUILDFLAG(IS_CHROMECAST)
+#endif // BUILDFLAG(IS_LACROS) || (defined(OS_LINUX) &&
+ // !BUILDFLAG(IS_CHROMECAST))
class GpuWatchdogInit {
public:
@@ -136,6 +137,7 @@ class GpuWatchdogInit {
// TODO(https://crbug.com/1095744): We currently do not handle
// VK_ERROR_DEVICE_LOST in in-process-gpu.
+// Android WebView is allowed for now because it CHECKs on context loss.
void DisableInProcessGpuVulkan(GpuFeatureInfo* gpu_feature_info,
GpuPreferences* gpu_preferences) {
if (gpu_feature_info->status_values[GPU_FEATURE_TYPE_VULKAN] ==
@@ -203,7 +205,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
device_perf_info_ = device_perf_info;
}
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+#if defined(OS_LINUX) || BUILDFLAG(IS_LACROS)
if (gpu_info_.gpu.vendor_id == 0x10de && // NVIDIA
gpu_info_.gpu.driver_vendor == "NVIDIA" && !CanAccessNvidiaDeviceFile())
return false;
@@ -249,7 +251,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
bool delayed_watchdog_enable = false;
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
// Don't start watchdog immediately, to allow developers to switch to VT2 on
// startup.
delayed_watchdog_enable = true;
@@ -262,27 +264,22 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
if (gpu_preferences_.gpu_sandbox_start_early) {
// The sandbox will be started earlier than usual (i.e. before GL) so
// execute the pre-sandbox steps now.
- sandbox_helper_->PreSandboxStartup();
+ sandbox_helper_->PreSandboxStartup(gpu_preferences);
}
#else
// For some reasons MacOSX's VideoToolbox might crash when called after
// initializing GL, see crbug.com/1047643 and crbug.com/871280. On other
// operating systems like Windows and Android the pre-sandbox steps have
// always been executed before initializing GL so keep it this way.
- sandbox_helper_->PreSandboxStartup();
+ sandbox_helper_->PreSandboxStartup(gpu_preferences);
#endif
// Start the GPU watchdog only after anything that is expected to be time
// consuming has completed, otherwise the process is liable to be aborted.
if (enable_watchdog && !delayed_watchdog_enable) {
- if (base::FeatureList::IsEnabled(features::kGpuWatchdogV2)) {
- watchdog_thread_ = GpuWatchdogThreadImplV2::Create(
- gpu_preferences_.watchdog_starts_backgrounded);
- watchdog_init.SetGpuWatchdogPtr(watchdog_thread_.get());
- } else {
- watchdog_thread_ = GpuWatchdogThreadImplV1::Create(
- gpu_preferences_.watchdog_starts_backgrounded);
- }
+ watchdog_thread_ = GpuWatchdogThread::Create(
+ gpu_preferences_.watchdog_starts_backgrounded);
+ watchdog_init.SetGpuWatchdogPtr(watchdog_thread_.get());
#if defined(OS_WIN)
// This is a workaround for an occasional deadlock between watchdog and
@@ -382,7 +379,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
watchdog_thread_->PauseWatchdog();
// The sandbox is not started yet.
- sandbox_helper_->PreSandboxStartup();
+ sandbox_helper_->PreSandboxStartup(gpu_preferences);
if (watchdog_thread_)
watchdog_thread_->ResumeWatchdog();
@@ -569,14 +566,9 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
watchdog_thread_ = nullptr;
watchdog_init.SetGpuWatchdogPtr(nullptr);
} else if (enable_watchdog && delayed_watchdog_enable) {
- if (base::FeatureList::IsEnabled(features::kGpuWatchdogV2)) {
- watchdog_thread_ = GpuWatchdogThreadImplV2::Create(
- gpu_preferences_.watchdog_starts_backgrounded);
- watchdog_init.SetGpuWatchdogPtr(watchdog_thread_.get());
- } else {
- watchdog_thread_ = GpuWatchdogThreadImplV1::Create(
- gpu_preferences_.watchdog_starts_backgrounded);
- }
+ watchdog_thread_ = GpuWatchdogThread::Create(
+ gpu_preferences_.watchdog_starts_backgrounded);
+ watchdog_init.SetGpuWatchdogPtr(watchdog_thread_.get());
}
UMA_HISTOGRAM_ENUMERATION("GPU.GLImplementation", gl::GetGLImplementation());
@@ -620,7 +612,14 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
InitializeGLThreadSafe(command_line, gpu_preferences_, &gpu_info_,
&gpu_feature_info_);
- DisableInProcessGpuVulkan(&gpu_feature_info_, &gpu_preferences_);
+ if (command_line->HasSwitch(switches::kWebViewDrawFunctorUsesVulkan) &&
+ base::FeatureList::IsEnabled(features::kWebViewVulkan)) {
+ bool result = InitializeVulkan();
+ // There is no fallback for webview.
+ CHECK(result);
+ } else {
+ DisableInProcessGpuVulkan(&gpu_feature_info_, &gpu_preferences_);
+ }
default_offscreen_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
UMA_HISTOGRAM_ENUMERATION("GPU.GLImplementation", gl::GetGLImplementation());
@@ -775,12 +774,13 @@ bool GpuInit::InitializeVulkan() {
gpu_preferences_.use_vulkan == VulkanImplementationName::kForcedNative;
bool use_swiftshader = gl_use_swiftshader_ || vulkan_use_swiftshader;
- const bool enforce_protected_memory =
- gpu_preferences_.enforce_vulkan_protected_memory;
+ // If |enforce_vulkan_protected_memory| is true, then we expect
+ // |enable_vulkan_protected_memory| to be true.
+ DCHECK(!gpu_preferences_.enforce_vulkan_protected_memory ||
+ gpu_preferences_.enable_vulkan_protected_memory);
vulkan_implementation_ = CreateVulkanImplementation(
- vulkan_use_swiftshader,
- enforce_protected_memory ? true : false /* allow_protected_memory */,
- enforce_protected_memory);
+ vulkan_use_swiftshader, gpu_preferences_.enable_vulkan_protected_memory,
+ gpu_preferences_.enforce_vulkan_protected_memory);
if (!vulkan_implementation_ ||
!vulkan_implementation_->InitializeVulkanInstance(
!gpu_preferences_.disable_vulkan_surface)) {
diff --git a/chromium/gpu/ipc/service/gpu_init.h b/chromium/gpu/ipc/service/gpu_init.h
index 3c236dbc502..45e68e62d69 100644
--- a/chromium/gpu/ipc/service/gpu_init.h
+++ b/chromium/gpu/ipc/service/gpu_init.h
@@ -8,13 +8,13 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/config/device_perf_info.h"
-#include "gpu/config/gpu_extra_info.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_preferences.h"
#include "gpu/ipc/service/gpu_ipc_service_export.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "gpu/vulkan/buildflags.h"
+#include "ui/gfx/gpu_extra_info.h"
namespace base {
class CommandLine;
@@ -32,7 +32,7 @@ class GPU_IPC_SERVICE_EXPORT GpuSandboxHelper {
public:
virtual ~GpuSandboxHelper() = default;
- virtual void PreSandboxStartup() = 0;
+ virtual void PreSandboxStartup(const GpuPreferences& gpu_prefs) = 0;
virtual bool EnsureSandboxInitialized(GpuWatchdogThread* watchdog_thread,
const GPUInfo* gpu_info,
@@ -57,7 +57,7 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
const GPUInfo& gpu_info() const { return gpu_info_; }
const GpuFeatureInfo& gpu_feature_info() const { return gpu_feature_info_; }
- const GpuExtraInfo& gpu_extra_info() const { return gpu_extra_info_; }
+ const gfx::GpuExtraInfo& gpu_extra_info() const { return gpu_extra_info_; }
const base::Optional<GPUInfo>& gpu_info_for_hardware_gpu() const {
return gpu_info_for_hardware_gpu_;
}
@@ -99,7 +99,7 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
base::Optional<GPUInfo> gpu_info_for_hardware_gpu_;
base::Optional<GpuFeatureInfo> gpu_feature_info_for_hardware_gpu_;
- GpuExtraInfo gpu_extra_info_;
+ gfx::GpuExtraInfo gpu_extra_info_;
// The following data are collected by the info collection GPU process.
base::Optional<DevicePerfInfo> device_perf_info_;
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc
index 650eb7df005..5263e6aeb92 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_android_hardware_buffer.cc
@@ -12,7 +12,7 @@
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/service/ahardwarebuffer_utils.h"
#include "gpu/ipc/common/gpu_memory_buffer_impl_android_hardware_buffer.h"
-#include "ui/gl/android/android_surface_control_compat.h"
+#include "ui/gfx/android/android_surface_control_compat.h"
#include "ui/gl/gl_image_ahardwarebuffer.h"
namespace gpu {
@@ -29,7 +29,7 @@ AHardwareBuffer_Desc GetBufferDescription(const gfx::Size& size,
hwb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
if (usage == gfx::BufferUsage::SCANOUT)
- hwb_desc.usage |= gl::SurfaceControl::RequiredUsage();
+ hwb_desc.usage |= gfx::SurfaceControl::RequiredUsage();
// Number of images in an image array.
hwb_desc.layers = 1;
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc
index 2650f198d81..8113282b3e7 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_io_surface.cc
@@ -19,12 +19,10 @@
namespace gpu {
namespace {
+
// A GpuMemoryBuffer with client_id = 0 behaves like anonymous shared memory.
const int kAnonymousClientId = 0;
-// The maximum number of times to dump before throttling (to avoid sending
-// thousands of crash dumps).
-const int kMaxCrashDumps = 10;
} // namespace
GpuMemoryBufferFactoryIOSurface::GpuMemoryBufferFactoryIOSurface() {
@@ -56,27 +54,7 @@ GpuMemoryBufferFactoryIOSurface::CreateGpuMemoryBuffer(
gfx::GpuMemoryBufferHandle handle;
handle.type = gfx::IO_SURFACE_BUFFER;
handle.id = id;
- handle.mach_port.reset(IOSurfaceCreateMachPort(io_surface));
- CHECK(handle.mach_port);
-
- // This IOSurface will be opened via mach port in the client process. It has
- // been observed in https://crbug.com/574014 that these ports sometimes fail
- // to be opened in the client process. It has further been observed in
- // https://crbug.com/795649#c30 that these ports fail to be opened in creating
- // process. To determine if these failures are independent, attempt to open
- // the creating process first (and don't not return those that fail).
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface_from_mach_port(
- IOSurfaceLookupFromMachPort(handle.mach_port.get()));
- if (!io_surface_from_mach_port) {
- LOG(ERROR) << "Failed to locally open IOSurface from mach port to be "
- "returned to client, not returning to client.";
- static int dump_counter = kMaxCrashDumps;
- if (dump_counter) {
- dump_counter -= 1;
- base::debug::DumpWithoutCrashing();
- }
- return gfx::GpuMemoryBufferHandle();
- }
+ handle.io_surface = io_surface;
{
base::AutoLock lock(io_surfaces_lock_);
@@ -127,8 +105,8 @@ GpuMemoryBufferFactoryIOSurface::CreateImageForGpuMemoryBuffer(
IOSurfaceMap::iterator it = io_surfaces_.find(key);
if (it != io_surfaces_.end())
io_surface = it->second;
- } else if (handle.mach_port) {
- io_surface.reset(IOSurfaceLookupFromMachPort(handle.mach_port.get()));
+ } else if (handle.io_surface) {
+ io_surface = handle.io_surface;
if (!io_surface) {
DLOG(ERROR) << "Failed to open IOSurface from handle.";
return nullptr;
@@ -181,28 +159,6 @@ GpuMemoryBufferFactoryIOSurface::CreateAnonymousImage(
return nullptr;
}
- // This IOSurface does not require passing via a mach port, but attempt to
- // locally open via a mach port to gather data to include in a Radar about
- // this failure.
- // https://crbug.com/795649
- gfx::ScopedRefCountedIOSurfaceMachPort mach_port(
- IOSurfaceCreateMachPort(io_surface));
- if (mach_port) {
- base::ScopedCFTypeRef<IOSurfaceRef> io_surface_from_mach_port(
- IOSurfaceLookupFromMachPort(mach_port.get()));
- if (!io_surface_from_mach_port) {
- LOG(ERROR) << "Failed to locally open anonymous IOSurface mach port "
- "(ignoring failure).";
- static int dump_counter = kMaxCrashDumps;
- if (dump_counter) {
- dump_counter -= 1;
- base::debug::DumpWithoutCrashing();
- }
- }
- } else {
- LOG(ERROR) << "Failed to create IOSurface mach port.";
- }
-
unsigned internalformat = gl::BufferFormatToGLInternalFormat(format);
scoped_refptr<gl::GLImageIOSurface> image(
gl::GLImageIOSurface::Create(size, internalformat));
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
index b9abc433124..3c1eb8e48bc 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_test_template.h
@@ -68,7 +68,9 @@ TYPED_TEST_P(GpuMemoryBufferFactoryTest, CreateGpuMemoryBuffer) {
gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_VDA_WRITE,
+ gfx::BufferUsage::PROTECTED_SCANOUT_VDA_WRITE,
gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
+ gfx::BufferUsage::SCANOUT_VEA_CPU_READ,
gfx::BufferUsage::SCANOUT_VEA_READ_CAMERA_AND_CPU_READ_WRITE,
};
for (auto usage : usages) {
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index a96193fa95e..fac06bb181c 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -4,111 +4,124 @@
#include "gpu/ipc/service/gpu_watchdog_thread.h"
+#include "base/atomicops.h"
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/bit_cast.h"
+#include "base/callback_helpers.h"
#include "base/debug/alias.h"
#include "base/debug/dump_without_crashing.h"
+#include "base/files/file_path.h"
#include "base/files/file_util.h"
-#include "base/format_macros.h"
-#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/metrics/field_trial_params.h"
#include "base/metrics/histogram_functions.h"
+#include "base/native_library.h"
+#include "base/numerics/safe_conversions.h"
#include "base/power_monitor/power_monitor.h"
#include "base/process/process.h"
-#include "base/single_thread_task_runner.h"
#include "base/strings/string_number_conversions.h"
-#include "base/strings/stringprintf.h"
#include "base/system/sys_info.h"
#include "base/task/current_thread.h"
+#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
#include "build/build_config.h"
#include "gpu/config/gpu_crash_keys.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/ipc/common/result_codes.h"
-#include "ui/gl/shader_tracking.h"
#if defined(OS_WIN)
-#include <windows.h>
+#include "base/win/windows_version.h"
#endif
namespace gpu {
-namespace {
-
-#if defined(CYGPROFILE_INSTRUMENTATION)
-const int kGpuTimeout = 30000;
-#elif defined(OS_WIN) || defined(OS_MAC)
-// Use a slightly longer timeout on Windows due to prevalence of slow and
-// infected machines.
+#if defined(OS_WIN)
+base::TimeDelta GetGpuWatchdogTimeoutBasedOnCpuCores() {
+ if (base::win::GetVersion() >= base::win::Version::WIN10) {
+ int num_of_processors = base::SysInfo::NumberOfProcessors();
+
+ if (num_of_processors > 8)
+ return (kGpuWatchdogTimeout - base::TimeDelta::FromSeconds(10));
+ else if (num_of_processors <= 4)
+ return kGpuWatchdogTimeout + base::TimeDelta::FromSeconds(5);
+ }
-// Also use a slightly longer timeout on MacOSX to get rid of GPU process
-// hangs at context creation during startup. See https://crbug.com/918490.
-const int kGpuTimeout = 15000;
-#else
-const int kGpuTimeout = 10000;
+ return kGpuWatchdogTimeout;
+}
#endif
-// The same set of timeouts from Watchdog V2 so we can compare the results
-// between V1 and V2.
-#if defined(CYGPROFILE_INSTRUMENTATION)
-const int kNewGpuTimeout = 30000;
-#elif defined(OS_MAC)
-const int kNewGpuTimeout = 17000;
-#else
-const int kNewGpuTimeout = 15000;
+GpuWatchdogThread::GpuWatchdogThread(base::TimeDelta timeout,
+ int init_factor,
+ int restart_factor,
+ int max_extra_cycles_before_kill,
+ bool is_test_mode)
+ : base::Thread("GpuWatchdog"),
+ watchdog_timeout_(timeout),
+ watchdog_init_factor_(init_factor),
+ watchdog_restart_factor_(restart_factor),
+ in_gpu_initialization_(true),
+ max_extra_cycles_before_kill_(max_extra_cycles_before_kill),
+ is_test_mode_(is_test_mode),
+ watched_gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()) {
+ base::CurrentThread::Get()->AddTaskObserver(this);
+ num_of_processors_ = base::SysInfo::NumberOfProcessors();
+
+#if defined(OS_WIN)
+ // GetCurrentThread returns a pseudo-handle that cannot be used by one thread
+ // to identify another. DuplicateHandle creates a "real" handle that can be
+ // used for this purpose.
+ if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
+ GetCurrentProcess(), &watched_thread_handle_,
+ THREAD_QUERY_INFORMATION, FALSE, 0)) {
+ watched_thread_handle_ = nullptr;
+ }
#endif
#if defined(USE_X11)
-const base::FilePath::CharType kTtyFilePath[] =
- FILE_PATH_LITERAL("/sys/class/tty/tty0/active");
+ tty_file_ = base::OpenFile(
+ base::FilePath(FILE_PATH_LITERAL("/sys/class/tty/tty0/active")), "r");
+ UpdateActiveTTY();
+ host_tty_ = active_tty_;
#endif
-} // namespace
+ Arm();
+}
-GpuWatchdogThreadImplV1::GpuWatchdogThreadImplV1()
- : watched_task_runner_(base::ThreadTaskRunnerHandle::Get()),
- armed_(false),
- task_observer_(this),
- use_thread_cpu_time_(true),
- responsive_acknowledge_count_(0),
-#if defined(OS_WIN)
- watched_thread_handle_(0),
- arm_cpu_time_(),
-#endif
- suspension_counter_(this)
-#if defined(USE_X11)
- ,
- host_tty_(-1)
-#endif
-{
- if (base::FeatureList::IsEnabled(features::kGpuWatchdogV1NewTimeout))
- timeout_ = base::TimeDelta::FromMilliseconds(kNewGpuTimeout);
- else
- timeout_ = base::TimeDelta::FromMilliseconds(kGpuTimeout);
+GpuWatchdogThread::~GpuWatchdogThread() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
+ // Stop() might take too long and the watchdog timeout is triggered.
+ // Disarm first before calling Stop() to avoid a crash.
+ if (IsArmed())
+ Disarm();
+ PauseWatchdog();
- base::subtle::NoBarrier_Store(&awaiting_acknowledge_, false);
+ Stop(); // stop the watchdog thread
+ base::CurrentThread::Get()->RemoveTaskObserver(this);
+ base::PowerMonitor::RemoveObserver(this);
+ GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogEnd);
#if defined(OS_WIN)
- // GetCurrentThread returns a pseudo-handle that cannot be used by one thread
- // to identify another. DuplicateHandle creates a "real" handle that can be
- // used for this purpose.
- BOOL result = DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
- GetCurrentProcess(), &watched_thread_handle_,
- THREAD_QUERY_INFORMATION, FALSE, 0);
- DCHECK(result);
+ if (watched_thread_handle_)
+ CloseHandle(watched_thread_handle_);
#endif
#if defined(USE_X11)
- tty_file_ = base::OpenFile(base::FilePath(kTtyFilePath), "r");
- UpdateActiveTTY();
- host_tty_ = active_tty_;
+ if (tty_file_)
+ fclose(tty_file_);
#endif
- base::CurrentThread::Get()->AddTaskObserver(&task_observer_);
}
// static
-std::unique_ptr<GpuWatchdogThreadImplV1> GpuWatchdogThreadImplV1::Create(
- bool start_backgrounded) {
- auto watchdog_thread = base::WrapUnique(new GpuWatchdogThreadImplV1);
+std::unique_ptr<GpuWatchdogThread> GpuWatchdogThread::Create(
+ bool start_backgrounded,
+ base::TimeDelta timeout,
+ int init_factor,
+ int restart_factor,
+ int max_extra_cycles_before_kill,
+ bool is_test_mode) {
+ auto watchdog_thread = base::WrapUnique(
+ new GpuWatchdogThread(timeout, init_factor, restart_factor,
+ max_extra_cycles_before_kill, is_test_mode));
base::Thread::Options options;
options.timer_slack = base::TIMER_SLACK_MAXIMUM;
watchdog_thread->StartWithOptions(options);
@@ -117,346 +130,567 @@ std::unique_ptr<GpuWatchdogThreadImplV1> GpuWatchdogThreadImplV1::Create(
return watchdog_thread;
}
-void GpuWatchdogThreadImplV1::CheckArmed() {
- base::subtle::NoBarrier_Store(&awaiting_acknowledge_, false);
+// static
+std::unique_ptr<GpuWatchdogThread> GpuWatchdogThread::Create(
+ bool start_backgrounded) {
+ base::TimeDelta gpu_watchdog_timeout = kGpuWatchdogTimeout;
+ int init_factor = kInitFactor;
+ int restart_factor = kRestartFactor;
+ int max_extra_cycles_before_kill = kMaxExtraCyclesBeforeKill;
+
+#if defined(OS_WIN)
+ gpu_watchdog_timeout = GetGpuWatchdogTimeoutBasedOnCpuCores();
+#endif
+
+ if (base::FeatureList::IsEnabled(features::kGpuWatchdogV2NewTimeout)) {
+ const char kNewTimeOutParam[] = "new_time_out";
+ const char kMaxExtraCyclesBeforeKillParam[] =
+ "max_extra_cycles_before_kill";
+
+#if defined(OS_WIN)
+ constexpr int kFinchMaxExtraCyclesBeforeKill = 0;
+#elif defined(OS_ANDROID)
+ constexpr int kFinchMaxExtraCyclesBeforeKill = 0;
+ init_factor = kInitFactorFinch;
+ restart_factor = kRestartFactorFinch;
+#elif defined(OS_MAC)
+ constexpr int kFinchMaxExtraCyclesBeforeKill = 1;
+#else
+ constexpr int kFinchMaxExtraCyclesBeforeKill = 2;
+#endif
+
+ int timeout = base::GetFieldTrialParamByFeatureAsInt(
+ features::kGpuWatchdogV2NewTimeout, kNewTimeOutParam,
+ gpu_watchdog_timeout.InSeconds());
+ gpu_watchdog_timeout = base::TimeDelta::FromSeconds(timeout);
+
+ max_extra_cycles_before_kill = base::GetFieldTrialParamByFeatureAsInt(
+ features::kGpuWatchdogV2NewTimeout, kMaxExtraCyclesBeforeKillParam,
+ kFinchMaxExtraCyclesBeforeKill);
+ }
+
+ return Create(start_backgrounded, gpu_watchdog_timeout, init_factor,
+ restart_factor, max_extra_cycles_before_kill, false);
}
-void GpuWatchdogThreadImplV1::ReportProgress() {
- CheckArmed();
+// Do not add power observer during watchdog init, PowerMonitor might not be
+// up and running yet.
+void GpuWatchdogThread::AddPowerObserver() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
+
+ // Forward it to the watchdog thread. Call PowerMonitor::AddObserver on the
+ // watchdog thread so that OnSuspend and OnResume will be called on watchdog
+ // thread.
+ is_add_power_observer_called_ = true;
+ task_runner()->PostTask(FROM_HERE,
+ base::BindOnce(&GpuWatchdogThread::OnAddPowerObserver,
+ base::Unretained(this)));
}
-void GpuWatchdogThreadImplV1::OnBackgrounded() {
- // As we stop the task runner before destroying this class, the unretained
- // reference will always outlive the task.
+// Android Chrome goes to the background. Called from the gpu thread.
+void GpuWatchdogThread::OnBackgrounded() {
task_runner()->PostTask(
FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV1::OnBackgroundedOnWatchdogThread,
- base::Unretained(this)));
+ base::BindOnce(&GpuWatchdogThread::StopWatchdogTimeoutTask,
+ base::Unretained(this), kAndroidBackgroundForeground));
}
-void GpuWatchdogThreadImplV1::OnForegrounded() {
- // As we stop the task runner before destroying this class, the unretained
- // reference will always outlive the task.
+// Android Chrome goes to the foreground. Called from the gpu thread.
+void GpuWatchdogThread::OnForegrounded() {
task_runner()->PostTask(
FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV1::OnForegroundedOnWatchdogThread,
- base::Unretained(this)));
+ base::BindOnce(&GpuWatchdogThread::RestartWatchdogTimeoutTask,
+ base::Unretained(this), kAndroidBackgroundForeground));
}
-bool GpuWatchdogThreadImplV1::IsGpuHangDetectedForTesting() {
- return false;
-}
+// Called from the gpu thread when gpu init has completed.
+void GpuWatchdogThread::OnInitComplete() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-void GpuWatchdogThreadImplV1::Init() {
- // Schedule the first check.
- OnCheck(false);
+ task_runner()->PostTask(
+ FROM_HERE, base::BindOnce(&GpuWatchdogThread::UpdateInitializationFlag,
+ base::Unretained(this)));
+ Disarm();
}
-void GpuWatchdogThreadImplV1::CleanUp() {
- weak_factory_.InvalidateWeakPtrs();
- armed_ = false;
-}
+// Called from the gpu thread in viz::GpuServiceImpl::~GpuServiceImpl().
+// After this, no Disarm() will be called before the watchdog thread is
+// destroyed. If this destruction takes too long, the watchdog timeout
+// will be triggered.
+void GpuWatchdogThread::OnGpuProcessTearDown() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-GpuWatchdogThreadImplV1::GpuWatchdogTaskObserver::GpuWatchdogTaskObserver(
- GpuWatchdogThreadImplV1* watchdog)
- : watchdog_(watchdog) {}
+ in_gpu_process_teardown_ = true;
+ if (!IsArmed())
+ Arm();
+}
-GpuWatchdogThreadImplV1::GpuWatchdogTaskObserver::~GpuWatchdogTaskObserver() =
- default;
+// Called from the gpu main thread.
+void GpuWatchdogThread::PauseWatchdog() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-void GpuWatchdogThreadImplV1::GpuWatchdogTaskObserver::WillProcessTask(
- const base::PendingTask& pending_task,
- bool was_blocked_or_low_priority) {
- watchdog_->CheckArmed();
+ task_runner()->PostTask(
+ FROM_HERE, base::BindOnce(&GpuWatchdogThread::StopWatchdogTimeoutTask,
+ base::Unretained(this), kGeneralGpuFlow));
}
-void GpuWatchdogThreadImplV1::GpuWatchdogTaskObserver::DidProcessTask(
- const base::PendingTask& pending_task) {}
+// Called from the gpu main thread.
+void GpuWatchdogThread::ResumeWatchdog() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-GpuWatchdogThreadImplV1::SuspensionCounter::SuspensionCounterRef::
- SuspensionCounterRef(SuspensionCounter* counter)
- : counter_(counter) {
- counter_->OnAddRef();
+ task_runner()->PostTask(
+ FROM_HERE, base::BindOnce(&GpuWatchdogThread::RestartWatchdogTimeoutTask,
+ base::Unretained(this), kGeneralGpuFlow));
}
-GpuWatchdogThreadImplV1::SuspensionCounter::SuspensionCounterRef::
- ~SuspensionCounterRef() {
- counter_->OnReleaseRef();
+// Running on the watchdog thread.
+// On Linux, Init() will be called twice for Sandbox Initialization. The
+// watchdog is stopped and then restarted in StartSandboxLinux(). Everything
+// should be the same and continue after the second init().
+void GpuWatchdogThread::Init() {
+ watchdog_thread_task_runner_ = base::ThreadTaskRunnerHandle::Get();
+
+ // Get and Invalidate weak_ptr should be done on the watchdog thread only.
+ weak_ptr_ = weak_factory_.GetWeakPtr();
+ base::TimeDelta timeout = watchdog_timeout_ * kInitFactor;
+ task_runner()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogThread::OnWatchdogTimeout, weak_ptr_),
+ timeout);
+
+ last_arm_disarm_counter_ = ReadArmDisarmCounter();
+ watchdog_start_timeticks_ = base::TimeTicks::Now();
+ last_on_watchdog_timeout_timeticks_ = watchdog_start_timeticks_;
+ next_on_watchdog_timeout_time_ = base::Time::Now() + timeout;
+
+#if defined(OS_WIN)
+ if (watched_thread_handle_) {
+ if (base::ThreadTicks::IsSupported())
+ base::ThreadTicks::WaitUntilInitialized();
+ last_on_watchdog_timeout_thread_ticks_ = GetWatchedThreadTime();
+ remaining_watched_thread_ticks_ = timeout;
+ }
+#endif
}
-GpuWatchdogThreadImplV1::SuspensionCounter::SuspensionCounter(
- GpuWatchdogThreadImplV1* watchdog_thread)
- : watchdog_thread_(watchdog_thread) {
- // This class will only be used on the watchdog thread, but is constructed on
- // the main thread. Detach.
- DETACH_FROM_SEQUENCE(watchdog_thread_sequence_checker_);
+// Running on the watchdog thread.
+void GpuWatchdogThread::CleanUp() {
+ DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
+ weak_factory_.InvalidateWeakPtrs();
}
-std::unique_ptr<
- GpuWatchdogThreadImplV1::SuspensionCounter::SuspensionCounterRef>
-GpuWatchdogThreadImplV1::SuspensionCounter::Take() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(watchdog_thread_sequence_checker_);
- return std::make_unique<SuspensionCounterRef>(this);
+void GpuWatchdogThread::ReportProgress() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
+ InProgress();
}
-bool GpuWatchdogThreadImplV1::SuspensionCounter::HasRefs() const {
- DCHECK_CALLED_ON_VALID_SEQUENCE(watchdog_thread_sequence_checker_);
- return suspend_count_ > 0;
+void GpuWatchdogThread::WillProcessTask(const base::PendingTask& pending_task,
+ bool was_blocked_or_low_priority) {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
+
+ // The watchdog is armed at the beginning of the gpu process teardown.
+ // Do not call Arm() during teardown.
+ if (in_gpu_process_teardown_)
+ DCHECK(IsArmed());
+ else
+ Arm();
}
-void GpuWatchdogThreadImplV1::SuspensionCounter::OnWatchdogThreadStopped() {
- DETACH_FROM_SEQUENCE(watchdog_thread_sequence_checker_);
+void GpuWatchdogThread::DidProcessTask(const base::PendingTask& pending_task) {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
- // Null the |watchdog_thread_| ptr at shutdown to avoid trying to suspend or
- // resume after the thread is stopped.
- watchdog_thread_ = nullptr;
+ // Keep the watchdog armed during tear down.
+ if (in_gpu_process_teardown_)
+ InProgress();
+ else
+ Disarm();
}
-void GpuWatchdogThreadImplV1::SuspensionCounter::OnAddRef() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(watchdog_thread_sequence_checker_);
- suspend_count_++;
- if (watchdog_thread_ && suspend_count_ == 1)
- watchdog_thread_->SuspendStateChanged();
+// Power Suspends. Running on the watchdog thread.
+void GpuWatchdogThread::OnSuspend() {
+ StopWatchdogTimeoutTask(kPowerSuspendResume);
}
-void GpuWatchdogThreadImplV1::SuspensionCounter::OnReleaseRef() {
- DCHECK_CALLED_ON_VALID_SEQUENCE(watchdog_thread_sequence_checker_);
- DCHECK_GT(suspend_count_, 0u);
- suspend_count_--;
- if (watchdog_thread_ && suspend_count_ == 0)
- watchdog_thread_->SuspendStateChanged();
+// Power Resumes. Running on the watchdog thread.
+void GpuWatchdogThread::OnResume() {
+ RestartWatchdogTimeoutTask(kPowerSuspendResume);
}
-GpuWatchdogThreadImplV1::~GpuWatchdogThreadImplV1() {
- DCHECK(watched_task_runner_->BelongsToCurrentThread());
+// Running on the watchdog thread.
+void GpuWatchdogThread::OnAddPowerObserver() {
+ DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
+ DCHECK(base::PowerMonitor::IsInitialized());
- Stop();
- suspension_counter_.OnWatchdogThreadStopped();
+ base::PowerMonitor::AddObserver(this);
+ is_power_observer_added_ = true;
+}
+
+// Running on the watchdog thread.
+void GpuWatchdogThread::RestartWatchdogTimeoutTask(
+ PauseResumeSource source_of_request) {
+ DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
+ base::TimeDelta timeout;
+
+ switch (source_of_request) {
+ case kAndroidBackgroundForeground:
+ if (!is_backgrounded_)
+ return;
+ is_backgrounded_ = false;
+ timeout = watchdog_timeout_ * watchdog_restart_factor_;
+ foregrounded_timeticks_ = base::TimeTicks::Now();
+ foregrounded_event_ = true;
+ num_of_timeout_after_foregrounded_ = 0;
+ break;
+ case kPowerSuspendResume:
+ if (!in_power_suspension_)
+ return;
+ in_power_suspension_ = false;
+ timeout = watchdog_timeout_ * watchdog_restart_factor_;
+ power_resume_timeticks_ = base::TimeTicks::Now();
+ power_resumed_event_ = true;
+ num_of_timeout_after_power_resume_ = 0;
+ break;
+ case kGeneralGpuFlow:
+ if (!is_paused_)
+ return;
+ is_paused_ = false;
+ timeout = watchdog_timeout_ * watchdog_init_factor_;
+ watchdog_resume_timeticks_ = base::TimeTicks::Now();
+ break;
+ }
+ if (!is_backgrounded_ && !in_power_suspension_ && !is_paused_) {
+ weak_ptr_ = weak_factory_.GetWeakPtr();
+ task_runner()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogThread::OnWatchdogTimeout, weak_ptr_),
+ timeout);
+ last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
+ next_on_watchdog_timeout_time_ = base::Time::Now() + timeout;
+ last_arm_disarm_counter_ = ReadArmDisarmCounter();
#if defined(OS_WIN)
- CloseHandle(watched_thread_handle_);
+ if (watched_thread_handle_) {
+ last_on_watchdog_timeout_thread_ticks_ = GetWatchedThreadTime();
+ remaining_watched_thread_ticks_ = timeout;
+ }
#endif
+ }
+}
- base::PowerMonitor::RemoveObserver(this);
+void GpuWatchdogThread::StopWatchdogTimeoutTask(
+ PauseResumeSource source_of_request) {
+ DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
+
+ switch (source_of_request) {
+ case kAndroidBackgroundForeground:
+ if (is_backgrounded_)
+ return;
+ is_backgrounded_ = true;
+ backgrounded_timeticks_ = base::TimeTicks::Now();
+ foregrounded_event_ = false;
+ break;
+ case kPowerSuspendResume:
+ if (in_power_suspension_)
+ return;
+ in_power_suspension_ = true;
+ power_suspend_timeticks_ = base::TimeTicks::Now();
+ power_resumed_event_ = false;
+ break;
+ case kGeneralGpuFlow:
+ if (is_paused_)
+ return;
+ is_paused_ = true;
+ watchdog_pause_timeticks_ = base::TimeTicks::Now();
+ break;
+ }
-#if defined(USE_X11)
- if (tty_file_)
- fclose(tty_file_);
-#endif
+ // Revoke any pending watchdog timeout task
+ weak_factory_.InvalidateWeakPtrs();
+}
- base::CurrentThread::Get()->RemoveTaskObserver(&task_observer_);
+void GpuWatchdogThread::UpdateInitializationFlag() {
+ in_gpu_initialization_ = false;
}
-void GpuWatchdogThreadImplV1::OnAcknowledge() {
- CHECK(base::PlatformThread::CurrentId() == GetThreadId());
+// Called from the gpu main thread.
+// The watchdog is armed only in these three functions -
+// GpuWatchdogThread(), WillProcessTask(), and OnGpuProcessTearDown()
+void GpuWatchdogThread::Arm() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
- // The check has already been acknowledged and another has already been
- // scheduled by a previous call to OnAcknowledge. It is normal for a
- // watched thread to see armed_ being true multiple times before
- // the OnAcknowledge task is run on the watchdog thread.
- if (!armed_)
- return;
+ base::subtle::NoBarrier_AtomicIncrement(&arm_disarm_counter_, 1);
- // Revoke any pending hang termination.
- weak_factory_.InvalidateWeakPtrs();
- armed_ = false;
+ // Arm/Disarm are always called in sequence. Now it's an odd number.
+ DCHECK(IsArmed());
+}
- if (suspension_counter_.HasRefs()) {
- responsive_acknowledge_count_ = 0;
- return;
- }
+void GpuWatchdogThread::Disarm() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
- base::Time current_time = base::Time::Now();
+ base::subtle::NoBarrier_AtomicIncrement(&arm_disarm_counter_, 1);
- // The watchdog waits until at least 6 consecutive checks have returned in
- // less than 50 ms before it will start ignoring the CPU time in determining
- // whether to timeout. This is a compromise to allow startups that are slow
- // due to disk contention to avoid timing out, but once the GPU process is
- // running smoothly the watchdog will be able to detect hangs that don't use
- // the CPU.
- if ((current_time - check_time_) < base::TimeDelta::FromMilliseconds(50))
- responsive_acknowledge_count_++;
- else
- responsive_acknowledge_count_ = 0;
+ // Arm/Disarm are always called in sequence. Now it's an even number.
+ DCHECK(!IsArmed());
+}
- if (responsive_acknowledge_count_ >= 6)
- use_thread_cpu_time_ = false;
+void GpuWatchdogThread::InProgress() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
- // If it took a long time for the acknowledgement, assume the computer was
- // recently suspended.
- bool was_suspended = (current_time > suspension_timeout_);
+ // Increment by 2. This is equivalent to Disarm() + Arm().
+ base::subtle::NoBarrier_AtomicIncrement(&arm_disarm_counter_, 2);
- // The monitored thread has responded. Post a task to check it again.
- task_runner()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV1::OnCheck,
- weak_factory_.GetWeakPtr(), was_suspended),
- 0.5 * timeout_);
+ // Now it's an odd number.
+ DCHECK(IsArmed());
}
-void GpuWatchdogThreadImplV1::OnCheck(bool after_suspend) {
- CHECK(base::PlatformThread::CurrentId() == GetThreadId());
+bool GpuWatchdogThread::IsArmed() {
+ // It's an odd number.
+ return base::subtle::NoBarrier_Load(&arm_disarm_counter_) & 1;
+}
- // Do not create any new termination tasks if one has already been created
- // or the system is suspended.
- if (armed_ || suspension_counter_.HasRefs())
- return;
+base::subtle::Atomic32 GpuWatchdogThread::ReadArmDisarmCounter() {
+ return base::subtle::NoBarrier_Load(&arm_disarm_counter_);
+}
- armed_ = true;
+// Running on the watchdog thread.
+void GpuWatchdogThread::OnWatchdogTimeout() {
+ DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
+ DCHECK(!is_backgrounded_);
+ DCHECK(!in_power_suspension_);
+ DCHECK(!is_paused_);
- // Must set |awaiting_acknowledge_| before posting the task. This task might
- // be the only task that will activate the TaskObserver on the watched thread
- // and it must not miss the false -> true transition. No barrier is needed
- // here, as the PostTask which follows contains a barrier.
- base::subtle::NoBarrier_Store(&awaiting_acknowledge_, true);
+ // If this metric is added too early (e.g. watchdog creation time), it cannot
+ // be persistent. The histogram data will be lost after crash or browser exit.
+ // Delay the recording of kGpuWatchdogStart until the first
+ // OnWatchdogTimeout() to ensure this metric is created in the persistent
+ // memory.
+ if (!is_watchdog_start_histogram_recorded) {
+ is_watchdog_start_histogram_recorded = true;
+ GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart);
+ }
-#if defined(OS_WIN)
- arm_cpu_time_ = GetWatchedThreadTime();
+ auto arm_disarm_counter = ReadArmDisarmCounter();
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeout);
+ if (power_resumed_event_)
+ num_of_timeout_after_power_resume_++;
+ if (foregrounded_event_)
+ num_of_timeout_after_foregrounded_++;
- QueryUnbiasedInterruptTime(&arm_interrupt_time_);
+#if defined(USE_X11)
+ UpdateActiveTTY();
#endif
- check_time_ = base::Time::Now();
- check_timeticks_ = base::TimeTicks::Now();
- // Immediately after the computer is woken up from being suspended it might
- // be pretty sluggish, so allow some extra time before the next timeout.
- base::TimeDelta timeout = timeout_ * (after_suspend ? 3 : 1);
- suspension_timeout_ = check_time_ + timeout * 2;
+ // Collect all needed info for gpu hang detection.
+ bool disarmed = arm_disarm_counter % 2 == 0; // even number
+ bool gpu_makes_progress = arm_disarm_counter != last_arm_disarm_counter_;
+ bool no_gpu_hang = disarmed || gpu_makes_progress || SlowWatchdogThread();
- // Post a task to the monitored thread that does nothing but wake up the
- // TaskObserver. Any other tasks that are pending on the watched thread will
- // also wake up the observer. This simply ensures there is at least one.
- watched_task_runner_->PostTask(FROM_HERE, base::DoNothing());
+ bool watched_thread_needs_more_time =
+ WatchedThreadNeedsMoreThreadTime(no_gpu_hang);
+ no_gpu_hang = no_gpu_hang || watched_thread_needs_more_time ||
+ ContinueOnNonHostX11ServerTty();
- // Post a task to the watchdog thread to exit if the monitored thread does
- // not respond in time.
- task_runner()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV1::OnCheckTimeout,
- weak_factory_.GetWeakPtr()),
- timeout);
-}
+ bool allows_extra_timeout = WatchedThreadGetsExtraTimeout(no_gpu_hang);
+ no_gpu_hang = no_gpu_hang || allows_extra_timeout;
+
+ // No gpu hang. Continue with another OnWatchdogTimeout task.
+ if (no_gpu_hang) {
+ last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
+ next_on_watchdog_timeout_time_ = base::Time::Now() + watchdog_timeout_;
+ last_arm_disarm_counter_ = ReadArmDisarmCounter();
+
+ task_runner()->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&GpuWatchdogThread::OnWatchdogTimeout, weak_ptr_),
+ watchdog_timeout_);
+ return;
+ }
+
+ // Still armed without any progress. GPU possibly hangs.
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKill);
+#if defined(OS_WIN)
+ if (less_than_full_thread_time_after_capped_)
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKillOnLessThreadTime);
+#endif
-void GpuWatchdogThreadImplV1::OnCheckTimeout() {
DeliberatelyTerminateToRecoverFromHang();
}
-// Use the --disable-gpu-watchdog command line switch to disable this.
-void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
- // Should not get here while the system is suspended.
- DCHECK(!suspension_counter_.HasRefs());
+bool GpuWatchdogThread::SlowWatchdogThread() {
+ // If it takes 15 more seconds than the expected time between two
+ // OnWatchdogTimeout() calls, the system is considered slow and it's not a GPU
+ // hang.
+ bool slow_watchdog_thread =
+ (base::Time::Now() - next_on_watchdog_timeout_time_) >=
+ base::TimeDelta::FromSeconds(15);
- // If the watchdog woke up significantly behind schedule, disarm and reset
- // the watchdog check. This is to prevent the watchdog thread from terminating
- // when a machine wakes up from sleep or hibernation, which would otherwise
- // appear to be a hang.
- if (base::Time::Now() > suspension_timeout_) {
- OnAcknowledge();
- return;
+ // Record this case only when a GPU hang is detected and the thread is slow.
+ if (slow_watchdog_thread)
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kSlowWatchdogThread);
+
+ return slow_watchdog_thread;
+}
+
+bool GpuWatchdogThread::WatchedThreadNeedsMoreThreadTime(
+ bool no_gpu_hang_detected) {
+#if defined(OS_WIN)
+ if (!watched_thread_handle_)
+ return false;
+
+ // We allow extra thread time. When that runs out, we extend extra timeout
+ // cycles. Now, we are extending extra timeout cycles. Don't add extra thread
+ // time.
+ if (count_of_extra_cycles_ > 0)
+ return false;
+
+ WatchedThreadNeedsMoreThreadTimeHistogram(
+ no_gpu_hang_detected,
+ /*start_of_more_thread_time*/ false);
+
+ if (!no_gpu_hang_detected && count_of_more_gpu_thread_time_allowed_ >=
+ kMaxCountOfMoreGpuThreadTimeAllowed) {
+ less_than_full_thread_time_after_capped_ = true;
+ } else {
+ less_than_full_thread_time_after_capped_ = false;
}
- if (!base::subtle::NoBarrier_Load(&awaiting_acknowledge_)) {
- OnAcknowledge();
- return;
+ // Calculate how many thread ticks the watched thread spent doing the work.
+ base::ThreadTicks now = GetWatchedThreadTime();
+ base::TimeDelta thread_time_elapsed =
+ now - last_on_watchdog_timeout_thread_ticks_;
+ last_on_watchdog_timeout_thread_ticks_ = now;
+ remaining_watched_thread_ticks_ -= thread_time_elapsed;
+
+ if (no_gpu_hang_detected ||
+ count_of_more_gpu_thread_time_allowed_ >=
+ kMaxCountOfMoreGpuThreadTimeAllowed ||
+ thread_time_elapsed < base::TimeDelta() /* bogus data */ ||
+ remaining_watched_thread_ticks_ <= base::TimeDelta()) {
+ // Reset the remaining thread ticks.
+ remaining_watched_thread_ticks_ = watchdog_timeout_;
+ count_of_more_gpu_thread_time_allowed_ = 0;
+
+ return false;
+ } else {
+ // This is the start of allowing more thread time.
+ if (count_of_more_gpu_thread_time_allowed_ == 0) {
+ WatchedThreadNeedsMoreThreadTimeHistogram(
+ no_gpu_hang_detected, /*start_of_more_thread_time*/ true);
+ }
+ count_of_more_gpu_thread_time_allowed_++;
+
+ return true;
}
+#else
+ return false;
+#endif
+}
#if defined(OS_WIN)
- // Defer termination until a certain amount of CPU time has elapsed on the
- // watched thread.
- base::ThreadTicks current_cpu_time = GetWatchedThreadTime();
- base::TimeDelta time_since_arm = current_cpu_time - arm_cpu_time_;
- if (use_thread_cpu_time_ && (time_since_arm < timeout_)) {
+base::ThreadTicks GpuWatchdogThread::GetWatchedThreadTime() {
+ DCHECK(watched_thread_handle_);
- task_runner()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV1::OnCheckTimeout,
- weak_factory_.GetWeakPtr()),
- timeout_ - time_since_arm);
- return;
+ if (base::ThreadTicks::IsSupported()) {
+ // Note: GetForThread() might return bogus results if running on different
+ // CPUs between two calls.
+ return base::ThreadTicks::GetForThread(
+ base::PlatformThreadHandle(watched_thread_handle_));
+ } else {
+ FILETIME creation_time;
+ FILETIME exit_time;
+ FILETIME kernel_time;
+ FILETIME user_time;
+ BOOL result = GetThreadTimes(watched_thread_handle_, &creation_time,
+ &exit_time, &kernel_time, &user_time);
+ if (!result)
+ return base::ThreadTicks();
+
+ // Need to bit_cast to fix alignment, then divide by 10 to convert
+ // 100-nanoseconds to microseconds.
+ int64_t user_time_us = bit_cast<int64_t, FILETIME>(user_time) / 10;
+ int64_t kernel_time_us = bit_cast<int64_t, FILETIME>(kernel_time) / 10;
+
+ return base::ThreadTicks() +
+ base::TimeDelta::FromMicroseconds(user_time_us + kernel_time_us);
}
+}
#endif
- // For minimal developer annoyance, don't keep terminating. You need to skip
- // the call to base::Process::Terminate below in a debugger for this to be
- // useful.
- static bool terminated = false;
- if (terminated)
+bool GpuWatchdogThread::WatchedThreadGetsExtraTimeout(bool no_gpu_hang) {
+ if (max_extra_cycles_before_kill_ == 0)
+ return false;
+
+ // We want to record histograms even if there is no gpu hang.
+ bool allows_more_timeouts = false;
+ WatchedThreadGetsExtraTimeoutHistogram(no_gpu_hang);
+
+ if (no_gpu_hang) {
+ if (count_of_extra_cycles_ > 0) {
+ count_of_extra_cycles_ = 0;
+ }
+ } else if (count_of_extra_cycles_ < max_extra_cycles_before_kill_) {
+ count_of_extra_cycles_++;
+ allows_more_timeouts = true;
+ }
+
+ return allows_more_timeouts;
+}
+
+void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
+ DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
+ // If this is for gpu testing, do not terminate the gpu process.
+ if (is_test_mode_) {
+ test_result_timeout_and_gpu_hang_.Set();
return;
+ }
#if defined(OS_WIN)
if (IsDebuggerPresent())
return;
#endif
-#if defined(USE_X11)
- // Don't crash if we're not on the TTY of our host X11 server.
- UpdateActiveTTY();
- if (host_tty_ != -1 && active_tty_ != -1 && host_tty_ != active_tty_) {
- OnAcknowledge();
- return;
- }
-#endif
-
-// Store variables so they're available in crash dumps to help determine the
-// cause of any hang.
+ // Store variables so they're available in crash dumps to help determine the
+ // cause of any hang.
+ base::TimeTicks function_begin_timeticks = base::TimeTicks::Now();
+ base::debug::Alias(&in_gpu_initialization_);
+ base::debug::Alias(&num_of_timeout_after_power_resume_);
+ base::debug::Alias(&num_of_timeout_after_foregrounded_);
+ base::debug::Alias(&function_begin_timeticks);
+ base::debug::Alias(&watchdog_start_timeticks_);
+ base::debug::Alias(&power_suspend_timeticks_);
+ base::debug::Alias(&power_resume_timeticks_);
+ base::debug::Alias(&backgrounded_timeticks_);
+ base::debug::Alias(&foregrounded_timeticks_);
+ base::debug::Alias(&watchdog_pause_timeticks_);
+ base::debug::Alias(&watchdog_resume_timeticks_);
+ base::debug::Alias(&in_power_suspension_);
+ base::debug::Alias(&in_gpu_process_teardown_);
+ base::debug::Alias(&is_backgrounded_);
+ base::debug::Alias(&is_add_power_observer_called_);
+ base::debug::Alias(&is_power_observer_added_);
+ base::debug::Alias(&last_on_watchdog_timeout_timeticks_);
+ base::TimeDelta timeticks_elapses =
+ function_begin_timeticks - last_on_watchdog_timeout_timeticks_;
+ base::debug::Alias(&timeticks_elapses);
+ base::debug::Alias(&max_extra_cycles_before_kill_);
#if defined(OS_WIN)
- ULONGLONG fire_interrupt_time;
- QueryUnbiasedInterruptTime(&fire_interrupt_time);
-
- // This is the time since the watchdog was armed, in 100ns intervals,
- // ignoring time where the computer is suspended.
- ULONGLONG interrupt_delay = fire_interrupt_time - arm_interrupt_time_;
-
- base::debug::Alias(&interrupt_delay);
- base::debug::Alias(&current_cpu_time);
- base::debug::Alias(&time_since_arm);
+ base::debug::Alias(&remaining_watched_thread_ticks_);
+ base::debug::Alias(&less_than_full_thread_time_after_capped_);
+#endif
- bool using_thread_ticks = base::ThreadTicks::IsSupported();
- base::debug::Alias(&using_thread_ticks);
+ GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogKill);
- bool using_high_res_timer = base::TimeTicks::IsHighResolution();
- base::debug::Alias(&using_high_res_timer);
-#endif
+ crash_keys::gpu_watchdog_crashed_in_gpu_init.Set(
+ in_gpu_initialization_ ? "1" : "0");
- int32_t awaiting_acknowledge =
- base::subtle::NoBarrier_Load(&awaiting_acknowledge_);
- base::debug::Alias(&awaiting_acknowledge);
-
- // Don't log the message to stderr in release builds because the buffer
- // may be full.
- std::string message = base::StringPrintf(
- "The GPU process hung. Terminating after %" PRId64 " ms.",
- timeout_.InMilliseconds());
- logging::LogMessageHandlerFunction handler = logging::GetLogMessageHandler();
- if (handler)
- handler(logging::LOG_ERROR, __FILE__, __LINE__, 0, message);
- DLOG(ERROR) << message;
-
- base::Time current_time = base::Time::Now();
- base::TimeTicks current_timeticks = base::TimeTicks::Now();
- base::debug::Alias(&current_time);
- base::debug::Alias(&current_timeticks);
-
- int64_t available_physical_memory =
- base::SysInfo::AmountOfAvailablePhysicalMemory() >> 20;
- crash_keys::available_physical_memory_in_mb.Set(
- base::NumberToString(available_physical_memory));
-
- gl::ShaderTracking* shader_tracking = gl::ShaderTracking::GetInstance();
- if (shader_tracking) {
- std::string shaders[2];
- shader_tracking->GetShaders(shaders, shaders + 1);
- crash_keys::current_shader_0.Set(shaders[0]);
- crash_keys::current_shader_1.Set(shaders[1]);
- }
+ crash_keys::gpu_watchdog_kill_after_power_resume.Set(
+ WithinOneMinFromPowerResumed() ? "1" : "0");
- // Check it one last time before crashing.
- if (!base::subtle::NoBarrier_Load(&awaiting_acknowledge_)) {
- OnAcknowledge();
- return;
- }
+ crash_keys::num_of_processors.Set(base::NumberToString(num_of_processors_));
- terminated = true;
+ // Check the arm_disarm_counter value one more time.
+ auto last_arm_disarm_counter = ReadArmDisarmCounter();
+ base::debug::Alias(&last_arm_disarm_counter);
// Use RESULT_CODE_HUNG so this crash is separated from other
// EXCEPTION_ACCESS_VIOLATION buckets for UMA analysis.
@@ -466,90 +700,177 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
base::Process::TerminateCurrentProcessImmediately(RESULT_CODE_HUNG);
}
-void GpuWatchdogThreadImplV1::AddPowerObserver() {
- // As we stop the task runner before destroying this class, the unretained
- // reference will always outlive the task.
- task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&GpuWatchdogThreadImplV1::OnAddPowerObserver,
- base::Unretained(this)));
+void GpuWatchdogThread::GpuWatchdogHistogram(
+ GpuWatchdogThreadEvent thread_event) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event);
}
-void GpuWatchdogThreadImplV1::OnAddPowerObserver() {
- DCHECK(base::PowerMonitor::IsInitialized());
- base::PowerMonitor::AddObserver(this);
-}
+void GpuWatchdogThread::GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent timeout_event) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout", timeout_event);
-void GpuWatchdogThreadImplV1::OnSuspend() {
- power_suspend_ref_ = suspension_counter_.Take();
-}
+ bool recorded = false;
+ if (in_gpu_initialization_) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Init",
+ timeout_event);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromPowerResumed()) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.PowerResume",
+ timeout_event);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromForegrounded()) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Foregrounded",
+ timeout_event);
+ recorded = true;
+ }
-void GpuWatchdogThreadImplV1::OnResume() {
- power_suspend_ref_.reset();
+ if (!recorded) {
+ base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Normal",
+ timeout_event);
+ }
}
-void GpuWatchdogThreadImplV1::OnBackgroundedOnWatchdogThread() {
- background_suspend_ref_ = suspension_counter_.Take();
+#if defined(OS_WIN)
+void GpuWatchdogThread::RecordExtraThreadTimeHistogram() {
+  // Record the number of timeouts the GPU main thread needs to make progress
+  // after GPU OnWatchdogTimeout() is triggered. The maximum count is 6, which
+  // is more than kMaxCountOfMoreGpuThreadTimeAllowed (currently 3).
+ constexpr int kMin = 1;
+ constexpr int kMax = 6;
+ constexpr int kBuckets = 6;
+ int count = count_of_more_gpu_thread_time_allowed_;
+ bool recorded = false;
+
+ base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime", count,
+ kMin, kMax, kBuckets);
+
+ if (in_gpu_initialization_) {
+ base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime.Init",
+ count, kMin, kMax, kBuckets);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromPowerResumed()) {
+ base::UmaHistogramCustomCounts(
+ "GPU.WatchdogThread.ExtraThreadTime.PowerResume", count, kMin, kMax,
+ kBuckets);
+ recorded = true;
+ }
+
+ if (WithinOneMinFromForegrounded()) {
+ base::UmaHistogramCustomCounts(
+ "GPU.WatchdogThread.ExtraThreadTime.Foregrounded", count, kMin, kMax,
+ kBuckets);
+ recorded = true;
+ }
+
+ if (!recorded) {
+ base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime.Normal",
+ count, kMin, kMax, kBuckets);
+ }
}
-void GpuWatchdogThreadImplV1::OnForegroundedOnWatchdogThread() {
- background_suspend_ref_.reset();
+void GpuWatchdogThread::RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(
+ int count) {
+ constexpr int kMax = 4;
+
+ base::UmaHistogramExactLinear("GPU.WatchdogThread.ExtraThreadTime.NumOfUsers",
+ count, kMax);
}
-void GpuWatchdogThreadImplV1::SuspendStateChanged() {
- if (suspension_counter_.HasRefs()) {
- suspend_time_ = base::Time::Now();
- // When suspending force an acknowledgement to cancel any pending
- // termination tasks.
- OnAcknowledge();
+void GpuWatchdogThread::WatchedThreadNeedsMoreThreadTimeHistogram(
+ bool no_gpu_hang_detected,
+ bool start_of_more_thread_time) {
+ if (start_of_more_thread_time) {
+ // This is the start of allowing more thread time. Only record it once for
+ // all following timeouts on the same detected gpu hang, so we know this
+  // is equivalent to one crash in our crash reports.
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kMoreThreadTime);
+ RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(0);
} else {
- resume_time_ = base::Time::Now();
-
- // After resuming jump-start the watchdog again.
- armed_ = false;
- OnCheck(true);
+ if (count_of_more_gpu_thread_time_allowed_ > 0) {
+ if (no_gpu_hang_detected) {
+ // If count_of_more_gpu_thread_time_allowed_ > 0, we know extra time was
+ // extended in the previous OnWatchdogTimeout(). Now we find gpu makes
+ // progress. Record this case.
+ GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent::kProgressAfterMoreThreadTime);
+ RecordExtraThreadTimeHistogram();
+ } else {
+ if (count_of_more_gpu_thread_time_allowed_ >=
+ kMaxCountOfMoreGpuThreadTimeAllowed) {
+ GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent::kLessThanFullThreadTimeAfterCapped);
+ }
+ }
+
+ // Records the number of users who are still waiting. We can use this
+ // number to calculate the number of users who had already quit.
+ RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(
+ count_of_more_gpu_thread_time_allowed_);
+
+ // Used by GPU.WatchdogThread.WaitTime later
+ time_in_wait_for_full_thread_time_ =
+ count_of_more_gpu_thread_time_allowed_ * watchdog_timeout_;
+ }
}
}
+#endif
+
+void GpuWatchdogThread::WatchedThreadGetsExtraTimeoutHistogram(
+ bool no_gpu_hang) {
+ constexpr int kMax = 60;
+ if (count_of_extra_cycles_ == 0 && !no_gpu_hang) {
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeoutWait);
+ base::UmaHistogramExactLinear("GPU.WatchdogThread.WaitTime.NumOfUsers", 0,
+ kMax);
+ } else if (count_of_extra_cycles_ > 0) {
+ int count = watchdog_timeout_.InSeconds() * count_of_extra_cycles_;
+ base::UmaHistogramExactLinear("GPU.WatchdogThread.WaitTime.NumOfUsers",
+ count, kMax);
+ if (no_gpu_hang) {
+ GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kProgressAfterWait);
+ base::UmaHistogramExactLinear(
+ "GPU.WatchdogThread.WaitTime.ProgressAfterWait", count, kMax);
#if defined(OS_WIN)
-base::ThreadTicks GpuWatchdogThreadImplV1::GetWatchedThreadTime() {
- if (base::ThreadTicks::IsSupported()) {
- // Convert ThreadTicks::Now() to TimeDelta.
- return base::ThreadTicks::GetForThread(
- base::PlatformThreadHandle(watched_thread_handle_));
- } else {
- // Use GetThreadTimes as a backup mechanism.
- FILETIME creation_time;
- FILETIME exit_time;
- FILETIME user_time;
- FILETIME kernel_time;
- BOOL result = GetThreadTimes(watched_thread_handle_, &creation_time,
- &exit_time, &kernel_time, &user_time);
- DCHECK(result);
-
- ULARGE_INTEGER user_time64;
- user_time64.HighPart = user_time.dwHighDateTime;
- user_time64.LowPart = user_time.dwLowDateTime;
-
- ULARGE_INTEGER kernel_time64;
- kernel_time64.HighPart = kernel_time.dwHighDateTime;
- kernel_time64.LowPart = kernel_time.dwLowDateTime;
-
- // Time is reported in units of 100 nanoseconds. Kernel and user time are
- // summed to deal with to kinds of hangs. One is where the GPU process is
- // stuck in user level, never calling into the kernel and kernel time is
- // not increasing. The other is where either the kernel hangs and never
- // returns to user level or where user level code
- // calls into kernel level repeatedly, giving up its quanta before it is
- // tracked, for example a loop that repeatedly Sleeps.
- return base::ThreadTicks() +
- base::TimeDelta::FromMilliseconds(static_cast<int64_t>(
- (user_time64.QuadPart + kernel_time64.QuadPart) / 10000));
+ // Add the time the GPU thread was given for the full thread time up to 60
+ // seconds. GPU.WatchdogThread.WaitTime is essentially equal to
+ // GPU.WatchdogThread.WaitTime.ProgressAfterWait on non-Windows systems.
+ base::TimeDelta wait_time = base::TimeDelta::FromSeconds(count);
+ wait_time += time_in_wait_for_full_thread_time_;
+
+ constexpr base::TimeDelta kMinTime = base::TimeDelta::FromSeconds(1);
+ constexpr base::TimeDelta kMaxTime = base::TimeDelta::FromSeconds(150);
+ constexpr int kBuckets = 50;
+
+ // The time the GPU main thread takes to finish a task after a "hang" is
+  // detected.
+ base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime", wait_time,
+ kMinTime, kMaxTime, kBuckets);
+#endif
+ }
}
}
-#endif
+
+bool GpuWatchdogThread::WithinOneMinFromPowerResumed() {
+ size_t count = base::ClampFloor<size_t>(base::TimeDelta::FromMinutes(1) /
+ watchdog_timeout_);
+ return power_resumed_event_ && num_of_timeout_after_power_resume_ <= count;
+}
+
+bool GpuWatchdogThread::WithinOneMinFromForegrounded() {
+ size_t count = base::ClampFloor<size_t>(base::TimeDelta::FromMinutes(1) /
+ watchdog_timeout_);
+ return foregrounded_event_ && num_of_timeout_after_foregrounded_ <= count;
+}
#if defined(USE_X11)
-void GpuWatchdogThreadImplV1::UpdateActiveTTY() {
+void GpuWatchdogThread::UpdateActiveTTY() {
last_active_tty_ = active_tty_;
active_tty_ = -1;
@@ -564,7 +885,45 @@ void GpuWatchdogThreadImplV1::UpdateActiveTTY() {
}
#endif
-GpuWatchdogThread::GpuWatchdogThread() : base::Thread("GpuWatchdog") {}
-GpuWatchdogThread::~GpuWatchdogThread() {}
+bool GpuWatchdogThread::ContinueOnNonHostX11ServerTty() {
+#if defined(USE_X11)
+ if (host_tty_ == -1 || active_tty_ == -1)
+ return false;
+
+ // Don't crash if we're not on the TTY of our host X11 server.
+ if (active_tty_ != host_tty_) {
+ // Only record for the time there is a change on TTY
+ if (last_active_tty_ == active_tty_) {
+ GpuWatchdogTimeoutHistogram(
+ GpuWatchdogTimeoutEvent::kContinueOnNonHostServerTty);
+ }
+ return true;
+ }
+#endif
+ return false;
+}
+
+// For gpu testing only. Return whether a GPU hang was detected or not.
+bool GpuWatchdogThread::IsGpuHangDetectedForTesting() {
+ DCHECK(is_test_mode_);
+ return test_result_timeout_and_gpu_hang_.IsSet();
+}
+
+// This should be called on the test main thread only. It will wait until the
+// power observer is added on the watchdog thread.
+void GpuWatchdogThread::WaitForPowerObserverAddedForTesting() {
+ DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
+ DCHECK(is_add_power_observer_called_);
+
+ // Just return if it has been added.
+ if (is_power_observer_added_)
+ return;
+
+ base::WaitableEvent event;
+ task_runner()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(&event)));
+ event.Wait();
+}
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.h b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
index ad26565910a..fb89801644d 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
@@ -66,213 +66,279 @@ enum class GpuWatchdogTimeoutEvent {
kMaxValue = kSlowWatchdogThread,
};
+#if defined(OS_WIN)
+// If the actual time the watched GPU thread spent doing actual work is less
+// than the watchdog timeout, the GPU thread can continue running through
+// OnGPUWatchdogTimeout for at most 4 times before the gpu thread is killed.
+constexpr int kMaxCountOfMoreGpuThreadTimeAllowed = 3;
+#endif
+constexpr int kMaxExtraCyclesBeforeKill = 0;
+
// A thread that intermitently sends tasks to a group of watched message loops
// and deliberately crashes if one of them does not respond after a timeout.
class GPU_IPC_SERVICE_EXPORT GpuWatchdogThread : public base::Thread,
public base::PowerObserver,
+ public base::TaskObserver,
public gl::ProgressReporter {
public:
+ static std::unique_ptr<GpuWatchdogThread> Create(bool start_backgrounded);
+
+ static std::unique_ptr<GpuWatchdogThread> Create(
+ bool start_backgrounded,
+ base::TimeDelta timeout,
+ int init_factor,
+ int restart_factor,
+ int max_extra_cycles_before_kill,
+ bool test_mode);
+
~GpuWatchdogThread() override;
// Must be called after a PowerMonitor has been created. Can be called from
// any thread.
- virtual void AddPowerObserver() = 0;
+ void AddPowerObserver();
// Notifies the watchdog when Chrome is backgrounded / foregrounded. Should
// only be used if Chrome is completely backgrounded and not expected to
// render (all windows backgrounded and not producing frames).
- virtual void OnBackgrounded() = 0;
- virtual void OnForegrounded() = 0;
+ void OnBackgrounded();
+ void OnForegrounded();
// The watchdog starts armed to catch startup hangs, and needs to be disarmed
// once init is complete, before executing tasks.
- virtual void OnInitComplete() = 0;
+ void OnInitComplete();
// Notifies the watchdog when the GPU child process is being destroyed.
// This function is called directly from
// viz::GpuServiceImpl::~GpuServiceImpl()
- virtual void OnGpuProcessTearDown() = 0;
+ void OnGpuProcessTearDown();
// Pause the GPU watchdog to stop the timeout task. If the current heavy task
// is not running on the GPU driver, the watchdog can be paused to avoid
// unneeded crash.
- virtual void PauseWatchdog() = 0;
+ void PauseWatchdog();
// Continue the watchdog after a pause.
- virtual void ResumeWatchdog() = 0;
+ void ResumeWatchdog();
// For gpu testing only. Return status for the watchdog tests
- virtual bool IsGpuHangDetectedForTesting() = 0;
+ bool IsGpuHangDetectedForTesting();
- virtual void WaitForPowerObserverAddedForTesting() {}
+ void WaitForPowerObserverAddedForTesting();
- protected:
- GpuWatchdogThread();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(GpuWatchdogThread);
-};
+ // Implements base::Thread.
+ void Init() override;
+ void CleanUp() override;
-class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
- : public GpuWatchdogThread {
- public:
- ~GpuWatchdogThreadImplV1() override;
-
- static std::unique_ptr<GpuWatchdogThreadImplV1> Create(
- bool start_backgrounded);
-
- // Implements GpuWatchdogThread.
- void AddPowerObserver() override;
- void OnBackgrounded() override;
- void OnForegrounded() override;
- void OnInitComplete() override {}
- void OnGpuProcessTearDown() override {}
- void ResumeWatchdog() override {}
- void PauseWatchdog() override {}
- bool IsGpuHangDetectedForTesting() override;
-
- // gl::ProgressReporter implementation:
+ // Implements gl::ProgressReporter.
void ReportProgress() override;
+ // Implements TaskObserver.
+ void WillProcessTask(const base::PendingTask& pending_task,
+ bool was_blocked_or_low_priority) override;
+ void DidProcessTask(const base::PendingTask& pending_task) override;
+
+ // Implements base::PowerObserver.
+ void OnSuspend() override;
+ void OnResume() override;
+
protected:
- void Init() override;
- void CleanUp() override;
+ GpuWatchdogThread();
private:
- // An object of this type intercepts the reception and completion of all tasks
- // on the watched thread and checks whether the watchdog is armed.
- class GpuWatchdogTaskObserver : public base::TaskObserver {
- public:
- explicit GpuWatchdogTaskObserver(GpuWatchdogThreadImplV1* watchdog);
- ~GpuWatchdogTaskObserver() override;
-
- // Implements TaskObserver.
- void WillProcessTask(const base::PendingTask& pending_task,
- bool was_blocked_or_low_priority) override;
- void DidProcessTask(const base::PendingTask& pending_task) override;
-
- private:
- GpuWatchdogThreadImplV1* watchdog_;
+ enum PauseResumeSource {
+ kAndroidBackgroundForeground = 0,
+ kPowerSuspendResume = 1,
+ kGeneralGpuFlow = 2,
};
- // A helper class which allows multiple clients to suspend/resume the
- // watchdog thread. As we need to suspend resume on both background /
- // foreground events as well as power events, this class manages a ref-count
- // of suspend requests.
- class SuspensionCounter {
- public:
- SuspensionCounter(GpuWatchdogThreadImplV1* watchdog_thread);
-
- class SuspensionCounterRef {
- public:
- explicit SuspensionCounterRef(SuspensionCounter* counter);
- ~SuspensionCounterRef();
+ GpuWatchdogThread(base::TimeDelta timeout,
+ int init_factor,
+ int restart_factor,
+ int max_extra_cycles_before_kill,
+ bool test_mode);
+ void OnAddPowerObserver();
+ void RestartWatchdogTimeoutTask(PauseResumeSource source_of_request);
+ void StopWatchdogTimeoutTask(PauseResumeSource source_of_request);
+ void UpdateInitializationFlag();
+ void Arm();
+ void Disarm();
+ void InProgress();
+ bool IsArmed();
+ base::subtle::Atomic32 ReadArmDisarmCounter();
+ void OnWatchdogTimeout();
+ bool SlowWatchdogThread();
+ bool WatchedThreadNeedsMoreThreadTime(bool no_gpu_hang_detected);
+#if defined(OS_WIN)
+ base::ThreadTicks GetWatchedThreadTime();
+#endif
+ bool WatchedThreadGetsExtraTimeout(bool no_gpu_hang);
- private:
- SuspensionCounter* counter_;
- };
+  // Do not change the function name. It is used for [GPU HANG] crash reports.
+ void DeliberatelyTerminateToRecoverFromHang();
- // This class must outlive SuspensionCounterRefs.
- std::unique_ptr<SuspensionCounterRef> Take();
+ // Records "GPU.WatchdogThread.Event".
+ void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event);
- // Used to update the |watchdog_thread_sequence_checker_|.
- void OnWatchdogThreadStopped();
+ // Histogram recorded in OnWatchdogTimeout()
+ // Records "GPU.WatchdogThread.Timeout"
+ void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event);
- bool HasRefs() const;
+#if defined(OS_WIN)
+ // The extra thread time the GPU main thread needs to make a progress.
+ // Records "GPU.WatchdogThread.ExtraThreadTime".
+ void RecordExtraThreadTimeHistogram();
+ // The number of users per timeout stay in Chrome after giving extra thread
+ // time. Records "GPU.WatchdogThread.ExtraThreadTime.NumOfUsers" and
+ // "GPU.WatchdogThread.Timeout".
+ void RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(int count);
+
+ // Histograms recorded for WatchedThreadNeedsMoreThreadTime() function.
+ void WatchedThreadNeedsMoreThreadTimeHistogram(
+ bool no_gpu_hang_detected,
+ bool start_of_more_thread_time);
+#endif
- private:
- void OnAddRef();
- void OnReleaseRef();
- GpuWatchdogThreadImplV1* watchdog_thread_;
- uint32_t suspend_count_ = 0;
+ // The number of users stay in Chrome after the extra timeout wait cycles.
+ // Records "GPU.WatchdogThread.WaitTime.ProgressAfterWait",
+ // "GPU.WatchdogThread.WaitTime.NumOfUsers" and "GPU.WatchdogThread.Timeout".
+ void WatchedThreadGetsExtraTimeoutHistogram(bool no_gpu_hang);
- SEQUENCE_CHECKER(watchdog_thread_sequence_checker_);
- };
- GpuWatchdogThreadImplV1();
+ // Used for metrics. It's 1 minute after the event.
+ bool WithinOneMinFromPowerResumed();
+ bool WithinOneMinFromForegrounded();
- void CheckArmed();
+#if defined(USE_X11)
+ void UpdateActiveTTY();
+#endif
+ // The watchdog continues when it's not on the TTY of our host X11 server.
+ bool ContinueOnNonHostX11ServerTty();
- void OnAcknowledge();
- void OnCheck(bool after_suspend);
- void OnCheckTimeout();
- // Do not change the function name. It is used for [GPU HANG] carsh reports.
- void DeliberatelyTerminateToRecoverFromHang();
+ // This counter is only written on the gpu thread, and read on both threads.
+ volatile base::subtle::Atomic32 arm_disarm_counter_ = 0;
+ // The counter number read in the last OnWatchdogTimeout() on the watchdog
+ // thread.
+ int32_t last_arm_disarm_counter_ = 0;
- void OnAddPowerObserver();
+ // Timeout on the watchdog thread to check if gpu hangs.
+ base::TimeDelta watchdog_timeout_;
- // Implement PowerObserver.
- void OnSuspend() override;
- void OnResume() override;
+ // The one-time watchdog timeout multiplier in the gpu initialization.
+ int watchdog_init_factor_;
- // Handle background/foreground.
- void OnBackgroundedOnWatchdogThread();
- void OnForegroundedOnWatchdogThread();
+ // The one-time watchdog timeout multiplier after the watchdog pauses and
+ // restarts.
+ int watchdog_restart_factor_;
- void SuspendStateChanged();
+ // The time the gpu watchdog was created.
+ base::TimeTicks watchdog_start_timeticks_;
-#if defined(OS_WIN)
- base::ThreadTicks GetWatchedThreadTime();
-#endif
+ // The time the last OnSuspend and OnResume was called.
+ base::TimeTicks power_suspend_timeticks_;
+ base::TimeTicks power_resume_timeticks_;
-#if defined(USE_X11)
- void UpdateActiveTTY();
-#endif
+ // The time the last OnBackgrounded and OnForegrounded was called.
+ base::TimeTicks backgrounded_timeticks_;
+ base::TimeTicks foregrounded_timeticks_;
- scoped_refptr<base::SingleThreadTaskRunner> watched_task_runner_;
- base::TimeDelta timeout_;
- bool armed_;
- GpuWatchdogTaskObserver task_observer_;
+ // The time PauseWatchdog and ResumeWatchdog was called.
+ base::TimeTicks watchdog_pause_timeticks_;
+ base::TimeTicks watchdog_resume_timeticks_;
- // |awaiting_acknowledge_| is only ever read on the watched thread, but may
- // be modified on either the watched or watchdog thread. Reads/writes should
- // be careful to ensure that appropriate synchronization is used.
- base::subtle::Atomic32 awaiting_acknowledge_;
+ // TimeTicks: Tracking the amount of time a task runs. Executing delayed
+ // tasks at the right time.
+ // ThreadTicks: Use this timer to (approximately) measure how much time the
+ // calling thread spent doing actual work vs. being de-scheduled.
- // True if the watchdog should wait for a certain amount of CPU to be used
- // before killing the process.
- bool use_thread_cpu_time_;
+ // The time the last OnWatchdogTimeout() was called.
+ base::TimeTicks last_on_watchdog_timeout_timeticks_;
- // The number of consecutive acknowledgements that had a latency less than
- // 50ms.
- int responsive_acknowledge_count_;
+ // The wall-clock time the next OnWatchdogTimeout() will be called.
+ base::Time next_on_watchdog_timeout_time_;
#if defined(OS_WIN)
- void* watched_thread_handle_;
- base::ThreadTicks arm_cpu_time_;
+ base::ThreadTicks last_on_watchdog_timeout_thread_ticks_;
- // This measures the time that the system has been running, in units of 100
- // ns.
- ULONGLONG arm_interrupt_time_;
-#endif
+ // The difference between the timeout and the actual time the watched thread
+ // spent doing actual work.
+ base::TimeDelta remaining_watched_thread_ticks_;
- // Time after which it's assumed that the computer has been suspended since
- // the task was posted.
- base::Time suspension_timeout_;
+  // The Windows thread handle of the watched GPU main thread.
+ void* watched_thread_handle_ = nullptr;
- SuspensionCounter suspension_counter_;
- std::unique_ptr<SuspensionCounter::SuspensionCounterRef> power_suspend_ref_;
- std::unique_ptr<SuspensionCounter::SuspensionCounterRef>
- background_suspend_ref_;
+ // After GPU hang detected, how many times has the GPU thread been allowed to
+ // continue due to not enough thread time.
+ int count_of_more_gpu_thread_time_allowed_ = 0;
- // The time the last OnSuspend and OnResume was called.
- base::Time suspend_time_;
- base::Time resume_time_;
+ // The total timeout, up to 60 seconds, the watchdog thread waits for the GPU
+ // main thread to get full thread time.
+ base::TimeDelta time_in_wait_for_full_thread_time_;
- // This is the time the last check was sent.
- base::Time check_time_;
- base::TimeTicks check_timeticks_;
+ // After detecting GPU hang and continuing running through
+ // OnGpuWatchdogTimeout for the max cycles, the GPU main thread still cannot
+ // get the full thread time.
+ bool less_than_full_thread_time_after_capped_ = false;
+#endif
#if defined(USE_X11)
- FILE* tty_file_;
- int host_tty_;
+ FILE* tty_file_ = nullptr;
+ int host_tty_ = -1;
int active_tty_ = -1;
int last_active_tty_ = -1;
#endif
- base::WeakPtrFactory<GpuWatchdogThreadImplV1> weak_factory_{this};
+ // The system has entered the power suspension mode.
+ bool in_power_suspension_ = false;
+
+ // The GPU process has started tearing down. Accessed only in the gpu process.
+ bool in_gpu_process_teardown_ = false;
+
+ // Chrome is running on the background on Android. Gpu is probably very slow
+ // or stalled.
+ bool is_backgrounded_ = false;
+
+ // The GPU watchdog is paused. The timeout task is temporarily stopped.
+ bool is_paused_ = false;
+
+ // Whether the watchdog thread has been called and added to the power monitor
+ // observer.
+ bool is_add_power_observer_called_ = false;
+ bool is_power_observer_added_ = false;
+
+ // whether GpuWatchdogThreadEvent::kGpuWatchdogStart has been recorded.
+ bool is_watchdog_start_histogram_recorded = false;
+
+ // Read/Write by the watchdog thread only after initialized in the
+ // constructor.
+ bool in_gpu_initialization_ = false;
- DISALLOW_COPY_AND_ASSIGN(GpuWatchdogThreadImplV1);
+ // The number of logical processors/cores on the current machine.
+ int num_of_processors_ = 0;
+
+ // Don't kill the GPU process immediately after a gpu hang is detected. Wait
+ // for extra cycles of timeout. Kill it, if the GPU still doesn't respond
+ // after wait.
+ const int max_extra_cycles_before_kill_;
+ // how many cycles of timeout since we detect a hang.
+ int count_of_extra_cycles_ = 0;
+
+ // For the experiment and the debugging purpose
+ size_t num_of_timeout_after_power_resume_ = 0;
+ size_t num_of_timeout_after_foregrounded_ = 0;
+ bool foregrounded_event_ = false;
+ bool power_resumed_event_ = false;
+
+ // For gpu testing only.
+ const bool is_test_mode_;
+ // Set by the watchdog thread and Read by the test thread.
+ base::AtomicFlag test_result_timeout_and_gpu_hang_;
+
+ scoped_refptr<base::SingleThreadTaskRunner> watched_gpu_task_runner_;
+ scoped_refptr<base::SingleThreadTaskRunner> watchdog_thread_task_runner_;
+
+ base::WeakPtr<GpuWatchdogThread> weak_ptr_;
+ base::WeakPtrFactory<GpuWatchdogThread> weak_factory_{this};
+
+ DISALLOW_COPY_AND_ASSIGN(GpuWatchdogThread);
};
} // namespace gpu
-
#endif // GPU_IPC_SERVICE_GPU_WATCHDOG_THREAD_H_
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc
index 9b1b3629606..953a0318c7a 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_unittest.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "base/test/task_environment.h"
-#include "gpu/ipc/service/gpu_watchdog_thread_v2.h"
#include "base/power_monitor/power_monitor.h"
#include "base/power_monitor/power_monitor_source.h"
@@ -70,7 +70,7 @@ void GpuWatchdogTest::SetUp() {
ASSERT_TRUE(base::CurrentThread::IsSet());
// Set watchdog timeout to 1000 milliseconds
- watchdog_thread_ = gpu::GpuWatchdogThreadImplV2::Create(
+ watchdog_thread_ = gpu::GpuWatchdogThread::Create(
/*start_backgrounded*/ false,
/*timeout*/ kGpuWatchdogTimeoutForTesting,
/*init_factor*/ kInitFactor,
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
deleted file mode 100644
index 3b475420976..00000000000
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
+++ /dev/null
@@ -1,933 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/service/gpu_watchdog_thread_v2.h"
-
-#include "base/atomicops.h"
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/bit_cast.h"
-#include "base/debug/alias.h"
-#include "base/debug/dump_without_crashing.h"
-#include "base/files/file_path.h"
-#include "base/files/file_util.h"
-#include "base/memory/ptr_util.h"
-#include "base/metrics/field_trial_params.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/native_library.h"
-#include "base/numerics/safe_conversions.h"
-#include "base/power_monitor/power_monitor.h"
-#include "base/process/process.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/system/sys_info.h"
-#include "base/task/current_thread.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/time/time.h"
-#include "build/build_config.h"
-#include "gpu/config/gpu_crash_keys.h"
-#include "gpu/config/gpu_finch_features.h"
-#include "gpu/ipc/common/result_codes.h"
-
-#if defined(OS_WIN)
-#include "base/win/windows_version.h"
-#endif
-
-namespace gpu {
-#if defined(OS_WIN)
-base::TimeDelta GetGpuWatchdogTimeoutBasedOnCpuCores() {
- if (base::win::GetVersion() >= base::win::Version::WIN10) {
- int num_of_processors = base::SysInfo::NumberOfProcessors();
-
- if (num_of_processors > 8)
- return (kGpuWatchdogTimeout - base::TimeDelta::FromSeconds(10));
- else if (num_of_processors <= 4)
- return kGpuWatchdogTimeout + base::TimeDelta::FromSeconds(5);
- }
-
- return kGpuWatchdogTimeout;
-}
-#endif
-
-GpuWatchdogThreadImplV2::GpuWatchdogThreadImplV2(
- base::TimeDelta timeout,
- int init_factor,
- int restart_factor,
- int max_extra_cycles_before_kill,
- bool is_test_mode)
- : watchdog_timeout_(timeout),
- watchdog_init_factor_(init_factor),
- watchdog_restart_factor_(restart_factor),
- in_gpu_initialization_(true),
- max_extra_cycles_before_kill_(max_extra_cycles_before_kill),
- is_test_mode_(is_test_mode),
- watched_gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()) {
- base::CurrentThread::Get()->AddTaskObserver(this);
- num_of_processors_ = base::SysInfo::NumberOfProcessors();
-
-#if defined(OS_WIN)
- // GetCurrentThread returns a pseudo-handle that cannot be used by one thread
- // to identify another. DuplicateHandle creates a "real" handle that can be
- // used for this purpose.
- if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
- GetCurrentProcess(), &watched_thread_handle_,
- THREAD_QUERY_INFORMATION, FALSE, 0)) {
- watched_thread_handle_ = nullptr;
- }
-#endif
-
-#if defined(USE_X11)
- tty_file_ = base::OpenFile(
- base::FilePath(FILE_PATH_LITERAL("/sys/class/tty/tty0/active")), "r");
- UpdateActiveTTY();
- host_tty_ = active_tty_;
-#endif
-
- Arm();
-}
-
-GpuWatchdogThreadImplV2::~GpuWatchdogThreadImplV2() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
- // Stop() might take too long and the watchdog timeout is triggered.
- // Disarm first before calling Stop() to avoid a crash.
- if (IsArmed())
- Disarm();
- PauseWatchdog();
-
- Stop(); // stop the watchdog thread
-
- base::CurrentThread::Get()->RemoveTaskObserver(this);
- base::PowerMonitor::RemoveObserver(this);
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogEnd);
-#if defined(OS_WIN)
- if (watched_thread_handle_)
- CloseHandle(watched_thread_handle_);
-#endif
-
-#if defined(USE_X11)
- if (tty_file_)
- fclose(tty_file_);
-#endif
-}
-
-// static
-std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create(
- bool start_backgrounded,
- base::TimeDelta timeout,
- int init_factor,
- int restart_factor,
- int max_extra_cycles_before_kill,
- bool is_test_mode) {
- auto watchdog_thread = base::WrapUnique(
- new GpuWatchdogThreadImplV2(timeout, init_factor, restart_factor,
- max_extra_cycles_before_kill, is_test_mode));
- base::Thread::Options options;
- options.timer_slack = base::TIMER_SLACK_MAXIMUM;
- watchdog_thread->StartWithOptions(options);
- if (start_backgrounded)
- watchdog_thread->OnBackgrounded();
- return watchdog_thread;
-}
-
-// static
-std::unique_ptr<GpuWatchdogThreadImplV2> GpuWatchdogThreadImplV2::Create(
- bool start_backgrounded) {
- base::TimeDelta gpu_watchdog_timeout = kGpuWatchdogTimeout;
- int init_factor = kInitFactor;
- int restart_factor = kRestartFactor;
- int max_extra_cycles_before_kill = kMaxExtraCyclesBeforeKill;
-
- if (base::FeatureList::IsEnabled(features::kGpuWatchdogV2NewTimeout)) {
- const char kNewTimeOutParam[] = "new_time_out";
- const char kMaxExtraCyclesBeforeKillParam[] =
- "max_extra_cycles_before_kill";
-
-#if defined(OS_WIN)
- // The purpose of finch on Windows is to know the impact of the number of
- // CPU cores while the rest of platforms are to try a different watchdog
- // timeout length.
- gpu_watchdog_timeout = GetGpuWatchdogTimeoutBasedOnCpuCores();
- constexpr int kFinchMaxExtraCyclesBeforeKill = 0;
-#elif defined(OS_ANDROID)
- constexpr int kFinchMaxExtraCyclesBeforeKill = 0;
- init_factor = kInitFactorFinch;
- restart_factor = kRestartFactorFinch;
-#elif defined(OS_MAC)
- constexpr int kFinchMaxExtraCyclesBeforeKill = 1;
-#else
- constexpr int kFinchMaxExtraCyclesBeforeKill = 2;
-#endif
-
- int timeout = base::GetFieldTrialParamByFeatureAsInt(
- features::kGpuWatchdogV2NewTimeout, kNewTimeOutParam,
- gpu_watchdog_timeout.InSeconds());
- gpu_watchdog_timeout = base::TimeDelta::FromSeconds(timeout);
-
- max_extra_cycles_before_kill = base::GetFieldTrialParamByFeatureAsInt(
- features::kGpuWatchdogV2NewTimeout, kMaxExtraCyclesBeforeKillParam,
- kFinchMaxExtraCyclesBeforeKill);
- }
-
- return Create(start_backgrounded, gpu_watchdog_timeout, init_factor,
- restart_factor, max_extra_cycles_before_kill, false);
-}
-
-// Do not add power observer during watchdog init, PowerMonitor might not be up
-// running yet.
-void GpuWatchdogThreadImplV2::AddPowerObserver() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- // Forward it to the watchdog thread. Call PowerMonitor::AddObserver on the
- // watchdog thread so that OnSuspend and OnResume will be called on watchdog
- // thread.
- is_add_power_observer_called_ = true;
- task_runner()->PostTask(
- FROM_HERE, base::BindOnce(&GpuWatchdogThreadImplV2::OnAddPowerObserver,
- base::Unretained(this)));
-}
-
-// Android Chrome goes to the background. Called from the gpu thread.
-void GpuWatchdogThreadImplV2::OnBackgrounded() {
- task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::StopWatchdogTimeoutTask,
- base::Unretained(this), kAndroidBackgroundForeground));
-}
-
-// Android Chrome goes to the foreground. Called from the gpu thread.
-void GpuWatchdogThreadImplV2::OnForegrounded() {
- task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask,
- base::Unretained(this), kAndroidBackgroundForeground));
-}
-
-// Called from the gpu thread when gpu init has completed.
-void GpuWatchdogThreadImplV2::OnInitComplete() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::UpdateInitializationFlag,
- base::Unretained(this)));
- Disarm();
-}
-
-// Called from the gpu thread in viz::GpuServiceImpl::~GpuServiceImpl().
-// After this, no Disarm() will be called before the watchdog thread is
-// destroyed. If this destruction takes too long, the watchdog timeout
-// will be triggered.
-void GpuWatchdogThreadImplV2::OnGpuProcessTearDown() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- in_gpu_process_teardown_ = true;
- if (!IsArmed())
- Arm();
-}
-
-// Called from the gpu main thread.
-void GpuWatchdogThreadImplV2::PauseWatchdog() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::StopWatchdogTimeoutTask,
- base::Unretained(this), kGeneralGpuFlow));
-}
-
-// Called from the gpu main thread.
-void GpuWatchdogThreadImplV2::ResumeWatchdog() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask,
- base::Unretained(this), kGeneralGpuFlow));
-}
-
-// Running on the watchdog thread.
-// On Linux, Init() will be called twice for Sandbox Initialization. The
-// watchdog is stopped and then restarted in StartSandboxLinux(). Everything
-// should be the same and continue after the second init().
-void GpuWatchdogThreadImplV2::Init() {
- watchdog_thread_task_runner_ = base::ThreadTaskRunnerHandle::Get();
-
- // Get and Invalidate weak_ptr should be done on the watchdog thread only.
- weak_ptr_ = weak_factory_.GetWeakPtr();
- base::TimeDelta timeout = watchdog_timeout_ * kInitFactor;
- task_runner()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_),
- timeout);
-
- last_arm_disarm_counter_ = ReadArmDisarmCounter();
- watchdog_start_timeticks_ = base::TimeTicks::Now();
- last_on_watchdog_timeout_timeticks_ = watchdog_start_timeticks_;
- next_on_watchdog_timeout_time_ = base::Time::Now() + timeout;
-
-#if defined(OS_WIN)
- if (watched_thread_handle_) {
- if (base::ThreadTicks::IsSupported())
- base::ThreadTicks::WaitUntilInitialized();
- last_on_watchdog_timeout_thread_ticks_ = GetWatchedThreadTime();
- remaining_watched_thread_ticks_ = timeout;
- }
-#endif
-}
-
-// Running on the watchdog thread.
-void GpuWatchdogThreadImplV2::CleanUp() {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
- weak_factory_.InvalidateWeakPtrs();
-}
-
-void GpuWatchdogThreadImplV2::ReportProgress() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
- InProgress();
-}
-
-void GpuWatchdogThreadImplV2::WillProcessTask(
- const base::PendingTask& pending_task,
- bool was_blocked_or_low_priority) {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- // The watchdog is armed at the beginning of the gpu process teardown.
- // Do not call Arm() during teardown.
- if (in_gpu_process_teardown_)
- DCHECK(IsArmed());
- else
- Arm();
-}
-
-void GpuWatchdogThreadImplV2::DidProcessTask(
- const base::PendingTask& pending_task) {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- // Keep the watchdog armed during tear down.
- if (in_gpu_process_teardown_)
- InProgress();
- else
- Disarm();
-}
-
-// Power Suspends. Running on the watchdog thread.
-void GpuWatchdogThreadImplV2::OnSuspend() {
- StopWatchdogTimeoutTask(kPowerSuspendResume);
-}
-
-// Power Resumes. Running on the watchdog thread.
-void GpuWatchdogThreadImplV2::OnResume() {
- RestartWatchdogTimeoutTask(kPowerSuspendResume);
-}
-
-// Running on the watchdog thread.
-void GpuWatchdogThreadImplV2::OnAddPowerObserver() {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
- DCHECK(base::PowerMonitor::IsInitialized());
-
- base::PowerMonitor::AddObserver(this);
- is_power_observer_added_ = true;
-}
-
-// Running on the watchdog thread.
-void GpuWatchdogThreadImplV2::RestartWatchdogTimeoutTask(
- PauseResumeSource source_of_request) {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
- base::TimeDelta timeout;
-
- switch (source_of_request) {
- case kAndroidBackgroundForeground:
- if (!is_backgrounded_)
- return;
- is_backgrounded_ = false;
- timeout = watchdog_timeout_ * watchdog_restart_factor_;
- foregrounded_timeticks_ = base::TimeTicks::Now();
- foregrounded_event_ = true;
- num_of_timeout_after_foregrounded_ = 0;
- break;
- case kPowerSuspendResume:
- if (!in_power_suspension_)
- return;
- in_power_suspension_ = false;
- timeout = watchdog_timeout_ * watchdog_restart_factor_;
- power_resume_timeticks_ = base::TimeTicks::Now();
- power_resumed_event_ = true;
- num_of_timeout_after_power_resume_ = 0;
- break;
- case kGeneralGpuFlow:
- if (!is_paused_)
- return;
- is_paused_ = false;
- timeout = watchdog_timeout_ * watchdog_init_factor_;
- watchdog_resume_timeticks_ = base::TimeTicks::Now();
- break;
- }
-
- if (!is_backgrounded_ && !in_power_suspension_ && !is_paused_) {
- weak_ptr_ = weak_factory_.GetWeakPtr();
- task_runner()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_),
- timeout);
- last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
- next_on_watchdog_timeout_time_ = base::Time::Now() + timeout;
- last_arm_disarm_counter_ = ReadArmDisarmCounter();
-#if defined(OS_WIN)
- if (watched_thread_handle_) {
- last_on_watchdog_timeout_thread_ticks_ = GetWatchedThreadTime();
- remaining_watched_thread_ticks_ = timeout;
- }
-#endif
- }
-}
-
-void GpuWatchdogThreadImplV2::StopWatchdogTimeoutTask(
- PauseResumeSource source_of_request) {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
-
- switch (source_of_request) {
- case kAndroidBackgroundForeground:
- if (is_backgrounded_)
- return;
- is_backgrounded_ = true;
- backgrounded_timeticks_ = base::TimeTicks::Now();
- foregrounded_event_ = false;
- break;
- case kPowerSuspendResume:
- if (in_power_suspension_)
- return;
- in_power_suspension_ = true;
- power_suspend_timeticks_ = base::TimeTicks::Now();
- power_resumed_event_ = false;
- break;
- case kGeneralGpuFlow:
- if (is_paused_)
- return;
- is_paused_ = true;
- watchdog_pause_timeticks_ = base::TimeTicks::Now();
- break;
- }
-
- // Revoke any pending watchdog timeout task
- weak_factory_.InvalidateWeakPtrs();
-}
-
-void GpuWatchdogThreadImplV2::UpdateInitializationFlag() {
- in_gpu_initialization_ = false;
-}
-
-// Called from the gpu main thread.
-// The watchdog is armed only in these three functions -
-// GpuWatchdogThreadImplV2(), WillProcessTask(), and OnGpuProcessTearDown()
-void GpuWatchdogThreadImplV2::Arm() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- base::subtle::NoBarrier_AtomicIncrement(&arm_disarm_counter_, 1);
-
- // Arm/Disarm are always called in sequence. Now it's an odd number.
- DCHECK(IsArmed());
-}
-
-void GpuWatchdogThreadImplV2::Disarm() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- base::subtle::NoBarrier_AtomicIncrement(&arm_disarm_counter_, 1);
-
- // Arm/Disarm are always called in sequence. Now it's an even number.
- DCHECK(!IsArmed());
-}
-
-void GpuWatchdogThreadImplV2::InProgress() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
-
- // Increment by 2. This is equivalent to Disarm() + Arm().
- base::subtle::NoBarrier_AtomicIncrement(&arm_disarm_counter_, 2);
-
- // Now it's an odd number.
- DCHECK(IsArmed());
-}
-
-bool GpuWatchdogThreadImplV2::IsArmed() {
- // It's an odd number.
- return base::subtle::NoBarrier_Load(&arm_disarm_counter_) & 1;
-}
-
-base::subtle::Atomic32 GpuWatchdogThreadImplV2::ReadArmDisarmCounter() {
- return base::subtle::NoBarrier_Load(&arm_disarm_counter_);
-}
-
-// Running on the watchdog thread.
-void GpuWatchdogThreadImplV2::OnWatchdogTimeout() {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
- DCHECK(!is_backgrounded_);
- DCHECK(!in_power_suspension_);
- DCHECK(!is_paused_);
-
- // If this metric is added too early (eg. watchdog creation time), it cannot
- // be persistent. The histogram data will be lost after crash or browser exit.
- // Delay the recording of kGpuWatchdogStart until the firs
- // OnWatchdogTimeout() to ensure this metric is created in the persistent
- // memory.
- if (!is_watchdog_start_histogram_recorded) {
- is_watchdog_start_histogram_recorded = true;
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart);
- }
-
- auto arm_disarm_counter = ReadArmDisarmCounter();
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeout);
- if (power_resumed_event_)
- num_of_timeout_after_power_resume_++;
- if (foregrounded_event_)
- num_of_timeout_after_foregrounded_++;
-
-#if defined(USE_X11)
- UpdateActiveTTY();
-#endif
-
- // Collect all needed info for gpu hang detection.
- bool disarmed = arm_disarm_counter % 2 == 0; // even number
- bool gpu_makes_progress = arm_disarm_counter != last_arm_disarm_counter_;
- bool no_gpu_hang = disarmed || gpu_makes_progress || SlowWatchdogThread();
-
- bool watched_thread_needs_more_time =
- WatchedThreadNeedsMoreThreadTime(no_gpu_hang);
- no_gpu_hang = no_gpu_hang || watched_thread_needs_more_time ||
- ContinueOnNonHostX11ServerTty();
-
- bool allows_extra_timeout = WatchedThreadGetsExtraTimeout(no_gpu_hang);
- no_gpu_hang = no_gpu_hang || allows_extra_timeout;
-
- // No gpu hang. Continue with another OnWatchdogTimeout task.
- if (no_gpu_hang) {
- last_on_watchdog_timeout_timeticks_ = base::TimeTicks::Now();
- next_on_watchdog_timeout_time_ = base::Time::Now() + watchdog_timeout_;
- last_arm_disarm_counter_ = ReadArmDisarmCounter();
-
- task_runner()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(&GpuWatchdogThreadImplV2::OnWatchdogTimeout, weak_ptr_),
- watchdog_timeout_);
- return;
- }
-
- // Still armed without any progress. GPU possibly hangs.
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKill);
-#if defined(OS_WIN)
- if (less_than_full_thread_time_after_capped_)
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKillOnLessThreadTime);
-#endif
-
- DeliberatelyTerminateToRecoverFromHang();
-}
-
-bool GpuWatchdogThreadImplV2::SlowWatchdogThread() {
- // If it takes 15 more seconds than the expected time between two
- // OnWatchdogTimeout() calls, the system is considered slow and it's not a GPU
- // hang.
- bool slow_watchdog_thread =
- (base::Time::Now() - next_on_watchdog_timeout_time_) >=
- base::TimeDelta::FromSeconds(15);
-
- // Record this case only when a GPU hang is detected and the thread is slow.
- if (slow_watchdog_thread)
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kSlowWatchdogThread);
-
- return slow_watchdog_thread;
-}
-
-bool GpuWatchdogThreadImplV2::WatchedThreadNeedsMoreThreadTime(
- bool no_gpu_hang_detected) {
-#if defined(OS_WIN)
- if (!watched_thread_handle_)
- return false;
-
- // We allow extra thread time. When that runs out, we extend extra timeout
- // cycles. Now, we are extending extra timeout cycles. Don't add extra thread
- // time.
- if (count_of_extra_cycles_ > 0)
- return false;
-
- WatchedThreadNeedsMoreThreadTimeHistogram(
- no_gpu_hang_detected,
- /*start_of_more_thread_time*/ false);
-
- if (!no_gpu_hang_detected && count_of_more_gpu_thread_time_allowed_ >=
- kMaxCountOfMoreGpuThreadTimeAllowed) {
- less_than_full_thread_time_after_capped_ = true;
- } else {
- less_than_full_thread_time_after_capped_ = false;
- }
-
- // Calculate how many thread ticks the watched thread spent doing the work.
- base::ThreadTicks now = GetWatchedThreadTime();
- base::TimeDelta thread_time_elapsed =
- now - last_on_watchdog_timeout_thread_ticks_;
- last_on_watchdog_timeout_thread_ticks_ = now;
- remaining_watched_thread_ticks_ -= thread_time_elapsed;
-
- if (no_gpu_hang_detected ||
- count_of_more_gpu_thread_time_allowed_ >=
- kMaxCountOfMoreGpuThreadTimeAllowed ||
- thread_time_elapsed < base::TimeDelta() /* bogus data */ ||
- remaining_watched_thread_ticks_ <= base::TimeDelta()) {
- // Reset the remaining thread ticks.
- remaining_watched_thread_ticks_ = watchdog_timeout_;
- count_of_more_gpu_thread_time_allowed_ = 0;
-
- return false;
- } else {
- // This is the start of allowing more thread time.
- if (count_of_more_gpu_thread_time_allowed_ == 0) {
- WatchedThreadNeedsMoreThreadTimeHistogram(
- no_gpu_hang_detected, /*start_of_more_thread_time*/ true);
- }
- count_of_more_gpu_thread_time_allowed_++;
-
- return true;
- }
-#else
- return false;
-#endif
-}
-
-#if defined(OS_WIN)
-base::ThreadTicks GpuWatchdogThreadImplV2::GetWatchedThreadTime() {
- DCHECK(watched_thread_handle_);
-
- if (base::ThreadTicks::IsSupported()) {
- // Note: GetForThread() might return bogus results if running on different
- // CPUs between two calls.
- return base::ThreadTicks::GetForThread(
- base::PlatformThreadHandle(watched_thread_handle_));
- } else {
- FILETIME creation_time;
- FILETIME exit_time;
- FILETIME kernel_time;
- FILETIME user_time;
- BOOL result = GetThreadTimes(watched_thread_handle_, &creation_time,
- &exit_time, &kernel_time, &user_time);
- if (!result)
- return base::ThreadTicks();
-
- // Need to bit_cast to fix alignment, then divide by 10 to convert
- // 100-nanoseconds to microseconds.
- int64_t user_time_us = bit_cast<int64_t, FILETIME>(user_time) / 10;
- int64_t kernel_time_us = bit_cast<int64_t, FILETIME>(kernel_time) / 10;
-
- return base::ThreadTicks() +
- base::TimeDelta::FromMicroseconds(user_time_us + kernel_time_us);
- }
-}
-#endif
-
-bool GpuWatchdogThreadImplV2::WatchedThreadGetsExtraTimeout(bool no_gpu_hang) {
- if (max_extra_cycles_before_kill_ == 0)
- return false;
-
- // We want to record histograms even if there is no gpu hang.
- bool allows_more_timeouts = false;
- WatchedThreadGetsExtraTimeoutHistogram(no_gpu_hang);
-
- if (no_gpu_hang) {
- if (count_of_extra_cycles_ > 0) {
- count_of_extra_cycles_ = 0;
- }
- } else if (count_of_extra_cycles_ < max_extra_cycles_before_kill_) {
- count_of_extra_cycles_++;
- allows_more_timeouts = true;
- }
-
- return allows_more_timeouts;
-}
-
-void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
- DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
- // If this is for gpu testing, do not terminate the gpu process.
- if (is_test_mode_) {
- test_result_timeout_and_gpu_hang_.Set();
- return;
- }
-
-#if defined(OS_WIN)
- if (IsDebuggerPresent())
- return;
-#endif
-
- // Store variables so they're available in crash dumps to help determine the
- // cause of any hang.
- base::TimeTicks function_begin_timeticks = base::TimeTicks::Now();
- base::debug::Alias(&in_gpu_initialization_);
- base::debug::Alias(&num_of_timeout_after_power_resume_);
- base::debug::Alias(&num_of_timeout_after_foregrounded_);
- base::debug::Alias(&function_begin_timeticks);
- base::debug::Alias(&watchdog_start_timeticks_);
- base::debug::Alias(&power_suspend_timeticks_);
- base::debug::Alias(&power_resume_timeticks_);
- base::debug::Alias(&backgrounded_timeticks_);
- base::debug::Alias(&foregrounded_timeticks_);
- base::debug::Alias(&watchdog_pause_timeticks_);
- base::debug::Alias(&watchdog_resume_timeticks_);
- base::debug::Alias(&in_power_suspension_);
- base::debug::Alias(&in_gpu_process_teardown_);
- base::debug::Alias(&is_backgrounded_);
- base::debug::Alias(&is_add_power_observer_called_);
- base::debug::Alias(&is_power_observer_added_);
- base::debug::Alias(&last_on_watchdog_timeout_timeticks_);
- base::TimeDelta timeticks_elapses =
- function_begin_timeticks - last_on_watchdog_timeout_timeticks_;
- base::debug::Alias(&timeticks_elapses);
- base::debug::Alias(&max_extra_cycles_before_kill_);
-#if defined(OS_WIN)
- base::debug::Alias(&remaining_watched_thread_ticks_);
- base::debug::Alias(&less_than_full_thread_time_after_capped_);
-#endif
-
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogKill);
-
- crash_keys::gpu_watchdog_crashed_in_gpu_init.Set(
- in_gpu_initialization_ ? "1" : "0");
-
- crash_keys::gpu_watchdog_kill_after_power_resume.Set(
- WithinOneMinFromPowerResumed() ? "1" : "0");
-
- crash_keys::num_of_processors.Set(base::NumberToString(num_of_processors_));
-
- // Check the arm_disarm_counter value one more time.
- auto last_arm_disarm_counter = ReadArmDisarmCounter();
- base::debug::Alias(&last_arm_disarm_counter);
-
- // Use RESULT_CODE_HUNG so this crash is separated from other
- // EXCEPTION_ACCESS_VIOLATION buckets for UMA analysis.
- // Create a crash dump first. TerminateCurrentProcessImmediately will not
- // create a dump.
- base::debug::DumpWithoutCrashing();
- base::Process::TerminateCurrentProcessImmediately(RESULT_CODE_HUNG);
-}
-
-void GpuWatchdogThreadImplV2::GpuWatchdogHistogram(
- GpuWatchdogThreadEvent thread_event) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event);
-}
-
-void GpuWatchdogThreadImplV2::GpuWatchdogTimeoutHistogram(
- GpuWatchdogTimeoutEvent timeout_event) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout", timeout_event);
-
- bool recorded = false;
- if (in_gpu_initialization_) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Init",
- timeout_event);
- recorded = true;
- }
-
- if (WithinOneMinFromPowerResumed()) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.PowerResume",
- timeout_event);
- recorded = true;
- }
-
- if (WithinOneMinFromForegrounded()) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Foregrounded",
- timeout_event);
- recorded = true;
- }
-
- if (!recorded) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Timeout.Normal",
- timeout_event);
- }
-}
-
-#if defined(OS_WIN)
-void GpuWatchdogThreadImplV2::RecordExtraThreadTimeHistogram() {
- // Record the number of timeouts the GPU main thread needs to make a progress
- // after GPU OnWatchdogTimeout() is triggered. The maximum count is 6 which
- // is more than kMaxCountOfMoreGpuThreadTimeAllowed(4);
- constexpr int kMin = 1;
- constexpr int kMax = 6;
- constexpr int kBuckets = 6;
- int count = count_of_more_gpu_thread_time_allowed_;
- bool recorded = false;
-
- base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime", count,
- kMin, kMax, kBuckets);
-
- if (in_gpu_initialization_) {
- base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime.Init",
- count, kMin, kMax, kBuckets);
- recorded = true;
- }
-
- if (WithinOneMinFromPowerResumed()) {
- base::UmaHistogramCustomCounts(
- "GPU.WatchdogThread.ExtraThreadTime.PowerResume", count, kMin, kMax,
- kBuckets);
- recorded = true;
- }
-
- if (WithinOneMinFromForegrounded()) {
- base::UmaHistogramCustomCounts(
- "GPU.WatchdogThread.ExtraThreadTime.Foregrounded", count, kMin, kMax,
- kBuckets);
- recorded = true;
- }
-
- if (!recorded) {
- base::UmaHistogramCustomCounts("GPU.WatchdogThread.ExtraThreadTime.Normal",
- count, kMin, kMax, kBuckets);
- }
-}
-
-void GpuWatchdogThreadImplV2::
- RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(int count) {
- constexpr int kMax = 4;
-
- base::UmaHistogramExactLinear("GPU.WatchdogThread.ExtraThreadTime.NumOfUsers",
- count, kMax);
-}
-
-void GpuWatchdogThreadImplV2::WatchedThreadNeedsMoreThreadTimeHistogram(
- bool no_gpu_hang_detected,
- bool start_of_more_thread_time) {
- if (start_of_more_thread_time) {
- // This is the start of allowing more thread time. Only record it once for
- // all following timeouts on the same detected gpu hang, so we know this
- // is equivlent one crash in our crash reports.
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kMoreThreadTime);
- RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(0);
- } else {
- if (count_of_more_gpu_thread_time_allowed_ > 0) {
- if (no_gpu_hang_detected) {
- // If count_of_more_gpu_thread_time_allowed_ > 0, we know extra time was
- // extended in the previous OnWatchdogTimeout(). Now we find gpu makes
- // progress. Record this case.
- GpuWatchdogTimeoutHistogram(
- GpuWatchdogTimeoutEvent::kProgressAfterMoreThreadTime);
- RecordExtraThreadTimeHistogram();
- } else {
- if (count_of_more_gpu_thread_time_allowed_ >=
- kMaxCountOfMoreGpuThreadTimeAllowed) {
- GpuWatchdogTimeoutHistogram(
- GpuWatchdogTimeoutEvent::kLessThanFullThreadTimeAfterCapped);
- }
- }
-
- // Records the number of users who are still waiting. We can use this
- // number to calculate the number of users who had already quit.
- RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(
- count_of_more_gpu_thread_time_allowed_);
-
- // Used by GPU.WatchdogThread.WaitTime later
- time_in_wait_for_full_thread_time_ =
- count_of_more_gpu_thread_time_allowed_ * watchdog_timeout_;
- }
- }
-}
-#endif
-
-void GpuWatchdogThreadImplV2::WatchedThreadGetsExtraTimeoutHistogram(
- bool no_gpu_hang) {
- constexpr int kMax = 60;
- if (count_of_extra_cycles_ == 0 && !no_gpu_hang) {
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeoutWait);
- base::UmaHistogramExactLinear("GPU.WatchdogThread.WaitTime.NumOfUsers", 0,
- kMax);
- } else if (count_of_extra_cycles_ > 0) {
- int count = watchdog_timeout_.InSeconds() * count_of_extra_cycles_;
- base::UmaHistogramExactLinear("GPU.WatchdogThread.WaitTime.NumOfUsers",
- count, kMax);
- if (no_gpu_hang) {
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kProgressAfterWait);
- base::UmaHistogramExactLinear(
- "GPU.WatchdogThread.WaitTime.ProgressAfterWait", count, kMax);
-
-#if defined(OS_WIN)
- // Add the time the GPU thread was given for the full thread time up to 60
- // seconds. GPU.WatchdogThread.WaitTime is essentially equal to
- // GPU.WatchdogThread.WaitTime.ProgressAfterWait on non-Windows systems.
- base::TimeDelta wait_time = base::TimeDelta::FromSeconds(count);
- wait_time += time_in_wait_for_full_thread_time_;
-
- constexpr base::TimeDelta kMinTime = base::TimeDelta::FromSeconds(1);
- constexpr base::TimeDelta kMaxTime = base::TimeDelta::FromSeconds(150);
- constexpr int kBuckets = 50;
-
- // The time the GPU main thread takes to finish a task after a "hang" is
- // dectedted.
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.WaitTime", wait_time,
- kMinTime, kMaxTime, kBuckets);
-#endif
- }
- }
-}
-
-bool GpuWatchdogThreadImplV2::WithinOneMinFromPowerResumed() {
- size_t count = base::ClampFloor<size_t>(base::TimeDelta::FromMinutes(1) /
- watchdog_timeout_);
- return power_resumed_event_ && num_of_timeout_after_power_resume_ <= count;
-}
-
-bool GpuWatchdogThreadImplV2::WithinOneMinFromForegrounded() {
- size_t count = base::ClampFloor<size_t>(base::TimeDelta::FromMinutes(1) /
- watchdog_timeout_);
- return foregrounded_event_ && num_of_timeout_after_foregrounded_ <= count;
-}
-
-#if defined(USE_X11)
-void GpuWatchdogThreadImplV2::UpdateActiveTTY() {
- last_active_tty_ = active_tty_;
-
- active_tty_ = -1;
- char tty_string[8] = {0};
- if (tty_file_ && !fseek(tty_file_, 0, SEEK_SET) &&
- fread(tty_string, 1, 7, tty_file_)) {
- int tty_number;
- if (sscanf(tty_string, "tty%d\n", &tty_number) == 1) {
- active_tty_ = tty_number;
- }
- }
-}
-#endif
-
-bool GpuWatchdogThreadImplV2::ContinueOnNonHostX11ServerTty() {
-#if defined(USE_X11)
- if (host_tty_ == -1 || active_tty_ == -1)
- return false;
-
- // Don't crash if we're not on the TTY of our host X11 server.
- if (active_tty_ != host_tty_) {
- // Only record for the time there is a change on TTY
- if (last_active_tty_ == active_tty_) {
- GpuWatchdogTimeoutHistogram(
- GpuWatchdogTimeoutEvent::kContinueOnNonHostServerTty);
- }
- return true;
- }
-#endif
- return false;
-}
-
-// For gpu testing only. Return whether a GPU hang was detected or not.
-bool GpuWatchdogThreadImplV2::IsGpuHangDetectedForTesting() {
- DCHECK(is_test_mode_);
- return test_result_timeout_and_gpu_hang_.IsSet();
-}
-
-// This should be called on the test main thread only. It will wait until the
-// power observer is added on the watchdog thread.
-void GpuWatchdogThreadImplV2::WaitForPowerObserverAddedForTesting() {
- DCHECK(watched_gpu_task_runner_->BelongsToCurrentThread());
- DCHECK(is_add_power_observer_called_);
-
- // Just return if it has been added.
- if (is_power_observer_added_)
- return;
-
- base::WaitableEvent event;
- task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&base::WaitableEvent::Signal, base::Unretained(&event)));
- event.Wait();
-}
-} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
deleted file mode 100644
index 2b038ecd4c4..00000000000
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
+++ /dev/null
@@ -1,263 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_SERVICE_GPU_WATCHDOG_THREAD_V2_H_
-#define GPU_IPC_SERVICE_GPU_WATCHDOG_THREAD_V2_H_
-
-#include "build/build_config.h"
-#include "gpu/ipc/service/gpu_watchdog_thread.h"
-
-namespace gpu {
-#if defined(OS_WIN)
-// If the actual time the watched GPU thread spent doing actual work is less
-// than the wathdog timeout, the GPU thread can continue running through
-// OnGPUWatchdogTimeout for at most 4 times before the gpu thread is killed.
-constexpr int kMaxCountOfMoreGpuThreadTimeAllowed = 3;
-#endif
-constexpr int kMaxExtraCyclesBeforeKill = 0;
-
-class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
- : public GpuWatchdogThread,
- public base::TaskObserver {
- public:
- static std::unique_ptr<GpuWatchdogThreadImplV2> Create(
- bool start_backgrounded);
-
- static std::unique_ptr<GpuWatchdogThreadImplV2> Create(
- bool start_backgrounded,
- base::TimeDelta timeout,
- int init_factor,
- int restart_factor,
- int max_extra_cycles_before_kill,
- bool test_mode);
-
- ~GpuWatchdogThreadImplV2() override;
-
- // Implements GpuWatchdogThread.
- void AddPowerObserver() override;
- void OnBackgrounded() override;
- void OnForegrounded() override;
- void OnInitComplete() override;
- void OnGpuProcessTearDown() override;
- void ResumeWatchdog() override;
- void PauseWatchdog() override;
- bool IsGpuHangDetectedForTesting() override;
- void WaitForPowerObserverAddedForTesting() override;
-
- // Implements base::Thread.
- void Init() override;
- void CleanUp() override;
-
- // Implements gl::ProgressReporter.
- void ReportProgress() override;
-
- // Implements TaskObserver.
- void WillProcessTask(const base::PendingTask& pending_task,
- bool was_blocked_or_low_priority) override;
- void DidProcessTask(const base::PendingTask& pending_task) override;
-
- // Implements base::PowerObserver.
- void OnSuspend() override;
- void OnResume() override;
-
- private:
- enum PauseResumeSource {
- kAndroidBackgroundForeground = 0,
- kPowerSuspendResume = 1,
- kGeneralGpuFlow = 2,
- };
-
- GpuWatchdogThreadImplV2(base::TimeDelta timeout,
- int init_factor,
- int restart_factor,
- int max_extra_cycles_before_kill,
- bool test_mode);
- void OnAddPowerObserver();
- void RestartWatchdogTimeoutTask(PauseResumeSource source_of_request);
- void StopWatchdogTimeoutTask(PauseResumeSource source_of_request);
- void UpdateInitializationFlag();
- void Arm();
- void Disarm();
- void InProgress();
- bool IsArmed();
- base::subtle::Atomic32 ReadArmDisarmCounter();
- void OnWatchdogTimeout();
- bool SlowWatchdogThread();
- bool WatchedThreadNeedsMoreThreadTime(bool no_gpu_hang_detected);
-#if defined(OS_WIN)
- base::ThreadTicks GetWatchedThreadTime();
-#endif
- bool WatchedThreadGetsExtraTimeout(bool no_gpu_hang);
-
- // Do not change the function name. It is used for [GPU HANG] carsh reports.
- void DeliberatelyTerminateToRecoverFromHang();
-
- // Records "GPU.WatchdogThread.Event".
- void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event);
-
- // Histogram recorded in OnWatchdogTimeout()
- // Records "GPU.WatchdogThread.Timeout"
- void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event);
-
-#if defined(OS_WIN)
- // The extra thread time the GPU main thread needs to make a progress.
- // Records "GPU.WatchdogThread.ExtraThreadTime".
- void RecordExtraThreadTimeHistogram();
- // The number of users per timeout stay in Chrome after giving extra thread
- // time. Records "GPU.WatchdogThread.ExtraThreadTime.NumOfUsers" and
- // "GPU.WatchdogThread.Timeout".
- void RecordNumOfUsersWaitingWithExtraThreadTimeHistogram(int count);
-
- // Histograms recorded for WatchedThreadNeedsMoreThreadTime() function.
- void WatchedThreadNeedsMoreThreadTimeHistogram(
- bool no_gpu_hang_detected,
- bool start_of_more_thread_time);
-#endif
-
- // The number of users stay in Chrome after the extra timeout wait cycles.
- // Records "GPU.WatchdogThread.WaitTime.ProgressAfterWait",
- // "GPU.WatchdogThread.WaitTime.NumOfUsers" and "GPU.WatchdogThread.Timeout".
- void WatchedThreadGetsExtraTimeoutHistogram(bool no_gpu_hang);
-
- // Used for metrics. It's 1 minute after the event.
- bool WithinOneMinFromPowerResumed();
- bool WithinOneMinFromForegrounded();
-
-#if defined(USE_X11)
- void UpdateActiveTTY();
-#endif
- // The watchdog continues when it's not on the TTY of our host X11 server.
- bool ContinueOnNonHostX11ServerTty();
-
- // This counter is only written on the gpu thread, and read on both threads.
- volatile base::subtle::Atomic32 arm_disarm_counter_ = 0;
- // The counter number read in the last OnWatchdogTimeout() on the watchdog
- // thread.
- int32_t last_arm_disarm_counter_ = 0;
-
- // Timeout on the watchdog thread to check if gpu hangs.
- base::TimeDelta watchdog_timeout_;
-
- // The one-time watchdog timeout multiplier in the gpu initialization.
- int watchdog_init_factor_;
-
- // The one-time watchdog timeout multiplier after the watchdog pauses and
- // restarts.
- int watchdog_restart_factor_;
-
- // The time the gpu watchdog was created.
- base::TimeTicks watchdog_start_timeticks_;
-
- // The time the last OnSuspend and OnResume was called.
- base::TimeTicks power_suspend_timeticks_;
- base::TimeTicks power_resume_timeticks_;
-
- // The time the last OnBackgrounded and OnForegrounded was called.
- base::TimeTicks backgrounded_timeticks_;
- base::TimeTicks foregrounded_timeticks_;
-
- // The time PauseWatchdog and ResumeWatchdog was called.
- base::TimeTicks watchdog_pause_timeticks_;
- base::TimeTicks watchdog_resume_timeticks_;
-
- // TimeTicks: Tracking the amount of time a task runs. Executing delayed
- // tasks at the right time.
- // ThreadTicks: Use this timer to (approximately) measure how much time the
- // calling thread spent doing actual work vs. being de-scheduled.
-
- // The time the last OnWatchdogTimeout() was called.
- base::TimeTicks last_on_watchdog_timeout_timeticks_;
-
- // The wall-clock time the next OnWatchdogTimeout() will be called.
- base::Time next_on_watchdog_timeout_time_;
-
-#if defined(OS_WIN)
- base::ThreadTicks last_on_watchdog_timeout_thread_ticks_;
-
- // The difference between the timeout and the actual time the watched thread
- // spent doing actual work.
- base::TimeDelta remaining_watched_thread_ticks_;
-
- // The Windows thread hanndle of the watched GPU main thread.
- void* watched_thread_handle_ = nullptr;
-
- // After GPU hang detected, how many times has the GPU thread been allowed to
- // continue due to not enough thread time.
- int count_of_more_gpu_thread_time_allowed_ = 0;
-
- // The total timeout, up to 60 seconds, the watchdog thread waits for the GPU
- // main thread to get full thread time.
- base::TimeDelta time_in_wait_for_full_thread_time_;
-
- // After detecting GPU hang and continuing running through
- // OnGpuWatchdogTimeout for the max cycles, the GPU main thread still cannot
- // get the full thread time.
- bool less_than_full_thread_time_after_capped_ = false;
-#endif
-
-#if defined(USE_X11)
- FILE* tty_file_ = nullptr;
- int host_tty_ = -1;
- int active_tty_ = -1;
- int last_active_tty_ = -1;
-#endif
-
- // The system has entered the power suspension mode.
- bool in_power_suspension_ = false;
-
- // The GPU process has started tearing down. Accessed only in the gpu process.
- bool in_gpu_process_teardown_ = false;
-
- // Chrome is running on the background on Android. Gpu is probably very slow
- // or stalled.
- bool is_backgrounded_ = false;
-
- // The GPU watchdog is paused. The timeout task is temporarily stopped.
- bool is_paused_ = false;
-
- // Whether the watchdog thread has been called and added to the power monitor
- // observer.
- bool is_add_power_observer_called_ = false;
- bool is_power_observer_added_ = false;
-
- // whether GpuWatchdogThreadEvent::kGpuWatchdogStart has been recorded.
- bool is_watchdog_start_histogram_recorded = false;
-
- // Read/Write by the watchdog thread only after initialized in the
- // constructor.
- bool in_gpu_initialization_ = false;
-
- // The number of logical processors/cores on the current machine.
- int num_of_processors_ = 0;
-
- // Don't kill the GPU process immediately after a gpu hang is detected. Wait
- // for extra cycles of timeout. Kill it, if the GPU still doesn't respond
- // after wait.
- const int max_extra_cycles_before_kill_;
- // how many cycles of timeout since we detect a hang.
- int count_of_extra_cycles_ = 0;
-
- // For the experiment and the debugging purpose
- size_t num_of_timeout_after_power_resume_ = 0;
- size_t num_of_timeout_after_foregrounded_ = 0;
- bool foregrounded_event_ = false;
- bool power_resumed_event_ = false;
-
- // For gpu testing only.
- const bool is_test_mode_;
- // Set by the watchdog thread and Read by the test thread.
- base::AtomicFlag test_result_timeout_and_gpu_hang_;
-
- scoped_refptr<base::SingleThreadTaskRunner> watched_gpu_task_runner_;
- scoped_refptr<base::SingleThreadTaskRunner> watchdog_thread_task_runner_;
-
- base::WeakPtr<GpuWatchdogThreadImplV2> weak_ptr_;
- base::WeakPtrFactory<GpuWatchdogThreadImplV2> weak_factory_{this};
-
- DISALLOW_COPY_AND_ASSIGN(GpuWatchdogThreadImplV2);
-};
-
-} // namespace gpu
-
-#endif // GPU_IPC_SERVICE_GPU_WATCHDOG_THREAD_V2_H_
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
index 462ed98cc09..caef591ae8d 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
@@ -22,6 +22,7 @@
#include "base/optional.h"
#include "base/single_thread_task_runner.h"
#include "build/build_config.h"
+#include "build/chromeos_buildflags.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/context_result.h"
#include "gpu/command_buffer/common/discardable_handle.h"
@@ -56,7 +57,7 @@
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_image.h"
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
#include "ui/gfx/linux/native_pixmap_dmabuf.h"
#include "ui/gl/gl_image_native_pixmap.h"
#endif
@@ -64,7 +65,7 @@
namespace gpu {
class Buffer;
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
namespace {
struct CleanUpContext {
@@ -246,7 +247,7 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
std::vector<sk_sp<SkImage>> plane_sk_images;
base::Optional<base::ScopedClosureRunner> notify_gl_state_changed;
-#if defined(OS_CHROMEOS)
+#if BUILDFLAG(IS_ASH)
// Right now, we only support YUV 4:2:0 for the output of the decoder (either
// as YV12 or NV12).
//
@@ -357,17 +358,14 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
shared_context_state->PessimisticallyResetGrContext();
// Create a SkImage using the texture.
- // TODO(crbug.com/985458): ideally, we use GL_RG8_EXT for the NV12 chroma
- // plane. However, Skia does not have a corresponding SkColorType. Revisit
- // this when it's supported.
const GrBackendTexture plane_backend_texture(
plane_size.width(), plane_size.height(), GrMipMapped::kNo,
GrGLTextureInfo{GL_TEXTURE_EXTERNAL_OES, resource->texture,
- is_nv12_chroma_plane ? GL_RGBA8_EXT : GL_R8_EXT});
+ is_nv12_chroma_plane ? GL_RG8_EXT : GL_R8_EXT});
plane_sk_images[plane] = SkImage::MakeFromTexture(
shared_context_state->gr_context(), plane_backend_texture,
kTopLeft_GrSurfaceOrigin,
- is_nv12_chroma_plane ? kRGBA_8888_SkColorType : kAlpha_8_SkColorType,
+ is_nv12_chroma_plane ? kR8G8_unorm_SkColorType : kAlpha_8_SkColorType,
kOpaque_SkAlphaType, nullptr /* colorSpace */, CleanUpResource,
resource);
if (!plane_sk_images[plane]) {
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
index ab25393e785..88618132eb4 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
@@ -100,8 +100,7 @@ struct ExpectedCacheEntry {
SkISize dimensions;
};
-std::unique_ptr<MemoryTracker> CreateMockMemoryTracker(
- const GPUCreateCommandBufferConfig& init_params) {
+std::unique_ptr<MemoryTracker> CreateMockMemoryTracker() {
return std::make_unique<NiceMock<gles2::MockMemoryTracker>>();
}
@@ -135,7 +134,7 @@ base::CheckedNumeric<uint64_t> GetExpectedTotalMippedSizeForPlanarImage(
base::CheckedNumeric<uint64_t> safe_total_image_size = 0u;
for (const auto& plane_image : decode_entry->plane_images()) {
safe_total_image_size += base::strict_cast<uint64_t>(
- GrContext::ComputeImageSize(plane_image, GrMipMapped::kYes));
+ GrDirectContext::ComputeImageSize(plane_image, GrMipMapped::kYes));
}
return safe_total_image_size;
}
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index 67e0efb23c7..cd73f9e5669 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -7,7 +7,7 @@
#include <sstream>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/metrics/histogram_macros.h"
#include "base/threading/thread_task_runner_handle.h"
diff --git a/chromium/gpu/ipc/service/image_transport_surface_win.cc b/chromium/gpu/ipc/service/image_transport_surface_win.cc
index 1c6f9f603bf..fbdcd4ee200 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_win.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_win.cc
@@ -29,8 +29,6 @@ CreateDirectCompositionSurfaceSettings(
gl::DirectCompositionSurfaceWin::Settings settings;
settings.disable_nv12_dynamic_textures =
workarounds.disable_nv12_dynamic_textures;
- settings.disable_larger_than_screen_overlays =
- workarounds.disable_larger_than_screen_overlays;
settings.disable_vp_scaling = workarounds.disable_vp_scaling;
settings.use_angle_texture_offset = features::IsUsingSkiaRenderer();
settings.reset_vp_when_colorspace_changes =
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
index 4d0703ea055..9f9ac2aa7b9 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
@@ -7,7 +7,7 @@
#include <utility>
#include "base/bind.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/command_line.h"
#include "base/metrics/histogram_macros.h"
#include "build/build_config.h"
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
index e56d4324689..120a42928df 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
@@ -119,7 +119,7 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
use_virtualized_gl_context_ =
shared_context_state->use_virtualized_gl_contexts();
- memory_tracker_ = CreateMemoryTracker(init_params);
+ memory_tracker_ = CreateMemoryTracker();
command_buffer_ =
std::make_unique<CommandBufferService>(this, memory_tracker_.get());
@@ -177,8 +177,8 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
return gpu::ContextResult::kSuccess;
}
-MemoryTracker* RasterCommandBufferStub::GetMemoryTracker() const {
- return memory_tracker_.get();
+MemoryTracker* RasterCommandBufferStub::GetContextGroupMemoryTracker() const {
+ return nullptr;
}
bool RasterCommandBufferStub::HandleMessage(const IPC::Message& message) {
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.h b/chromium/gpu/ipc/service/raster_command_buffer_stub.h
index 920815eacbb..0c9162de388 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.h
@@ -27,15 +27,13 @@ class GPU_IPC_SERVICE_EXPORT RasterCommandBufferStub
CommandBufferStub* share_group,
const GPUCreateCommandBufferConfig& init_params,
base::UnsafeSharedMemoryRegion shared_state_shm) override;
- MemoryTracker* GetMemoryTracker() const override;
+ MemoryTracker* GetContextGroupMemoryTracker() const override;
private:
bool HandleMessage(const IPC::Message& message) override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
void SetActiveURL(GURL url) override;
- std::unique_ptr<MemoryTracker> memory_tracker_;
-
DISALLOW_COPY_AND_ASSIGN(RasterCommandBufferStub);
};
diff --git a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
index 790ebc7c634..c4dd246a099 100644
--- a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
@@ -106,7 +106,7 @@ gpu::ContextResult WebGPUCommandBufferStub::Initialize(
share_group_ = manager->share_group();
use_virtualized_gl_context_ = false;
- memory_tracker_ = CreateMemoryTracker(init_params);
+ memory_tracker_ = CreateMemoryTracker();
command_buffer_ =
std::make_unique<CommandBufferService>(this, memory_tracker_.get());
@@ -149,8 +149,8 @@ gpu::ContextResult WebGPUCommandBufferStub::Initialize(
#endif // defined(OS_FUCHSIA)
}
-MemoryTracker* WebGPUCommandBufferStub::GetMemoryTracker() const {
- return memory_tracker_.get();
+MemoryTracker* WebGPUCommandBufferStub::GetContextGroupMemoryTracker() const {
+ return nullptr;
}
bool WebGPUCommandBufferStub::HandleMessage(const IPC::Message& message) {
diff --git a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.h b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.h
index b0f768f2b55..913eae7f986 100644
--- a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.h
@@ -27,14 +27,12 @@ class GPU_IPC_SERVICE_EXPORT WebGPUCommandBufferStub
CommandBufferStub* share_group,
const GPUCreateCommandBufferConfig& init_params,
base::UnsafeSharedMemoryRegion shared_state_shm) override;
- MemoryTracker* GetMemoryTracker() const override;
+ MemoryTracker* GetContextGroupMemoryTracker() const override;
private:
bool HandleMessage(const IPC::Message& message) override;
void OnSwapBuffers(uint64_t swap_id, uint32_t flags) override;
- std::unique_ptr<MemoryTracker> memory_tracker_;
-
DISALLOW_COPY_AND_ASSIGN(WebGPUCommandBufferStub);
};
diff --git a/chromium/gpu/ipc/shared_image_interface_in_process.cc b/chromium/gpu/ipc/shared_image_interface_in_process.cc
index d6169e6b04b..54f97772018 100644
--- a/chromium/gpu/ipc/shared_image_interface_in_process.cc
+++ b/chromium/gpu/ipc/shared_image_interface_in_process.cc
@@ -16,25 +16,21 @@
#include "ui/gl/gl_context.h"
namespace gpu {
+
SharedImageInterfaceInProcess::SharedImageInterfaceInProcess(
- CommandBufferTaskExecutor* task_executor,
- SingleTaskSequence* single_task_sequence,
- CommandBufferId command_buffer_id,
- MailboxManager* mailbox_manager,
- ImageFactory* image_factory,
- MemoryTracker* memory_tracker,
+ SingleTaskSequence* task_sequence,
+ DisplayCompositorMemoryAndTaskControllerOnGpu* display_controller,
std::unique_ptr<CommandBufferHelper> command_buffer_helper)
- : task_sequence_(single_task_sequence),
- command_buffer_id_(command_buffer_id),
+ : task_sequence_(task_sequence),
+ command_buffer_id_(display_controller->NextCommandBufferId()),
command_buffer_helper_(std::move(command_buffer_helper)),
- shared_image_manager_(task_executor->shared_image_manager()),
- mailbox_manager_(mailbox_manager),
- sync_point_manager_(task_executor->sync_point_manager()) {
+ shared_image_manager_(display_controller->shared_image_manager()),
+ mailbox_manager_(display_controller->mailbox_manager()),
+ sync_point_manager_(display_controller->sync_point_manager()) {
DETACH_FROM_SEQUENCE(gpu_sequence_checker_);
task_sequence_->ScheduleTask(
base::BindOnce(&SharedImageInterfaceInProcess::SetUpOnGpu,
- base::Unretained(this), task_executor, image_factory,
- memory_tracker),
+ base::Unretained(this), display_controller),
{});
}
@@ -49,29 +45,26 @@ SharedImageInterfaceInProcess::~SharedImageInterfaceInProcess() {
{});
completion.Wait();
}
-
void SharedImageInterfaceInProcess::SetUpOnGpu(
- CommandBufferTaskExecutor* task_executor,
- ImageFactory* image_factory,
- MemoryTracker* memory_tracker) {
+ DisplayCompositorMemoryAndTaskControllerOnGpu* display_controller) {
DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ context_state_ = display_controller->shared_context_state();
- context_state_ = task_executor->GetSharedContextState().get();
create_factory_ = base::BindOnce(
- [](CommandBufferTaskExecutor* task_executor, ImageFactory* image_factory,
- MemoryTracker* memory_tracker, MailboxManager* mailbox_manager,
+ [](DisplayCompositorMemoryAndTaskControllerOnGpu* display_controller,
bool enable_wrapped_sk_image) {
auto shared_image_factory = std::make_unique<SharedImageFactory>(
- task_executor->gpu_preferences(),
- GpuDriverBugWorkarounds(task_executor->gpu_feature_info()
- .enabled_gpu_driver_bug_workarounds),
- task_executor->gpu_feature_info(),
- task_executor->GetSharedContextState().get(), mailbox_manager,
- task_executor->shared_image_manager(), image_factory,
- memory_tracker, enable_wrapped_sk_image);
+ display_controller->gpu_preferences(),
+ display_controller->gpu_driver_bug_workarounds(),
+ display_controller->gpu_feature_info(),
+ display_controller->shared_context_state(),
+ display_controller->mailbox_manager(),
+ display_controller->shared_image_manager(),
+ display_controller->image_factory(),
+ display_controller->memory_tracker(), enable_wrapped_sk_image);
return shared_image_factory;
},
- task_executor, image_factory, memory_tracker, mailbox_manager_);
+ display_controller);
// Make the SharedImageInterface use the same sequence as the command buffer,
// it's necessary for WebView because of the blocking behavior.
@@ -122,9 +115,11 @@ void SharedImageInterfaceInProcess::LazyCreateSharedImageFactory() {
return;
// We need WrappedSkImage to support creating a SharedImage with pixel data
- // when GL is unavailable. This is used in various unit tests.
+ // when GL is unavailable. This is used in various unit tests. If we don't
+ // have a command buffer helper, that means this class is created for
+ // SkiaRenderer, and we definitely need to turn on enable_wrapped_sk_image.
const bool enable_wrapped_sk_image =
- command_buffer_helper_ && command_buffer_helper_->EnableWrappedSkImage();
+ !command_buffer_helper_ || command_buffer_helper_->EnableWrappedSkImage();
shared_image_factory_ =
std::move(create_factory_).Run(enable_wrapped_sk_image);
}
diff --git a/chromium/gpu/ipc/shared_image_interface_in_process.h b/chromium/gpu/ipc/shared_image_interface_in_process.h
index 7ce2bc3eb52..8d74513f041 100644
--- a/chromium/gpu/ipc/shared_image_interface_in_process.h
+++ b/chromium/gpu/ipc/shared_image_interface_in_process.h
@@ -11,10 +11,7 @@
#include "gpu/ipc/in_process_command_buffer.h"
namespace gpu {
-class CommandBufferTaskExecutor;
-class ImageFactory;
class MailboxManager;
-class MemoryTracker;
class SyncPointClientState;
struct SyncToken;
class SharedContextState;
@@ -22,7 +19,7 @@ class SharedImageFactory;
class SharedImageManager;
class SingleTaskSequence;
-// This is an implementation of the SharedImageInterface to be used on viz
+// This is an implementation of the SharedImageInterface to be used on the viz
// compositor thread. This class also implements the corresponding parts
// happening on gpu thread.
// TODO(weiliangc): Currently this is implemented as backed by
@@ -33,12 +30,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
using CommandBufferHelper =
InProcessCommandBuffer::SharedImageInterfaceHelper;
SharedImageInterfaceInProcess(
- CommandBufferTaskExecutor* task_executor,
SingleTaskSequence* task_sequence,
- CommandBufferId command_buffer_id,
- MailboxManager* mailbox_manager,
- ImageFactory* image_factory,
- MemoryTracker* memory_tracker,
+ DisplayCompositorMemoryAndTaskControllerOnGpu* display_controller,
std::unique_ptr<CommandBufferHelper> command_buffer_helper);
~SharedImageInterfaceInProcess() override;
@@ -154,9 +147,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
private:
struct SharedImageFactoryInput;
- void SetUpOnGpu(CommandBufferTaskExecutor* task_executor,
- ImageFactory* image_factory,
- MemoryTracker* memory_tracker);
+ void SetUpOnGpu(
+ DisplayCompositorMemoryAndTaskControllerOnGpu* display_controller);
void DestroyOnGpu(base::WaitableEvent* completion);
SyncToken MakeSyncToken(uint64_t release_id) {
diff --git a/chromium/gpu/ipc/webgpu_in_process_context.cc b/chromium/gpu/ipc/webgpu_in_process_context.cc
index 1ee997439c1..3027d45f915 100644
--- a/chromium/gpu/ipc/webgpu_in_process_context.cc
+++ b/chromium/gpu/ipc/webgpu_in_process_context.cc
@@ -61,7 +61,9 @@ ContextResult WebGPUInProcessContext::Initialize(
auto result = command_buffer_->Initialize(
surface, is_offscreen, kNullSurfaceHandle, attribs,
gpu_memory_buffer_manager, image_factory, gpu_channel_manager_delegate,
- client_task_runner_, nullptr /* task_sequence */, nullptr, nullptr);
+ client_task_runner_, nullptr /* task_sequence */,
+ nullptr /* display_compositor_memory_and_task_controller_on_gpu */,
+ nullptr, nullptr);
if (result != ContextResult::kSuccess) {
DLOG(ERROR) << "Failed to initialize InProcessCommmandBuffer";
return result;
diff --git a/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc b/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc
index 4a7a043c83f..475032f48b3 100644
--- a/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc
+++ b/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc
@@ -37,7 +37,6 @@
#include "ui/base/x/x11_util.h"
#include "ui/gfx/x/connection.h"
#include "ui/gfx/x/glx.h"
-#include "ui/gfx/x/x11.h"
#include "ui/gfx/x/xproto.h"
#include "ui/gfx/x/xproto_util.h"
#include "ui/gl/glx_util.h"
@@ -66,19 +65,18 @@ class Simulator {
Simulator(int seconds_per_test, const base::FilePath& output_path)
: output_path_(output_path),
seconds_per_test_(seconds_per_test),
- display_(nullptr),
gl_context_(nullptr),
window_width_(WINDOW_WIDTH),
window_height_(WINDOW_HEIGHT) {}
~Simulator() {
// Cleanup GL.
- glXMakeCurrent(display_, 0, nullptr);
- glXDestroyContext(display_, gl_context_);
+ auto display = connection_->GetXlibDisplay(x11::XlibDisplayType::kFlushing);
+ glXMakeCurrent(display, 0, nullptr);
+ glXDestroyContext(display, gl_context_);
- // Destroy window and display.
- connection_->DestroyWindow({window_});
- XCloseDisplay(display_);
+ // The window and X11 connection will be cleaned up when connection_ is
+ // destroyed.
}
void QueueTest(const base::FilePath& path) {
@@ -152,9 +150,8 @@ class Simulator {
// X11 window. Further initialization is done in X11VideoRenderer.
bool InitX11() {
connection_ = std::make_unique<x11::Connection>();
- display_ = connection_->display();
- if (!display_) {
- LOG(FATAL) << "Cannot open display";
+ if (!connection_->Ready()) {
+ LOG(FATAL) << "Cannot open X11 connection";
return false;
}
@@ -197,18 +194,20 @@ class Simulator {
auto* glx_config = gl::GetFbConfigForWindow(connection_.get(), window_);
if (!glx_config)
return false;
- auto* visual = glXGetVisualFromFBConfig(display_, glx_config);
+ auto* visual =
+ glXGetVisualFromFBConfig(connection_->GetXlibDisplay(), glx_config);
DCHECK(visual);
- gl_context_ = glXCreateContext(display_, visual, nullptr,
- true /* Direct rendering */);
+ gl_context_ = glXCreateContext(
+ connection_->GetXlibDisplay(x11::XlibDisplayType::kSyncing), visual,
+ nullptr, true /* Direct rendering */);
if (!gl_context_)
return false;
- if (!glXMakeCurrent(display_, static_cast<uint32_t>(window_),
- gl_context_)) {
- glXDestroyContext(display_, gl_context_);
+ auto display = connection_->GetXlibDisplay(x11::XlibDisplayType::kFlushing);
+ if (!glXMakeCurrent(display, static_cast<uint32_t>(window_), gl_context_)) {
+ glXDestroyContext(display, gl_context_);
gl_context_ = nullptr;
return false;
}
@@ -246,7 +245,8 @@ class Simulator {
if (current_sim_)
current_sim_->Update();
- glXSwapBuffers(display_, static_cast<uint32_t>(window_));
+ glXSwapBuffers(connection_->GetXlibDisplay(x11::XlibDisplayType::kFlushing),
+ static_cast<uint32_t>(window_));
auto window = static_cast<x11::Window>(window_);
x11::ExposeEvent ev{
@@ -339,7 +339,6 @@ class Simulator {
int seconds_per_test_;
// GUI data
std::unique_ptr<x11::Connection> connection_;
- Display* display_;
x11::Window window_ = x11::Window::None;
GLXContext gl_context_;
int window_width_;
diff --git a/chromium/gpu/vulkan/BUILD.gn b/chromium/gpu/vulkan/BUILD.gn
index 28ee6232cad..043ce01bf12 100644
--- a/chromium/gpu/vulkan/BUILD.gn
+++ b/chromium/gpu/vulkan/BUILD.gn
@@ -4,18 +4,11 @@
import("//build/buildflag_header.gni")
import("//build/config/dcheck_always_on.gni")
+import("//build/config/ozone.gni")
import("//build/config/ui.gni")
import("//testing/test.gni")
-import("//ui/ozone/ozone.gni")
import("features.gni")
-# This file depends on the legacy global sources assignment filter. It should
-# be converted to check target platform before assigning source files to the
-# sources variable. Remove this import and set_sources_assignment_filter call
-# when the file has been converted. See https://crbug.com/1018739 for details.
-import("//build/config/deprecated_default_sources_assignment_filter.gni")
-set_sources_assignment_filter(deprecated_default_sources_assignment_filter)
-
# Generate a buildflag header for compile-time checking of Vulkan support.
buildflag_header("buildflags") {
header = "buildflags.h"
@@ -23,11 +16,11 @@ buildflag_header("buildflags") {
}
if (enable_vulkan) {
- use_vulkan_xlib = use_x11 || ozone_platform_x11
+ use_vulkan_xcb = use_x11 || ozone_platform_x11
config("vulkan_config") {
defines = [ "VK_NO_PROTOTYPES" ]
- if (use_vulkan_xlib) {
- defines += [ "USE_VULKAN_XLIB" ]
+ if (use_vulkan_xcb) {
+ defines += [ "USE_VULKAN_XCB" ]
}
}
@@ -131,10 +124,11 @@ if (enable_vulkan) {
data_deps = []
if (is_posix) {
- sources += [
- "vulkan_image_linux.cc",
- "vulkan_util_posix.cc",
- ]
+ sources += [ "vulkan_util_posix.cc" ]
+
+ if (is_linux || is_chromeos) {
+ sources += [ "vulkan_image_linux.cc" ]
+ }
}
if (is_android) {
diff --git a/chromium/gpu/vulkan/DIR_METADATA b/chromium/gpu/vulkan/DIR_METADATA
new file mode 100644
index 00000000000..8e87cca679a
--- /dev/null
+++ b/chromium/gpu/vulkan/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+ component: "Internals>GPU>Internals"
+} \ No newline at end of file
diff --git a/chromium/gpu/vulkan/OWNERS b/chromium/gpu/vulkan/OWNERS
index ec258006c54..1879da134c9 100644
--- a/chromium/gpu/vulkan/OWNERS
+++ b/chromium/gpu/vulkan/OWNERS
@@ -1,4 +1,2 @@
vmiura@chromium.org
penghuang@chromium.org
-
-# COMPONENT: Internals>GPU>Internals
diff --git a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
index ba6c1be89a6..9be7cc491ee 100644
--- a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
+++ b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
@@ -5,7 +5,7 @@
#include "gpu/vulkan/android/vulkan_implementation_android.h"
#include "base/android/android_hardware_buffer_compat.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "gpu/ipc/common/vulkan_ycbcr_info.h"
diff --git a/chromium/gpu/vulkan/demo/BUILD.gn b/chromium/gpu/vulkan/demo/BUILD.gn
index 6d4351cfb3f..ae8cf7618b6 100644
--- a/chromium/gpu/vulkan/demo/BUILD.gn
+++ b/chromium/gpu/vulkan/demo/BUILD.gn
@@ -28,6 +28,7 @@ if (use_x11) {
"//components/tracing:startup_tracing",
"//components/viz/common",
"//gpu/vulkan/init",
+ "//skia",
"//ui/display/types",
"//ui/events",
"//ui/events/platform",
diff --git a/chromium/gpu/vulkan/demo/DEPS b/chromium/gpu/vulkan/demo/DEPS
index e75b1b59d8e..d590920e56e 100644
--- a/chromium/gpu/vulkan/demo/DEPS
+++ b/chromium/gpu/vulkan/demo/DEPS
@@ -1,6 +1,7 @@
include_rules = [
"+components/tracing",
"+components/viz",
+ "+skia/ext",
"+third_party/skia",
"+ui",
]
diff --git a/chromium/gpu/vulkan/demo/vulkan_demo.cc b/chromium/gpu/vulkan/demo/vulkan_demo.cc
index c2fe83878cb..2ebd3807382 100644
--- a/chromium/gpu/vulkan/demo/vulkan_demo.cc
+++ b/chromium/gpu/vulkan/demo/vulkan_demo.cc
@@ -12,6 +12,7 @@
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_surface.h"
+#include "skia/ext/legacy_display_globals.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkFont.h"
#include "third_party/skia/include/core/SkSurface.h"
@@ -114,12 +115,15 @@ void VulkanDemo::CreateSkSurface() {
if (!sk_surface) {
SkSurfaceProps surface_props =
- SkSurfaceProps(0, SkSurfaceProps::kLegacyFontHost_InitType);
+ skia::LegacyDisplayGlobals::GetSkSurfaceProps();
+
GrVkImageInfo vk_image_info;
vk_image_info.fImage = scoped_write_->image();
vk_image_info.fImageLayout = scoped_write_->image_layout();
vk_image_info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
vk_image_info.fFormat = VK_FORMAT_B8G8R8A8_UNORM;
+ vk_image_info.fImageUsageFlags = scoped_write_->image_usage();
+ vk_image_info.fSampleCount = 1;
vk_image_info.fLevelCount = 1;
const auto& size = vulkan_surface_->image_size();
GrBackendRenderTarget render_target(size.width(), size.height(), 0,
diff --git a/chromium/gpu/vulkan/generate_bindings.py b/chromium/gpu/vulkan/generate_bindings.py
index 326f5eef0c3..346887a2d30 100755
--- a/chromium/gpu/vulkan/generate_bindings.py
+++ b/chromium/gpu/vulkan/generate_bindings.py
@@ -72,11 +72,11 @@ VULKAN_INSTANCE_FUNCTIONS = [
]
},
{
- 'ifdef': 'defined(USE_VULKAN_XLIB)',
- 'extension': 'VK_KHR_XLIB_SURFACE_EXTENSION_NAME',
+ 'ifdef': 'defined(USE_VULKAN_XCB)',
+ 'extension': 'VK_KHR_XCB_SURFACE_EXTENSION_NAME',
'functions': [
- 'vkCreateXlibSurfaceKHR',
- 'vkGetPhysicalDeviceXlibPresentationSupportKHR',
+ 'vkCreateXcbSurfaceKHR',
+ 'vkGetPhysicalDeviceXcbPresentationSupportKHR',
]
},
{
@@ -381,11 +381,10 @@ def GenerateHeaderFile(out_file):
#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
#endif
-#if defined(USE_VULKAN_XLIB)
-typedef struct _XDisplay Display;
-typedef unsigned long Window;
-typedef unsigned long VisualID;
-#include <vulkan/vulkan_xlib.h>
+#if defined(USE_VULKAN_XCB)
+#include <xcb/xcb.h>
+// <vulkan/vulkan_xcb.h> must be included after <xcb/xcb.h>
+#include <vulkan/vulkan_xcb.h>
#endif
#if defined(OS_WIN)
diff --git a/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc b/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc
index 91d0dd14af7..5c11de431c8 100644
--- a/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc
+++ b/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc
@@ -10,6 +10,7 @@
#include "base/trace_event/trace_event.h"
#include "gpu/vulkan/vma_wrapper.h"
#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
namespace gpu {
diff --git a/chromium/gpu/vulkan/vma_wrapper.cc b/chromium/gpu/vulkan/vma_wrapper.cc
index d0541c92ad6..f30f3740224 100644
--- a/chromium/gpu/vulkan/vma_wrapper.cc
+++ b/chromium/gpu/vulkan/vma_wrapper.cc
@@ -15,6 +15,7 @@ namespace vma {
VkResult CreateAllocator(VkPhysicalDevice physical_device,
VkDevice device,
VkInstance instance,
+ const VkDeviceSize* heap_size_limit,
VmaAllocator* pAllocator) {
auto* function_pointers = gpu::GetVulkanFunctionPointers();
VmaVulkanFunctions functions = {
@@ -53,6 +54,7 @@ VkResult CreateAllocator(VkPhysicalDevice physical_device,
// AMD allocator will start making blocks at 1/8 the max size and builds
// up block size as needed before capping at the max set here.
.preferredLargeHeapBlockSize = 4 * 1024 * 1024,
+ .pHeapSizeLimit = heap_size_limit,
.pVulkanFunctions = &functions,
.instance = instance,
.vulkanApiVersion = kVulkanRequiredApiVersion,
@@ -154,5 +156,19 @@ void CalculateStats(VmaAllocator allocator, VmaStats* stats) {
vmaCalculateStats(allocator, stats);
}
+uint64_t GetTotalAllocatedMemory(VmaAllocator allocator) {
+ VmaBudget budget[VK_MAX_MEMORY_HEAPS];
+ vmaGetBudget(allocator, budget);
+ const VkPhysicalDeviceMemoryProperties* pPhysicalDeviceMemoryProperties;
+ vmaGetMemoryProperties(allocator, &pPhysicalDeviceMemoryProperties);
+ uint64_t total_allocated_memory = 0;
+ for (uint32_t i = 0; i < pPhysicalDeviceMemoryProperties->memoryHeapCount;
+ ++i) {
+ total_allocated_memory +=
+ std::max(budget[i].blockBytes, budget[i].allocationBytes);
+ }
+ return total_allocated_memory;
+}
+
} // namespace vma
} // namespace gpu \ No newline at end of file
diff --git a/chromium/gpu/vulkan/vma_wrapper.h b/chromium/gpu/vulkan/vma_wrapper.h
index 502bdbcd15e..470392e4fb7 100644
--- a/chromium/gpu/vulkan/vma_wrapper.h
+++ b/chromium/gpu/vulkan/vma_wrapper.h
@@ -23,6 +23,7 @@ COMPONENT_EXPORT(VULKAN)
VkResult CreateAllocator(VkPhysicalDevice physical_device,
VkDevice device,
VkInstance instance,
+ const VkDeviceSize* heap_size_limit,
VmaAllocator* allocator);
COMPONENT_EXPORT(VULKAN) void DestroyAllocator(VmaAllocator allocator);
@@ -95,6 +96,9 @@ void GetPhysicalDeviceProperties(
COMPONENT_EXPORT(VULKAN)
void CalculateStats(VmaAllocator allocator, VmaStats* stats);
+COMPONENT_EXPORT(VULKAN)
+uint64_t GetTotalAllocatedMemory(VmaAllocator allocator);
+
} // namespace vma
} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.cc b/chromium/gpu/vulkan/vulkan_device_queue.cc
index e30bbd11632..b14a6c5e451 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.cc
+++ b/chromium/gpu/vulkan/vulkan_device_queue.cc
@@ -39,7 +39,8 @@ bool VulkanDeviceQueue::Initialize(
const std::vector<const char*>& required_extensions,
const std::vector<const char*>& optional_extensions,
bool allow_protected_memory,
- const GetPresentationSupportCallback& get_presentation_support) {
+ const GetPresentationSupportCallback& get_presentation_support,
+ uint32_t heap_memory_limit) {
DCHECK_EQ(static_cast<VkPhysicalDevice>(VK_NULL_HANDLE), vk_physical_device_);
DCHECK_EQ(static_cast<VkDevice>(VK_NULL_HANDLE), owned_vk_device_);
DCHECK_EQ(static_cast<VkDevice>(VK_NULL_HANDLE), vk_device_);
@@ -266,8 +267,11 @@ bool VulkanDeviceQueue::Initialize(
vkGetDeviceQueue(vk_device_, queue_index, 0, &vk_queue_);
}
+ std::vector<VkDeviceSize> heap_size_limit(
+ VK_MAX_MEMORY_HEAPS,
+ heap_memory_limit ? heap_memory_limit : VK_WHOLE_SIZE);
vma::CreateAllocator(vk_physical_device_, vk_device_, vk_instance_,
- &vma_allocator_);
+ heap_size_limit.data(), &vma_allocator_);
cleanup_helper_ = std::make_unique<VulkanFenceHelper>(this);
allow_protected_memory_ = allow_protected_memory;
@@ -291,7 +295,7 @@ bool VulkanDeviceQueue::InitializeForWebView(
vk_queue_index_ = vk_queue_index;
enabled_extensions_ = std::move(enabled_extensions);
- vma::CreateAllocator(vk_physical_device_, vk_device_, vk_instance_,
+ vma::CreateAllocator(vk_physical_device_, vk_device_, vk_instance_, nullptr,
&vma_allocator_);
cleanup_helper_ = std::make_unique<VulkanFenceHelper>(this);
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.h b/chromium/gpu/vulkan/vulkan_device_queue.h
index f7b23c07609..44d7609879e 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.h
+++ b/chromium/gpu/vulkan/vulkan_device_queue.h
@@ -46,7 +46,8 @@ class COMPONENT_EXPORT(VULKAN) VulkanDeviceQueue {
const std::vector<const char*>& required_extensions,
const std::vector<const char*>& optional_extensions,
bool allow_protected_memory,
- const GetPresentationSupportCallback& get_presentation_support);
+ const GetPresentationSupportCallback& get_presentation_support,
+ uint32_t heap_memory_limit);
bool InitializeForWebView(VkPhysicalDevice vk_physical_device,
VkDevice vk_device,
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper.h b/chromium/gpu/vulkan/vulkan_fence_helper.h
index 571fc97ca66..02b512943dd 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper.h
+++ b/chromium/gpu/vulkan/vulkan_fence_helper.h
@@ -7,8 +7,8 @@
#include <vulkan/vulkan.h>
-#include "base/bind_helpers.h"
#include "base/callback.h"
+#include "base/callback_helpers.h"
#include "base/component_export.h"
#include "base/containers/circular_deque.h"
#include "base/macros.h"
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.cc b/chromium/gpu/vulkan/vulkan_function_pointers.cc
index 3bb6e68f61e..3fa104f0d30 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.cc
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.cc
@@ -260,28 +260,28 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
}
}
-#if defined(USE_VULKAN_XLIB)
+#if defined(USE_VULKAN_XCB)
if (gfx::HasExtension(enabled_extensions,
- VK_KHR_XLIB_SURFACE_EXTENSION_NAME)) {
- vkCreateXlibSurfaceKHR = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>(
- vkGetInstanceProcAddr(vk_instance, "vkCreateXlibSurfaceKHR"));
- if (!vkCreateXlibSurfaceKHR) {
+ VK_KHR_XCB_SURFACE_EXTENSION_NAME)) {
+ vkCreateXcbSurfaceKHR = reinterpret_cast<PFN_vkCreateXcbSurfaceKHR>(
+ vkGetInstanceProcAddr(vk_instance, "vkCreateXcbSurfaceKHR"));
+ if (!vkCreateXcbSurfaceKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkCreateXlibSurfaceKHR";
+ << "vkCreateXcbSurfaceKHR";
return false;
}
- vkGetPhysicalDeviceXlibPresentationSupportKHR =
- reinterpret_cast<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>(
+ vkGetPhysicalDeviceXcbPresentationSupportKHR =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR>(
vkGetInstanceProcAddr(
- vk_instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR"));
- if (!vkGetPhysicalDeviceXlibPresentationSupportKHR) {
+ vk_instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR"));
+ if (!vkGetPhysicalDeviceXcbPresentationSupportKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetPhysicalDeviceXlibPresentationSupportKHR";
+ << "vkGetPhysicalDeviceXcbPresentationSupportKHR";
return false;
}
}
-#endif // defined(USE_VULKAN_XLIB)
+#endif // defined(USE_VULKAN_XCB)
#if defined(OS_WIN)
if (gfx::HasExtension(enabled_extensions,
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.h b/chromium/gpu/vulkan/vulkan_function_pointers.h
index 79d9b96a8cc..e1a6e5cd8d7 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.h
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.h
@@ -31,11 +31,10 @@
#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
#endif
-#if defined(USE_VULKAN_XLIB)
-typedef struct _XDisplay Display;
-typedef unsigned long Window;
-typedef unsigned long VisualID;
-#include <vulkan/vulkan_xlib.h>
+#if defined(USE_VULKAN_XCB)
+#include <xcb/xcb.h>
+// <vulkan/vulkan_xcb.h> must be included after <xcb/xcb.h>
+#include <vulkan/vulkan_xcb.h>
#endif
#if defined(OS_WIN)
@@ -142,11 +141,11 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceSupportKHR>
vkGetPhysicalDeviceSurfaceSupportKHR;
-#if defined(USE_VULKAN_XLIB)
- VulkanFunction<PFN_vkCreateXlibSurfaceKHR> vkCreateXlibSurfaceKHR;
- VulkanFunction<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>
- vkGetPhysicalDeviceXlibPresentationSupportKHR;
-#endif // defined(USE_VULKAN_XLIB)
+#if defined(USE_VULKAN_XCB)
+ VulkanFunction<PFN_vkCreateXcbSurfaceKHR> vkCreateXcbSurfaceKHR;
+ VulkanFunction<PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR>
+ vkGetPhysicalDeviceXcbPresentationSupportKHR;
+#endif // defined(USE_VULKAN_XCB)
#if defined(OS_WIN)
VulkanFunction<PFN_vkCreateWin32SurfaceKHR> vkCreateWin32SurfaceKHR;
@@ -459,25 +458,25 @@ vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
physicalDevice, queueFamilyIndex, surface, pSupported);
}
-#if defined(USE_VULKAN_XLIB)
+#if defined(USE_VULKAN_XCB)
ALWAYS_INLINE VkResult
-vkCreateXlibSurfaceKHR(VkInstance instance,
- const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
- const VkAllocationCallbacks* pAllocator,
- VkSurfaceKHR* pSurface) {
- return gpu::GetVulkanFunctionPointers()->vkCreateXlibSurfaceKHR(
+vkCreateXcbSurfaceKHR(VkInstance instance,
+ const VkXcbSurfaceCreateInfoKHR* pCreateInfo,
+ const VkAllocationCallbacks* pAllocator,
+ VkSurfaceKHR* pSurface) {
+ return gpu::GetVulkanFunctionPointers()->vkCreateXcbSurfaceKHR(
instance, pCreateInfo, pAllocator, pSurface);
}
ALWAYS_INLINE VkBool32
-vkGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
- uint32_t queueFamilyIndex,
- Display* dpy,
- VisualID visualID) {
+vkGetPhysicalDeviceXcbPresentationSupportKHR(VkPhysicalDevice physicalDevice,
+ uint32_t queueFamilyIndex,
+ xcb_connection_t* connection,
+ xcb_visualid_t visual_id) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceXlibPresentationSupportKHR(
- physicalDevice, queueFamilyIndex, dpy, visualID);
+ ->vkGetPhysicalDeviceXcbPresentationSupportKHR(
+ physicalDevice, queueFamilyIndex, connection, visual_id);
}
-#endif // defined(USE_VULKAN_XLIB)
+#endif // defined(USE_VULKAN_XCB)
#if defined(OS_WIN)
ALWAYS_INLINE VkResult
diff --git a/chromium/gpu/vulkan/vulkan_image.h b/chromium/gpu/vulkan/vulkan_image.h
index 57b2460ce70..dc19798c0a4 100644
--- a/chromium/gpu/vulkan/vulkan_image.h
+++ b/chromium/gpu/vulkan/vulkan_image.h
@@ -15,6 +15,7 @@
#include "gpu/ipc/common/vulkan_ycbcr_info.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/gfx/native_pixmap.h"
#if defined(OS_WIN)
#include "base/win/scoped_handle.h"
@@ -113,6 +114,12 @@ class COMPONENT_EXPORT(VULKAN) VulkanImage {
VkImage image() const { return image_; }
VkDeviceMemory device_memory() const { return device_memory_; }
VkExternalMemoryHandleTypeFlags handle_types() const { return handle_types_; }
+ void set_native_pixmap(scoped_refptr<gfx::NativePixmap> pixmap) {
+ native_pixmap_ = std::move(pixmap);
+ }
+ const scoped_refptr<gfx::NativePixmap>& native_pixmap() const {
+ return native_pixmap_;
+ }
private:
bool Initialize(VulkanDeviceQueue* device_queue,
@@ -153,6 +160,7 @@ class COMPONENT_EXPORT(VULKAN) VulkanImage {
VkImage image_ = VK_NULL_HANDLE;
VkDeviceMemory device_memory_ = VK_NULL_HANDLE;
VkExternalMemoryHandleTypeFlags handle_types_ = 0;
+ scoped_refptr<gfx::NativePixmap> native_pixmap_;
};
} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_image_android.cc b/chromium/gpu/vulkan/vulkan_image_android.cc
index 7d64386a332..2939879a34c 100644
--- a/chromium/gpu/vulkan/vulkan_image_android.cc
+++ b/chromium/gpu/vulkan/vulkan_image_android.cc
@@ -100,6 +100,11 @@ bool VulkanImage::InitializeFromGpuMemoryBufferHandle(
return false;
}
+ // Skia currently requires all wrapped VkImages to have transfer src and dst
+ // usage. Additionally, all AHBs support these usages when imported into Vulkan.
+ usage_flags |=
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
VkImageCreateFlags create_flags = 0;
if (ahb_desc.usage & AHARDWAREBUFFER_USAGE_PROTECTED_CONTENT) {
create_flags = VK_IMAGE_CREATE_PROTECTED_BIT;
diff --git a/chromium/gpu/vulkan/vulkan_implementation.cc b/chromium/gpu/vulkan/vulkan_implementation.cc
index cc892c587d9..5eabfce4d49 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.cc
+++ b/chromium/gpu/vulkan/vulkan_implementation.cc
@@ -23,7 +23,8 @@ VulkanImplementation::~VulkanImplementation() {}
std::unique_ptr<VulkanDeviceQueue> CreateVulkanDeviceQueue(
VulkanImplementation* vulkan_implementation,
uint32_t option,
- const GPUInfo* gpu_info) {
+ const GPUInfo* gpu_info,
+ uint32_t heap_memory_limmit) {
auto device_queue = std::make_unique<VulkanDeviceQueue>(
vulkan_implementation->GetVulkanInstance()->vk_instance(),
vulkan_implementation->enforce_protected_memory());
@@ -38,7 +39,8 @@ std::unique_ptr<VulkanDeviceQueue> CreateVulkanDeviceQueue(
option, gpu_info,
vulkan_implementation->GetVulkanInstance()->vulkan_info(),
std::move(required_extensions), std::move(optional_extensions),
- vulkan_implementation->allow_protected_memory(), callback)) {
+ vulkan_implementation->allow_protected_memory(), callback,
+ heap_memory_limmit)) {
device_queue->Destroy();
return nullptr;
}
diff --git a/chromium/gpu/vulkan/vulkan_implementation.h b/chromium/gpu/vulkan/vulkan_implementation.h
index 278bad3cd7f..8c156b6ffba 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.h
+++ b/chromium/gpu/vulkan/vulkan_implementation.h
@@ -157,7 +157,8 @@ COMPONENT_EXPORT(VULKAN)
std::unique_ptr<VulkanDeviceQueue> CreateVulkanDeviceQueue(
VulkanImplementation* vulkan_implementation,
uint32_t option,
- const GPUInfo* gpu_info = nullptr);
+ const GPUInfo* gpu_info = nullptr,
+ uint32_t heap_memory_limit = 0);
} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_instance.cc b/chromium/gpu/vulkan/vulkan_instance.cc
index c68a852d2a4..afb7b49b1b1 100644
--- a/chromium/gpu/vulkan/vulkan_instance.cc
+++ b/chromium/gpu/vulkan/vulkan_instance.cc
@@ -177,31 +177,6 @@ bool VulkanInstance::Initialize(
return false;
}
- gfx::ExtensionSet enabled_extensions(
- std::begin(vulkan_info_.enabled_instance_extensions),
- std::end(vulkan_info_.enabled_instance_extensions));
-
-#if DCHECK_IS_ON()
- // TODO(crbug.com/843346): Make validation work in combination with
- // VK_KHR_xlib_surface or switch to VK_KHR_xcb_surface.
- bool require_xlib_surface_extension =
- gfx::HasExtension(enabled_extensions, "VK_KHR_xlib_surface");
-
- // VK_LAYER_KHRONOS_validation 1.1.106 is required to support
- // VK_KHR_xlib_surface.
- constexpr base::StringPiece standard_validation(
- "VK_LAYER_KHRONOS_validation");
- for (const VkLayerProperties& layer_property : vulkan_info_.instance_layers) {
- if (standard_validation != layer_property.layerName)
- continue;
- if (!require_xlib_surface_extension ||
- layer_property.specVersion >= VK_MAKE_VERSION(1, 1, 106)) {
- enabled_layer_names.push_back(standard_validation.data());
- }
- break;
- }
-#endif // DCHECK_IS_ON()
-
VkInstanceCreateInfo instance_create_info = {
VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
nullptr, // pNext
@@ -220,6 +195,10 @@ bool VulkanInstance::Initialize(
return false;
}
+ gfx::ExtensionSet enabled_extensions(
+ std::begin(vulkan_info_.enabled_instance_extensions),
+ std::end(vulkan_info_.enabled_instance_extensions));
+
if (!vulkan_function_pointers->BindInstanceFunctionPointers(
vk_instance_, vulkan_info_.used_api_version, enabled_extensions)) {
return false;
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.cc b/chromium/gpu/vulkan/vulkan_swap_chain.cc
index ade4322d23c..dd51c61fb95 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.cc
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.cc
@@ -239,6 +239,8 @@ bool VulkanSwapChain::InitializeSwapChain(
base::TaskShutdownBehavior::BLOCK_SHUTDOWN, base::MayBlock()});
}
+ image_usage_ = image_usage_flags;
+
return true;
}
@@ -295,6 +297,7 @@ void VulkanSwapChain::DestroySwapImages() {
bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
uint32_t* image_index,
VkImageLayout* image_layout,
+ VkImageUsageFlags* image_usage,
VkSemaphore* begin_semaphore,
VkSemaphore* end_semaphore) {
base::AutoLock auto_lock(lock_);
@@ -302,6 +305,7 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
DCHECK(image);
DCHECK(image_index);
DCHECK(image_layout);
+ DCHECK(image_usage);
DCHECK(begin_semaphore);
DCHECK(end_semaphore);
DCHECK(!is_writing_);
@@ -332,6 +336,7 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
*image = current_image_data.image;
*image_index = *acquired_image_;
*image_layout = current_image_data.image_layout;
+ *image_usage = image_usage_;
*begin_semaphore = current_image_data.acquire_semaphore;
*end_semaphore = current_image_data.present_semaphore;
is_writing_ = true;
@@ -573,7 +578,7 @@ void VulkanSwapChain::ReturnFenceAndSemaphores(
VulkanSwapChain::ScopedWrite::ScopedWrite(VulkanSwapChain* swap_chain)
: swap_chain_(swap_chain) {
success_ = swap_chain_->BeginWriteCurrentImage(
- &image_, &image_index_, &image_layout_, &begin_semaphore_,
+ &image_, &image_index_, &image_layout_, &image_usage_, &begin_semaphore_,
&end_semaphore_);
if (LIKELY(success_)) {
DCHECK(begin_semaphore_ != VK_NULL_HANDLE);
@@ -595,4 +600,4 @@ VulkanSwapChain::ScopedWrite::~ScopedWrite() {
}
}
-} // namespace gpu \ No newline at end of file
+} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.h b/chromium/gpu/vulkan/vulkan_swap_chain.h
index 5f94fca1a5f..b4ecf51ddc6 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.h
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.h
@@ -42,6 +42,7 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
VkImage image() const { return image_; }
uint32_t image_index() const { return image_index_; }
VkImageLayout image_layout() const { return image_layout_; }
+ VkImageUsageFlags image_usage() const { return image_usage_; }
VkSemaphore begin_semaphore() const { return begin_semaphore_; }
VkSemaphore end_semaphore() const { return end_semaphore_; }
@@ -51,6 +52,7 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
VkImage image_ = VK_NULL_HANDLE;
uint32_t image_index_ = 0;
VkImageLayout image_layout_ = VK_IMAGE_LAYOUT_UNDEFINED;
+ VkImageUsageFlags image_usage_ = 0;
VkSemaphore begin_semaphore_ = VK_NULL_HANDLE;
VkSemaphore end_semaphore_ = VK_NULL_HANDLE;
@@ -134,6 +136,7 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
bool BeginWriteCurrentImage(VkImage* image,
uint32_t* image_index,
VkImageLayout* layout,
+ VkImageUsageFlags* usage,
VkSemaphore* begin_semaphore,
VkSemaphore* end_semaphore);
void EndWriteCurrentImage();
@@ -163,6 +166,8 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
// Images in the swap chain.
std::vector<ImageData> images_ GUARDED_BY(lock_);
+ VkImageUsageFlags image_usage_ = 0;
+
// True if BeginWriteCurrentImage() is called, but EndWriteCurrentImage() is
// not.
bool is_writing_ GUARDED_BY(lock_) = false;
@@ -204,4 +209,4 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
} // namespace gpu
-#endif // GPU_VULKAN_VULKAN_SWAP_CHAIN_H_ \ No newline at end of file
+#endif // GPU_VULKAN_VULKAN_SWAP_CHAIN_H_
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
index ed4e4aac751..7d6019389ae 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
@@ -5,7 +5,7 @@
#include "gpu/vulkan/x/vulkan_implementation_x11.h"
#include "base/base_paths.h"
-#include "base/bind_helpers.h"
+#include "base/callback_helpers.h"
#include "base/files/file_path.h"
#include "base/notreached.h"
#include "base/optional.h"
@@ -20,8 +20,6 @@
#include "ui/gfx/gpu_fence.h"
#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/x/connection.h"
-#include "ui/gfx/x/x11.h"
-#include "ui/gfx/x/x11_types.h"
namespace gpu {
@@ -40,7 +38,7 @@ bool InitializeVulkanFunctionPointers(
VulkanImplementationX11::VulkanImplementationX11(bool use_swiftshader)
: VulkanImplementation(use_swiftshader) {
- gfx::GetXDisplay();
+ x11::Connection::Get();
}
VulkanImplementationX11::~VulkanImplementationX11() = default;
@@ -60,7 +58,7 @@ bool VulkanImplementationX11::InitializeVulkanInstance(bool using_surface) {
VK_KHR_EXTERNAL_SEMAPHORE_CAPABILITIES_EXTENSION_NAME};
if (using_surface_) {
required_extensions.push_back(VK_KHR_SURFACE_EXTENSION_NAME);
- required_extensions.push_back(VK_KHR_XLIB_SURFACE_EXTENSION_NAME);
+ required_extensions.push_back(VK_KHR_XCB_SURFACE_EXTENSION_NAME);
}
VulkanFunctionPointers* vulkan_function_pointers =
@@ -105,10 +103,9 @@ bool VulkanImplementationX11::GetPhysicalDevicePresentationSupport(
if (use_swiftshader())
return true;
auto* connection = x11::Connection::Get();
- auto* display = connection->display();
- return vkGetPhysicalDeviceXlibPresentationSupportKHR(
- device, queue_family_index, display,
- static_cast<VisualID>(connection->default_root_visual().visual_id));
+ return vkGetPhysicalDeviceXcbPresentationSupportKHR(
+ device, queue_family_index, connection->XcbConnection(),
+ static_cast<xcb_visualid_t>(connection->default_root_visual().visual_id));
}
std::vector<const char*>
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.h b/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
index 6819fdc36ca..5626066397c 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
@@ -10,7 +10,6 @@
#include "base/component_export.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_instance.h"
-#include "ui/gfx/x/x11_types.h"
namespace gpu {
diff --git a/chromium/gpu/vulkan/x/vulkan_surface_x11.cc b/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
index 9fd781dbe5b..29cc30a1538 100644
--- a/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
@@ -20,8 +20,9 @@ class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
explicit ExposeEventForwarder(VulkanSurfaceX11* surface) : surface_(surface) {
if (auto* event_source = ui::X11EventSource::GetInstance()) {
x11::Connection::Get()->ChangeWindowAttributes(
- {.window = static_cast<x11::Window>(surface_->window_),
- .event_mask = x11::EventMask::Exposure});
+ x11::ChangeWindowAttributesRequest{
+ .window = static_cast<x11::Window>(surface_->window_),
+ .event_mask = x11::EventMask::Exposure});
event_source->AddXEventDispatcher(this);
}
}
@@ -57,7 +58,7 @@ std::unique_ptr<VulkanSurfaceX11> VulkanSurfaceX11::Create(
}
auto window = connection->GenerateId<x11::Window>();
- connection->CreateWindow({
+ connection->CreateWindow(x11::CreateWindowRequest{
.wid = window,
.parent = parent_window,
.width = geometry->width,
@@ -68,16 +69,20 @@ std::unique_ptr<VulkanSurfaceX11> VulkanSurfaceX11::Create(
LOG(ERROR) << "Failed to create or map window.";
return nullptr;
}
+ // Flush the connection, otherwise other Vulkan WSI calls may fail with some
+ // drivers.
+ connection->Flush();
VkSurfaceKHR vk_surface;
- VkXlibSurfaceCreateInfoKHR surface_create_info = {
- VK_STRUCTURE_TYPE_XLIB_SURFACE_CREATE_INFO_KHR};
- surface_create_info.dpy = connection->display();
- surface_create_info.window = static_cast<uint32_t>(window);
- VkResult result = vkCreateXlibSurfaceKHR(vk_instance, &surface_create_info,
- nullptr, &vk_surface);
+ const VkXcbSurfaceCreateInfoKHR surface_create_info = {
+ .sType = VK_STRUCTURE_TYPE_XCB_SURFACE_CREATE_INFO_KHR,
+ .connection = connection->XcbConnection(),
+ .window = static_cast<xcb_window_t>(window),
+ };
+ VkResult result = vkCreateXcbSurfaceKHR(vk_instance, &surface_create_info,
+ nullptr, &vk_surface);
if (VK_SUCCESS != result) {
- DLOG(ERROR) << "vkCreateXlibSurfaceKHR() failed: " << result;
+ DLOG(ERROR) << "vkCreateXcbSurfaceKHR() failed: " << result;
return nullptr;
}
return std::make_unique<VulkanSurfaceX11>(vk_instance, vk_surface,
@@ -114,8 +119,8 @@ bool VulkanSurfaceX11::Reshape(const gfx::Size& size,
DCHECK_EQ(pre_transform, gfx::OVERLAY_TRANSFORM_NONE);
auto* connection = x11::Connection::Get();
- connection->ConfigureWindow(
- {.window = window_, .width = size.width(), .height = size.height()});
+ connection->ConfigureWindow(x11::ConfigureWindowRequest{
+ .window = window_, .width = size.width(), .height = size.height()});
connection->Flush();
return VulkanSurface::Reshape(size, pre_transform);
}
diff --git a/chromium/gpu/vulkan/x/vulkan_surface_x11.h b/chromium/gpu/vulkan/x/vulkan_surface_x11.h
index 866105b50fe..76a0ef619ef 100644
--- a/chromium/gpu/vulkan/x/vulkan_surface_x11.h
+++ b/chromium/gpu/vulkan/x/vulkan_surface_x11.h
@@ -10,7 +10,6 @@
#include "base/macros.h"
#include "gpu/vulkan/vulkan_surface.h"
#include "ui/gfx/x/event.h"
-#include "ui/gfx/x/x11_types.h"
namespace gpu {