summaryrefslogtreecommitdiff
path: root/chromium/gpu
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2019-07-31 15:50:41 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2019-08-30 12:35:23 +0000
commit7b2ffa587235a47d4094787d72f38102089f402a (patch)
tree30e82af9cbab08a7fa028bb18f4f2987a3f74dfa /chromium/gpu
parentd94af01c90575348c4e81a418257f254b6f8d225 (diff)
downloadqtwebengine-chromium-7b2ffa587235a47d4094787d72f38102089f402a.tar.gz
BASELINE: Update Chromium to 76.0.3809.94
Change-Id: I321c3f5f929c105aec0f98c5091ef6108822e647 Reviewed-by: Michael BrĂ¼ning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/gpu')
-rw-r--r--chromium/gpu/BUILD.gn93
-rw-r--r--chromium/gpu/DEPS2
-rw-r--r--chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_completion_query.txt60
-rw-r--r--chromium/gpu/GLES2/gl2extchromium.h26
-rw-r--r--chromium/gpu/angle_end2end_tests_main.cc2
-rw-r--r--chromium/gpu/command_buffer/build_cmd_buffer_lib.py4
-rwxr-xr-xchromium/gpu/command_buffer/build_gles2_cmd_buffer.py5
-rwxr-xr-xchromium/gpu/command_buffer/build_webgpu_cmd_buffer.py13
-rw-r--r--chromium/gpu/command_buffer/client/client_test_helper.h1
-rw-r--r--chromium/gpu/command_buffer/client/context_support.h4
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation.cc16
-rw-r--r--chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc4
-rw-r--r--chromium/gpu/command_buffer/client/gpu_control.h5
-rw-r--r--chromium/gpu/command_buffer/client/implementation_base.cc5
-rw-r--r--chromium/gpu/command_buffer/client/implementation_base.h1
-rw-r--r--chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc1
-rw-r--r--chromium/gpu/command_buffer/client/shared_image_interface.h37
-rw-r--r--chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc1
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h23
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation.cc29
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation.h2
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h9
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h27
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h26
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_interface.h10
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_interface_autogen.h8
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_interface_stub.cc39
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_interface_stub.h43
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h22
-rw-r--r--chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h23
-rw-r--r--chromium/gpu/command_buffer/common/BUILD.gn2
-rw-r--r--chromium/gpu/command_buffer/common/capabilities.h1
-rw-r--r--chromium/gpu/command_buffer/common/command_buffer_id.h4
-rw-r--r--chromium/gpu/command_buffer/common/command_buffer_shared.h10
-rw-r--r--chromium/gpu/command_buffer/common/context_creation_attribs.h3
-rw-r--r--chromium/gpu/command_buffer/common/discardable_handle.h4
-rw-r--r--chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h11
-rw-r--r--chromium/gpu/command_buffer/common/id_type.h109
-rw-r--r--chromium/gpu/command_buffer/common/id_type_unittest.cc200
-rw-r--r--chromium/gpu/command_buffer/common/webgpu_cmd_format.h1
-rw-r--r--chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h105
-rw-r--r--chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h51
-rw-r--r--chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h5
-rw-r--r--chromium/gpu/command_buffer/service/BUILD.gn11
-rw-r--r--chromium/gpu/command_buffer/service/DEPS1
-rw-r--r--chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc6
-rw-r--r--chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h4
-rw-r--r--chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc1
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.cc634
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_backing.h97
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_factory.cc278
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_factory.h2
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc111
-rw-r--r--chromium/gpu/command_buffer/service/external_vk_image_skia_representation.h19
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.cc29
-rw-r--r--chromium/gpu/command_buffer/service/feature_info.h1
-rw-r--r--chromium/gpu/command_buffer/service/feature_info_unittest.cc16
-rw-r--r--chromium/gpu/command_buffer/service/framebuffer_manager.cc6
-rw-r--r--chromium/gpu/command_buffer/service/framebuffer_manager.h5
-rw-r--r--chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc35
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.cc42
-rw-r--r--chromium/gpu/command_buffer/service/gl_utils.h2
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc13
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h3
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc27
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc78
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h6
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc79
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc111
-rw-r--r--chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h5
-rw-r--r--chromium/gpu/command_buffer/service/passthrough_abstract_texture_impl.cc13
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder.cc99
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest.cc1
-rw-r--r--chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc2
-rw-r--r--chromium/gpu/command_buffer/service/sequence_id.h4
-rw-r--r--chromium/gpu/command_buffer/service/service_font_manager.cc17
-rw-r--r--chromium/gpu/command_buffer/service/service_font_manager.h3
-rw-r--r--chromium/gpu/command_buffer/service/service_transfer_cache.cc20
-rw-r--r--chromium/gpu/command_buffer/service/service_transfer_cache.h34
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state.cc62
-rw-r--r--chromium/gpu/command_buffer/service/shared_context_state.h17
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing.cc6
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing.h7
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory.h5
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc260
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h2
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc112
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc19
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h2
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc23
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h8
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm231
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc33
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory.cc99
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory.h30
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc4
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_manager.cc3
-rw-r--r--chromium/gpu/command_buffer/service/shared_image_representation.h29
-rw-r--r--chromium/gpu/command_buffer/service/skia_utils.cc56
-rw-r--r--chromium/gpu/command_buffer/service/skia_utils.h17
-rw-r--r--chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.cc438
-rw-r--r--chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.h74
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.cc160
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager.h38
-rw-r--r--chromium/gpu/command_buffer/service/texture_manager_unittest.cc17
-rw-r--r--chromium/gpu/command_buffer/service/transfer_buffer_manager.cc2
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder.cc6
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder.h4
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc142
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder_impl.h4
-rw-r--r--chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc147
-rw-r--r--chromium/gpu/command_buffer/service/wrapped_sk_image.cc105
-rw-r--r--chromium/gpu/command_buffer/service/wrapped_sk_image.h2
-rw-r--r--chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt4
-rw-r--r--chromium/gpu/config/gpu_blacklist.cc2
-rw-r--r--chromium/gpu/config/gpu_crash_keys.cc8
-rw-r--r--chromium/gpu/config/gpu_crash_keys.h5
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list.cc12
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list.json99
-rw-r--r--chromium/gpu/config/gpu_driver_bug_list_unittest.cc10
-rw-r--r--chromium/gpu/config/gpu_feature_info.h28
-rw-r--r--chromium/gpu/config/gpu_feature_type.h1
-rw-r--r--chromium/gpu/config/gpu_finch_features.cc23
-rw-r--r--chromium/gpu/config/gpu_finch_features.h11
-rw-r--r--chromium/gpu/config/gpu_info.cc47
-rw-r--r--chromium/gpu/config/gpu_info.h21
-rw-r--r--chromium/gpu/config/gpu_info_collector.cc33
-rw-r--r--chromium/gpu/config/gpu_info_collector.h8
-rw-r--r--chromium/gpu/config/gpu_info_collector_android.cc5
-rw-r--r--chromium/gpu/config/gpu_info_collector_fuchsia.cc5
-rw-r--r--chromium/gpu/config/gpu_info_collector_linux.cc6
-rw-r--r--chromium/gpu/config/gpu_info_collector_mac.mm5
-rw-r--r--chromium/gpu/config/gpu_info_collector_unittest.cc3
-rw-r--r--chromium/gpu/config/gpu_info_collector_win.cc5
-rw-r--r--chromium/gpu/config/gpu_lists_version.h2
-rw-r--r--chromium/gpu/config/gpu_preferences.h21
-rw-r--r--chromium/gpu/config/gpu_preferences_unittest.cc6
-rw-r--r--chromium/gpu/config/gpu_switches.cc14
-rw-r--r--chromium/gpu/config/gpu_switches.h4
-rw-r--r--chromium/gpu/config/gpu_switching.cc8
-rw-r--r--chromium/gpu/config/gpu_util.cc77
-rw-r--r--chromium/gpu/config/gpu_workaround_list.txt7
-rw-r--r--chromium/gpu/config/software_rendering_list.json16
-rw-r--r--chromium/gpu/gles2_conform_support/egl/context.cc6
-rw-r--r--chromium/gpu/gles2_conform_support/egl/context.h1
-rw-r--r--chromium/gpu/ipc/DEPS3
-rw-r--r--chromium/gpu/ipc/client/BUILD.gn2
-rw-r--r--chromium/gpu/ipc/client/DEPS2
-rw-r--r--chromium/gpu/ipc/client/command_buffer_proxy_impl.cc5
-rw-r--r--chromium/gpu/ipc/client/command_buffer_proxy_impl.h1
-rw-r--r--chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc2
-rw-r--r--chromium/gpu/ipc/client/raster_in_process_context_tests.cc3
-rw-r--r--chromium/gpu/ipc/client/shared_image_interface_proxy.cc54
-rw-r--r--chromium/gpu/ipc/client/shared_image_interface_proxy.h10
-rw-r--r--chromium/gpu/ipc/command_buffer_task_executor.cc6
-rw-r--r--chromium/gpu/ipc/command_buffer_task_executor.h24
-rw-r--r--chromium/gpu/ipc/common/BUILD.gn7
-rw-r--r--chromium/gpu/ipc/common/OWNERS2
-rw-r--r--chromium/gpu/ipc/common/gpu_command_buffer_traits.h21
-rw-r--r--chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h4
-rw-r--r--chromium/gpu/ipc/common/gpu_feature_info.mojom27
-rw-r--r--chromium/gpu/ipc/common/gpu_feature_info.typemap2
-rw-r--r--chromium/gpu/ipc/common/gpu_feature_info_struct_traits.cc5
-rw-r--r--chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h86
-rw-r--r--chromium/gpu/ipc/common/gpu_info.mojom21
-rw-r--r--chromium/gpu/ipc/common/gpu_info_struct_traits.cc50
-rw-r--r--chromium/gpu/ipc/common/gpu_info_struct_traits.h32
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc6
-rw-r--r--chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc6
-rw-r--r--chromium/gpu/ipc/common/gpu_messages.h19
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences.mojom11
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences.typemap5
-rw-r--r--chromium/gpu/ipc/common/gpu_preferences_struct_traits.h55
-rw-r--r--chromium/gpu/ipc/common/gpu_surface_lookup.h7
-rw-r--r--chromium/gpu/ipc/common/gpu_surface_tracker.cc20
-rw-r--r--chromium/gpu/ipc/common/gpu_surface_tracker.h11
-rw-r--r--chromium/gpu/ipc/common/struct_traits_unittest.cc18
-rw-r--r--chromium/gpu/ipc/common/typemaps.gni1
-rw-r--r--chromium/gpu/ipc/common/vulkan_ycbcr_info.cc24
-rw-r--r--chromium/gpu/ipc/common/vulkan_ycbcr_info.h58
-rw-r--r--chromium/gpu/ipc/common/vulkan_ycbcr_info.mojom19
-rw-r--r--chromium/gpu/ipc/common/vulkan_ycbcr_info.typemap8
-rw-r--r--chromium/gpu/ipc/common/vulkan_ycbcr_info_mojom_traits.h53
-rw-r--r--chromium/gpu/ipc/gl_in_process_context.cc10
-rw-r--r--chromium/gpu/ipc/gpu_in_process_thread_service.cc6
-rw-r--r--chromium/gpu/ipc/gpu_in_process_thread_service.h3
-rw-r--r--chromium/gpu/ipc/host/BUILD.gn2
-rw-r--r--chromium/gpu/ipc/host/gpu_memory_buffer_support.cc70
-rw-r--r--chromium/gpu/ipc/host/gpu_memory_buffer_support.h2
-rw-r--r--chromium/gpu/ipc/host/gpu_switches.cc16
-rw-r--r--chromium/gpu/ipc/host/gpu_switches.h17
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.cc308
-rw-r--r--chromium/gpu/ipc/in_process_command_buffer.h38
-rw-r--r--chromium/gpu/ipc/in_process_gpu_thread_holder.cc6
-rw-r--r--chromium/gpu/ipc/raster_in_process_context.cc6
-rw-r--r--chromium/gpu/ipc/service/BUILD.gn11
-rw-r--r--chromium/gpu/ipc/service/DEPS2
-rw-r--r--chromium/gpu/ipc/service/child_window_win.cc169
-rw-r--r--chromium/gpu/ipc/service/child_window_win.h47
-rw-r--r--chromium/gpu/ipc/service/direct_composition_child_surface_win.cc447
-rw-r--r--chromium/gpu/ipc/service/direct_composition_child_surface_win.h95
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win.cc2200
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win.h127
-rw-r--r--chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc1246
-rw-r--r--chromium/gpu/ipc/service/gles2_command_buffer_stub.cc8
-rw-r--r--chromium/gpu/ipc/service/gles2_command_buffer_stub.h1
-rw-r--r--chromium/gpu/ipc/service/gpu_channel.cc10
-rw-r--r--chromium/gpu/ipc/service/gpu_channel.h3
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager.cc15
-rw-r--r--chromium/gpu/ipc/service/gpu_channel_manager.h9
-rw-r--r--chromium/gpu/ipc/service/gpu_init.cc66
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc16
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory.h7
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc27
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h9
-rw-r--r--chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc6
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread.cc80
-rw-r--r--chromium/gpu/ipc/service/gpu_watchdog_thread.h4
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_stub.cc213
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_stub.h8
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc102
-rw-r--r--chromium/gpu/ipc/service/image_decode_accelerator_worker.h35
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_android.cc8
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_delegate.h4
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h1
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm4
-rw-r--r--chromium/gpu/ipc/service/image_transport_surface_win.cc22
-rw-r--r--chromium/gpu/ipc/service/pass_through_image_transport_surface.cc1
-rw-r--r--chromium/gpu/ipc/service/raster_command_buffer_stub.cc2
-rw-r--r--chromium/gpu/ipc/service/shared_image_stub.cc59
-rw-r--r--chromium/gpu/ipc/service/shared_image_stub.h6
-rw-r--r--chromium/gpu/ipc/service/stream_texture_android.cc24
-rw-r--r--chromium/gpu/ipc/service/stream_texture_android.h2
-rw-r--r--chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc3
-rw-r--r--chromium/gpu/ipc/webgpu_in_process_context.cc3
-rw-r--r--chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc6
-rw-r--r--chromium/gpu/vulkan/BUILD.gn8
-rw-r--r--chromium/gpu/vulkan/android/BUILD.gn1
-rw-r--r--chromium/gpu/vulkan/android/vulkan_android_unittests.cc2
-rw-r--r--chromium/gpu/vulkan/android/vulkan_implementation_android.cc116
-rw-r--r--chromium/gpu/vulkan/android/vulkan_implementation_android.h17
-rw-r--r--chromium/gpu/vulkan/demo/main.cc2
-rw-r--r--chromium/gpu/vulkan/demo/vulkan_demo.cc31
-rw-r--r--chromium/gpu/vulkan/demo/vulkan_demo.h7
-rwxr-xr-xchromium/gpu/vulkan/generate_bindings.py43
-rw-r--r--chromium/gpu/vulkan/init/vulkan_factory.cc11
-rw-r--r--chromium/gpu/vulkan/init/vulkan_factory.h3
-rw-r--r--chromium/gpu/vulkan/semaphore_handle.cc2
-rw-r--r--chromium/gpu/vulkan/semaphore_handle.h8
-rw-r--r--chromium/gpu/vulkan/vulkan_command_buffer.cc147
-rw-r--r--chromium/gpu/vulkan/vulkan_command_buffer.h10
-rw-r--r--chromium/gpu/vulkan/vulkan_device_queue.cc10
-rw-r--r--chromium/gpu/vulkan/vulkan_device_queue.h8
-rw-r--r--chromium/gpu/vulkan/vulkan_fence_helper.cc127
-rw-r--r--chromium/gpu/vulkan/vulkan_fence_helper.h53
-rw-r--r--chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc90
-rw-r--r--chromium/gpu/vulkan/vulkan_function_pointers.cc60
-rw-r--r--chromium/gpu/vulkan/vulkan_function_pointers.h31
-rw-r--r--chromium/gpu/vulkan/vulkan_implementation.cc6
-rw-r--r--chromium/gpu/vulkan/vulkan_implementation.h42
-rw-r--r--chromium/gpu/vulkan/vulkan_instance.cc7
-rw-r--r--chromium/gpu/vulkan/vulkan_instance.h6
-rw-r--r--chromium/gpu/vulkan/vulkan_swap_chain.cc360
-rw-r--r--chromium/gpu/vulkan/vulkan_swap_chain.h76
-rw-r--r--chromium/gpu/vulkan/vulkan_util.cc22
-rw-r--r--chromium/gpu/vulkan/vulkan_util.h11
-rw-r--r--chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc18
-rw-r--r--chromium/gpu/vulkan/win32/vulkan_implementation_win32.h10
-rw-r--r--chromium/gpu/vulkan/x/vulkan_implementation_x11.cc52
-rw-r--r--chromium/gpu/vulkan/x/vulkan_implementation_x11.h12
270 files changed, 6114 insertions, 7106 deletions
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index 248e44bc137..034cbfcb061 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -6,6 +6,7 @@ import("//build/config/jumbo.gni")
import("//build/config/ui.gni")
import("//testing/libfuzzer/fuzzer_test.gni")
import("//testing/test.gni")
+import("//third_party/protobuf/proto_library.gni")
import("//ui/gl/features.gni")
config("gpu_implementation") {
@@ -153,6 +154,12 @@ jumbo_static_library("test_support") {
"command_buffer/client/client_test_helper.h",
"command_buffer/client/gles2_interface_stub.cc",
"command_buffer/client/gles2_interface_stub.h",
+ "command_buffer/client/gles2_interface_stub_autogen.h",
+ "command_buffer/client/gles2_interface_stub_impl_autogen.h",
+ "command_buffer/client/webgpu_interface_stub.cc",
+ "command_buffer/client/webgpu_interface_stub.h",
+ "command_buffer/client/webgpu_interface_stub_autogen.h",
+ "command_buffer/client/webgpu_interface_stub_impl_autogen.h",
"command_buffer/service/copy_texture_chromium_mock.cc",
"command_buffer/service/copy_texture_chromium_mock.h",
"command_buffer/service/error_state_mock.cc",
@@ -171,6 +178,7 @@ jumbo_static_library("test_support") {
":gpu",
":webgpu",
"//gpu/command_buffer/client:gles2_interface",
+ "//gpu/command_buffer/client:webgpu_interface",
"//gpu/ipc:gpu_thread_holder",
]
deps = [
@@ -189,6 +197,77 @@ jumbo_static_library("test_support") {
]
}
+if (!is_android && !is_fuchsia) {
+ proto_library("gl_lpm_fuzzer_proto") {
+ sources = [
+ "command_buffer/tests/lpm/gl_lpm_fuzzer.proto",
+ ]
+
+ use_protobuf_full = true
+ deps = [
+ "//third_party/protobuf:protobuf_full",
+ ]
+ }
+
+ static_library("gl_lpm_shader_to_string") {
+ sources = [
+ "command_buffer/tests/lpm/gl_lpm_shader_to_string.cc",
+ "command_buffer/tests/lpm/gl_lpm_shader_to_string.h",
+ ]
+
+ deps = [
+ ":gl_lpm_fuzzer_proto",
+ "//base:base",
+ ]
+ }
+
+ test("gl_lpm_shader_to_string_unittest") {
+ sources = [
+ "command_buffer/tests/lpm/gl_lpm_shader_to_string_unittest.cc",
+ ]
+
+ deps = [
+ ":gl_lpm_shader_to_string",
+ "//base/test:run_all_unittests",
+ "//testing/gtest",
+ "//third_party/protobuf:protobuf_full",
+ ]
+ }
+
+ fuzzer_test("gl_lpm_fuzzer") {
+ sources = [
+ "command_buffer/tests/gl_manager.cc",
+ "command_buffer/tests/gl_manager.h",
+ "command_buffer/tests/gl_test_utils.cc",
+ "command_buffer/tests/gl_test_utils.h",
+ "command_buffer/tests/lpm/gl_lpm_fuzzer.cc",
+ ]
+
+ defines = [ "GL_GLEXT_PROTOTYPES" ]
+
+ if (is_mac) {
+ libs = [ "IOSurface.framework" ]
+ }
+
+ deps = [
+ ":gl_lpm_fuzzer_proto",
+ ":gl_lpm_shader_to_string",
+ ":gles2",
+ ":test_support",
+ "//gpu/command_buffer/client:gles2_c_lib",
+ "//gpu/command_buffer/client:gles2_implementation",
+ "//gpu/command_buffer/common:gles2_utils",
+ "//gpu/ipc:gl_in_process_context",
+ "//gpu/ipc/service:service",
+ "//testing/gtest:gtest",
+ "//third_party/libprotobuf-mutator",
+ "//ui/gfx:gfx",
+ "//ui/gl:gl",
+ "//ui/gl/init:init",
+ ]
+ }
+}
+
test("gl_tests") {
sources = [
"command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc",
@@ -238,6 +317,7 @@ test("gl_tests") {
"command_buffer/tests/gl_tests_main.cc",
"command_buffer/tests/gl_texture_mailbox_unittest.cc",
"command_buffer/tests/gl_texture_storage_unittest.cc",
+ "command_buffer/tests/gl_unallocated_texture_unittest.cc",
"command_buffer/tests/gl_unittest.cc",
"command_buffer/tests/gl_unittests_android.cc",
"command_buffer/tests/gl_virtual_contexts_ext_window_rectangles_unittest.cc",
@@ -249,12 +329,12 @@ test("gl_tests") {
"ipc/client/gpu_context_tests.h",
"ipc/client/gpu_in_process_context_tests.cc",
"ipc/client/raster_in_process_context_tests.cc",
- "ipc/service/direct_composition_surface_win_unittest.cc",
]
if (use_dawn) {
sources += [
"command_buffer/tests/webgpu_fence_unittest.cc",
+ "command_buffer/tests/webgpu_mailbox_unittest.cc",
"command_buffer/tests/webgpu_test.cc",
"command_buffer/tests/webgpu_test.h",
]
@@ -281,13 +361,16 @@ test("gl_tests") {
"//gpu/ipc:gl_in_process_context",
"//gpu/ipc/host",
"//gpu/ipc/service",
+ "//mojo/core/embedder",
"//testing/gmock",
"//testing/gtest",
"//third_party/angle:translator",
+ "//third_party/libyuv",
"//ui/gfx",
"//ui/gfx:test_support",
"//ui/gfx/geometry",
"//ui/gl",
+ "//ui/gl:test_support",
"//ui/gl/init",
]
@@ -353,7 +436,6 @@ test("gpu_unittests") {
"command_buffer/common/gles2_cmd_format_test_autogen.h",
"command_buffer/common/gles2_cmd_utils_unittest.cc",
"command_buffer/common/id_allocator_test.cc",
- "command_buffer/common/id_type_unittest.cc",
"command_buffer/common/raster_cmd_format_test.cc",
"command_buffer/common/raster_cmd_format_test_autogen.h",
"command_buffer/common/unittest_main.cc",
@@ -458,9 +540,14 @@ test("gpu_unittests") {
"ipc/service/gpu_channel_test_common.cc",
"ipc/service/gpu_channel_test_common.h",
"ipc/service/gpu_channel_unittest.cc",
- "ipc/service/image_decode_accelerator_stub_unittest.cc",
]
+ if (is_chromeos) {
+ # Image decode acceleration with hardware is only supported in Chrome OS.
+ # The intention is to run this test in the linux-chromeos build.
+ sources += [ "ipc/service/image_decode_accelerator_stub_unittest.cc" ]
+ }
+
if (use_dawn) {
sources += [ "command_buffer/service/webgpu_decoder_unittest.cc" ]
}
diff --git a/chromium/gpu/DEPS b/chromium/gpu/DEPS
index 020fd3a5d3f..37916e4419c 100644
--- a/chromium/gpu/DEPS
+++ b/chromium/gpu/DEPS
@@ -5,7 +5,7 @@ include_rules = [
"+third_party/re2",
"+third_party/smhasher",
"+third_party/swiftshader",
- "+third_party/protbuf",
+ "+third_party/protobuf",
"+third_party/zlib",
"+crypto",
"+ui/gfx",
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_completion_query.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_completion_query.txt
new file mode 100644
index 00000000000..3283304124c
--- /dev/null
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_completion_query.txt
@@ -0,0 +1,60 @@
+Name
+
+ CHROMIUM_completion_query
+
+Name Strings
+
+ GL_CHROMIUM_completion_query
+
+Version
+
+ Last Modified Date: May 7, 2019
+
+Dependencies
+
+ OpenGL ES 2.0 is required.
+
+ GL_KHR_parallel_shader_compile is required.
+
+Overview
+
+ This extension provides a same query mechanism as the COMPLETION_STATUS_KHR
+ in GL_KHR_parallel_shader_compile, which indicates whether the program
+ linking or shader compilation has completed. The major advantage of this
+ query is that it doesn't incurs an expensive round-trip to the GPU thread.
+ So it's much cheaper for polling. You can use it this way:
+ glBeginQueryEXT(PROGRAM_COMPLETION_QUERY_CHROMIUM, query);
+ glLinkProgram(program);
+ glEndQueryEXT(PROGRAM_COMPLETION_QUERY_CHROMIUM);
+ GLuint available = 0u;
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT_AVAILABLE, &available);
+ if (available)
+ {
+ GLuint result = 0u;
+ glGetQueryObjectuivEXT(query, GL_QUERY_RESULT, &result);
+ }
+ If 'available' returns true, that's equivalent to COMPLETION_STATUS_KHR
+ returning true. Then LINK_STATUS can be obtained from 'result'.
+
+New Procedures and Functions
+
+ None.
+
+Errors
+
+ None.
+
+New Tokens
+
+ Accepted by the <target> parameter of BeginQueryEXT, EndQueryEXT,
+ and GetQueryObjectuivEXT:
+
+ PROGRAM_COMPLETION_QUERY_CHROMIUM 0x6009
+
+New State
+
+ None.
+
+Revision History
+
+ 5/3/2019 Documented the extension
diff --git a/chromium/gpu/GLES2/gl2extchromium.h b/chromium/gpu/GLES2/gl2extchromium.h
index ad7e3d0a6fd..58c61f42207 100644
--- a/chromium/gpu/GLES2/gl2extchromium.h
+++ b/chromium/gpu/GLES2/gl2extchromium.h
@@ -300,23 +300,23 @@ typedef void (GL_APIENTRYP PFNGLBLITFRAMEBUFFERCHROMIUMPROC) (GLint srcX0, GLint
#endif
#endif /* GL_CHROMIUM_framebuffer_multisample */
-/* GL_CHROMIUM_texture_compression_dxt3 */
-#ifndef GL_CHROMIUM_texture_compression_dxt3
-#define GL_CHROMIUM_texture_compression_dxt3 1
+/* GL_ANGLE_texture_compression_dxt3 */
+#ifndef GL_ANGLE_texture_compression_dxt3
+#define GL_ANGLE_texture_compression_dxt3 1
#ifndef GL_COMPRESSED_RGBA_S3TC_DXT3_EXT
#define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
#endif
-#endif /* GL_CHROMIUM_texture_compression_dxt3 */
+#endif /* GL_ANGLE_texture_compression_dxt3 */
-/* GL_CHROMIUM_texture_compression_dxt5 */
-#ifndef GL_CHROMIUM_texture_compression_dxt5
-#define GL_CHROMIUM_texture_compression_dxt5 1
+/* GL_ANGLE_texture_compression_dxt5 */
+#ifndef GL_ANGLE_texture_compression_dxt5
+#define GL_ANGLE_texture_compression_dxt5 1
#ifndef GL_COMPRESSED_RGBA_S3TC_DXT5_EXT
#define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
#endif
-#endif /* GL_CHROMIUM_texture_compression_dxt5 */
+#endif /* GL_ANGLE_texture_compression_dxt5 */
/* GL_CHROMIUM_async_pixel_transfers */
#ifndef GL_CHROMIUM_async_pixel_transfers
@@ -1287,6 +1287,16 @@ typedef void(GL_APIENTRYP PFNGLUNPREMULTIPLYANDDITHERCOPYCHROMIUMPROC)(
#define GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM 0x8AF7
#endif /* GL_CHROMIUM_shared_image */
+/* GL_CHROMIUM_program_completion_query */
+#ifndef GL_CHROMIUM_program_completion_query
+#define GL_CHROMIUM_program_completion_query 1
+
+#ifndef GL_PROGRAM_COMPLETION_QUERY_CHROMIUM
+// TODO(jie.a.chen@intel.com): Get official numbers for this constants.
+#define GL_PROGRAM_COMPLETION_QUERY_CHROMIUM 0x6009
+#endif
+#endif /* GL_CHROMIUM_program_completion_query */
+
#ifdef __cplusplus
}
#endif
diff --git a/chromium/gpu/angle_end2end_tests_main.cc b/chromium/gpu/angle_end2end_tests_main.cc
index e6fde851cd4..5d9353cbb36 100644
--- a/chromium/gpu/angle_end2end_tests_main.cc
+++ b/chromium/gpu/angle_end2end_tests_main.cc
@@ -24,8 +24,8 @@ void ANGLEProcessTestArgs(int *argc, char *argv[]);
int main(int argc, char** argv) {
base::CommandLine::Init(argc, argv);
- testing::InitGoogleMock(&argc, argv);
ANGLEProcessTestArgs(&argc, argv);
+ testing::InitGoogleMock(&argc, argv);
base::TestSuite test_suite(argc, argv);
int rt = base::LaunchUnitTestsWithOptions(
argc, argv,
diff --git a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
index 65915b7b1d7..9d66d120638 100644
--- a/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
+++ b/chromium/gpu/command_buffer/build_cmd_buffer_lib.py
@@ -1485,8 +1485,8 @@ TEST_P(%(test_name)s, %(name)sInvalidArgs%(arg_index)d_%(value_index)d) {
args = func.GetOriginalArgs()
arg_string = ", ".join(
["%s /* %s */" % (arg.type, arg.name) for arg in args])
- f.write("%s GLES2InterfaceStub::%s(%s) {\n" %
- (func.return_type, func.original_name, arg_string))
+ f.write("%s %sInterfaceStub::%s(%s) {\n" %
+ (func.return_type, _prefix, func.original_name, arg_string))
if func.return_type != "void":
f.write(" return 0;\n")
f.write("}\n")
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index 255f6eb57f5..e830f36dcb8 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -301,12 +301,8 @@ _NAMED_TYPE_INFO = {
'GL_TRANSFORM_FEEDBACK_ACTIVE',
'GL_TRANSFORM_FEEDBACK_BUFFER_BINDING',
'GL_TRANSFORM_FEEDBACK_PAUSED',
- 'GL_TRANSFORM_FEEDBACK_BUFFER_SIZE',
- 'GL_TRANSFORM_FEEDBACK_BUFFER_START',
'GL_UNIFORM_BUFFER_BINDING',
'GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT',
- 'GL_UNIFORM_BUFFER_SIZE',
- 'GL_UNIFORM_BUFFER_START',
'GL_UNPACK_IMAGE_HEIGHT',
'GL_UNPACK_ROW_LENGTH',
'GL_UNPACK_SKIP_IMAGES',
@@ -748,6 +744,7 @@ _NAMED_TYPE_INFO = {
'GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM',
'GL_COMMANDS_COMPLETED_CHROMIUM',
'GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM',
+ 'GL_PROGRAM_COMPLETION_QUERY_CHROMIUM',
],
},
'RenderBufferParameter': {
diff --git a/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py b/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py
index f1067938a5f..e8f179a1669 100755
--- a/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_webgpu_cmd_buffer.py
@@ -42,9 +42,16 @@ _FUNCTION_INFO = {
'commands': 'size * sizeof(char)',
},
},
+ 'AssociateMailbox': {
+ 'type': 'PUT',
+ 'count': 16, # GL_MAILBOX_SIZE_CHROMIUM
+ 'trace_level': 1,
+ },
+ 'DissociateMailbox': {
+ 'trace_level': 1,
+ },
}
-
def main(argv):
"""This is the main function."""
parser = OptionParser()
@@ -89,6 +96,10 @@ def main(argv):
"gpu/command_buffer/client/webgpu_interface_autogen.h")
gen.WriteGLES2ImplementationHeader(
"gpu/command_buffer/client/webgpu_implementation_autogen.h")
+ gen.WriteGLES2InterfaceStub(
+ "gpu/command_buffer/client/webgpu_interface_stub_autogen.h")
+ gen.WriteGLES2InterfaceStubImpl(
+ "gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h")
gen.WriteGLES2Implementation(
"gpu/command_buffer/client/webgpu_implementation_impl_autogen.h")
gen.WriteGLES2ImplementationUnitTests(
diff --git a/chromium/gpu/command_buffer/client/client_test_helper.h b/chromium/gpu/command_buffer/client/client_test_helper.h
index 75ccd661a01..50d3b601b0c 100644
--- a/chromium/gpu/command_buffer/client/client_test_helper.h
+++ b/chromium/gpu/command_buffer/client/client_test_helper.h
@@ -143,6 +143,7 @@ class MockClientGpuControl : public GpuControl {
void GetGpuFence(uint32_t gpu_fence_id,
base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)>
callback) override {}
+ MOCK_METHOD1(SetDisplayTransform, void(gfx::OverlayTransform));
private:
DISALLOW_COPY_AND_ASSIGN(MockClientGpuControl);
diff --git a/chromium/gpu/command_buffer/client/context_support.h b/chromium/gpu/command_buffer/client/context_support.h
index 48e2b80cf85..3da9d4a5f8d 100644
--- a/chromium/gpu/command_buffer/client/context_support.h
+++ b/chromium/gpu/command_buffer/client/context_support.h
@@ -154,6 +154,10 @@ class ContextSupport {
virtual void DidCallGLFromSkia() = 0;
+ // Notifies the onscreen surface of the display transform applied to the swaps
+ // from the client.
+ virtual void SetDisplayTransform(gfx::OverlayTransform transform) = 0;
+
protected:
ContextSupport() = default;
virtual ~ContextSupport() = default;
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index bb4a599a9a3..1784043bfdc 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -377,8 +377,12 @@ void GLES2Implementation::OnGpuControlSwapBuffersCompleted(
if (found == pending_swap_callbacks_.end())
return;
- std::move(found->second).Run(params);
+ // Erase the entry before running the callback to guard against the callback
+ // mutating the |pending_swap_callbacks_|.
+ auto callback = std::move(found->second);
pending_swap_callbacks_.erase(found);
+
+ std::move(callback).Run(params);
}
void GLES2Implementation::SendErrorMessage(std::string message, int32_t id) {
@@ -416,8 +420,13 @@ void GLES2Implementation::OnSwapBufferPresented(
auto found = pending_presentation_callbacks_.find(swap_id);
if (found == pending_presentation_callbacks_.end())
return;
- std::move(found->second).Run(feedback);
+
+ // Erase the entry before running the callback to guard against the callback
+ // mutating the |pending_presentation_callbacks_|.
+ auto callback = std::move(found->second);
pending_presentation_callbacks_.erase(found);
+
+ std::move(callback).Run(feedback);
}
void GLES2Implementation::OnGpuControlReturnData(
@@ -6078,6 +6087,7 @@ void GLES2Implementation::BeginQueryEXT(GLenum target, GLuint id) {
case GL_LATENCY_QUERY_CHROMIUM:
case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
case GL_GET_ERROR_QUERY_CHROMIUM:
+ case GL_PROGRAM_COMPLETION_QUERY_CHROMIUM:
break;
case GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM:
case GL_COMMANDS_COMPLETED_CHROMIUM:
@@ -6446,6 +6456,7 @@ GLuint GLES2Implementation::CreateAndTexStorage2DSharedImageCHROMIUM(
const Mailbox& mailbox = *reinterpret_cast<const Mailbox*>(mailbox_data);
DCHECK(mailbox.Verify()) << "CreateAndTexStorage2DSharedImageCHROMIUM was "
"passed an invalid mailbox.";
+ DCHECK(mailbox.IsSharedImage());
GLuint client_id;
GetIdHandler(SharedIdNamespaces::kTextures)->MakeIds(this, 0, 1, &client_id);
helper_->CreateAndTexStorage2DSharedImageINTERNALImmediate(client_id, GL_NONE,
@@ -6470,6 +6481,7 @@ GLES2Implementation::CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM(
DCHECK(mailbox.Verify())
<< "CreateAndTexStorage2DSharedImageWithInternalFormatCHROMIUM was "
"passed an invalid mailbox.";
+ DCHECK(mailbox.IsSharedImage());
GLuint client_id;
GetIdHandler(SharedIdNamespaces::kTextures)->MakeIds(this, 0, 1, &client_id);
helper_->CreateAndTexStorage2DSharedImageINTERNALImmediate(
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
index 77861ecf06d..173c599ed54 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest.cc
@@ -3586,7 +3586,7 @@ TEST_F(GLES2ImplementationTest, CreateAndTexStorage2DSharedImageCHROMIUM) {
GLbyte data[GL_MAILBOX_SIZE_CHROMIUM];
};
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
Cmds expected;
expected.cmd.Init(kTexturesStartId, GL_NONE, mailbox.name);
GLuint id = gl_->CreateAndTexStorage2DSharedImageCHROMIUM(mailbox.name);
@@ -3601,7 +3601,7 @@ TEST_F(GLES2ImplementationTest,
GLbyte data[GL_MAILBOX_SIZE_CHROMIUM];
};
- Mailbox mailbox = Mailbox::Generate();
+ Mailbox mailbox = Mailbox::GenerateForSharedImage();
const GLenum kFormat = GL_RGBA;
Cmds expected;
expected.cmd.Init(kTexturesStartId, kFormat, mailbox.name);
diff --git a/chromium/gpu/command_buffer/client/gpu_control.h b/chromium/gpu/command_buffer/client/gpu_control.h
index 61c8c3471bf..9bcdb6130f5 100644
--- a/chromium/gpu/command_buffer/client/gpu_control.h
+++ b/chromium/gpu/command_buffer/client/gpu_control.h
@@ -17,6 +17,7 @@
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/gpu_export.h"
+#include "ui/gfx/overlay_transform.h"
extern "C" typedef struct _ClientBuffer* ClientBuffer;
extern "C" typedef struct _ClientGpuFence* ClientGpuFence;
@@ -114,6 +115,10 @@ class GPU_EXPORT GpuControl {
// first so does not need to be flushed.
virtual bool CanWaitUnverifiedSyncToken(const SyncToken& sync_token) = 0;
+ // Notifies the onscreen surface of the display transform applied to the swaps
+ // from the client.
+ virtual void SetDisplayTransform(gfx::OverlayTransform transform) = 0;
+
private:
DISALLOW_COPY_AND_ASSIGN(GpuControl);
};
diff --git a/chromium/gpu/command_buffer/client/implementation_base.cc b/chromium/gpu/command_buffer/client/implementation_base.cc
index d4d53226ad0..1adb73b0251 100644
--- a/chromium/gpu/command_buffer/client/implementation_base.cc
+++ b/chromium/gpu/command_buffer/client/implementation_base.cc
@@ -416,4 +416,9 @@ void ImplementationBase::DidCallGLFromSkia() {
NOTREACHED();
}
+void ImplementationBase::SetDisplayTransform(gfx::OverlayTransform transform) {
+ helper_->Flush();
+ gpu_control_->SetDisplayTransform(transform);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/implementation_base.h b/chromium/gpu/command_buffer/client/implementation_base.h
index 82a5b96540b..8a302e6eaab 100644
--- a/chromium/gpu/command_buffer/client/implementation_base.h
+++ b/chromium/gpu/command_buffer/client/implementation_base.h
@@ -84,6 +84,7 @@ class GLES2_IMPL_EXPORT ImplementationBase
bool HasGrContextSupport() const override;
void WillCallGLFromSkia() override;
void DidCallGLFromSkia() override;
+ void SetDisplayTransform(gfx::OverlayTransform transform) override;
// base::trace_event::MemoryDumpProvider implementation.
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
index 7bb473935e6..a06895e0f59 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles_unittest.cc
@@ -228,6 +228,7 @@ class ContextSupportStub : public ContextSupport {
void SetGrContext(GrContext* gr) override {}
void WillCallGLFromSkia() override {}
void DidCallGLFromSkia() override {}
+ void SetDisplayTransform(gfx::OverlayTransform transform) override {}
private:
std::unique_ptr<char[]> mapped_transfer_cache_entry_;
diff --git a/chromium/gpu/command_buffer/client/shared_image_interface.h b/chromium/gpu/command_buffer/client/shared_image_interface.h
index 8b80c01a9dc..0db8109c414 100644
--- a/chromium/gpu/command_buffer/client/shared_image_interface.h
+++ b/chromium/gpu/command_buffer/client/shared_image_interface.h
@@ -7,6 +7,7 @@
#include "base/compiler_specific.h"
#include "base/containers/span.h"
+#include "build/build_config.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/sync_token.h"
@@ -20,11 +21,11 @@ class Size;
namespace gpu {
class GpuMemoryBufferManager;
-// An interface to create shared images that can be imported into other APIs.
-// This interface is thread-safe and (essentially) stateless. It is asynchronous
-// in the same sense as GLES2Interface or RasterInterface in that commands are
-// executed asynchronously on the service side, but can be synchronized using
-// SyncTokens. See //docs/design/gpu_synchronization.md.
+// An interface to create shared images and swap chains that can be imported
+// into other APIs. This interface is thread-safe and (essentially) stateless.
+// It is asynchronous in the same sense as GLES2Interface or RasterInterface in
+// that commands are executed asynchronously on the service side, but can be
+// synchronized using SyncTokens. See //docs/design/gpu_synchronization.md.
class SharedImageInterface {
public:
virtual ~SharedImageInterface() {}
@@ -90,6 +91,32 @@ class SharedImageInterface {
virtual void DestroySharedImage(const SyncToken& sync_token,
const Mailbox& mailbox) = 0;
+#if defined(OS_WIN)
+ struct SwapChainMailboxes {
+ Mailbox front_buffer;
+ Mailbox back_buffer;
+ };
+
+ // Creates a swap chain.
+ // Returns mailboxes for front and back buffers of a DXGI Swap Chain that can
+ // be imported into GL command buffer using shared image functions (e.g.
+ // GLES2Interface::CreateAndTexStorage2DSharedImageCHROMIUM) or (deprecated)
+ // mailbox functions (e.g. GLES2Interface::CreateAndConsumeTextureCHROMIUM).
+ virtual SwapChainMailboxes CreateSwapChain(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) = 0;
+
+ // Swaps front and back buffer of a swap chain. Back buffer mailbox still
+ // refers to the back buffer of the swap chain after calling PresentSwapChain.
+ // The mailbox argument should be back buffer mailbox. Sync token is required
+ // for synchronization between shared image stream and command buffer stream,
+ // to ensure that all the rendering commands to a frame are executed before
+ // presenting the swap chain.
+ virtual void PresentSwapChain(const SyncToken& sync_token,
+ const Mailbox& mailbox) = 0;
+#endif // OS_WIN
+
// Generates an unverified SyncToken that is released after all previous
// commands on this interface have executed on the service side.
virtual SyncToken GenUnverifiedSyncToken() = 0;
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
index 002bda4f0ed..8ca8609db9e 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
+++ b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
@@ -20,6 +20,7 @@
using ::testing::_;
using ::testing::AtMost;
+using ::testing::DoAll;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::SetArgPointee;
diff --git a/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h
index f94bf3d105e..a90c77a14cf 100644
--- a/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_cmd_helper_autogen.h
@@ -20,4 +20,27 @@ void DawnCommands(uint32_t commands_shm_id,
}
}
+void AssociateMailboxImmediate(GLuint device_id,
+ GLuint device_generation,
+ GLuint id,
+ GLuint generation,
+ GLuint usage,
+ const GLbyte* mailbox) {
+ const uint32_t size = webgpu::cmds::AssociateMailboxImmediate::ComputeSize();
+ webgpu::cmds::AssociateMailboxImmediate* c =
+ GetImmediateCmdSpaceTotalSize<webgpu::cmds::AssociateMailboxImmediate>(
+ size);
+ if (c) {
+ c->Init(device_id, device_generation, id, generation, usage, mailbox);
+ }
+}
+
+void DissociateMailbox(GLuint texture_id, GLuint texture_generation) {
+ webgpu::cmds::DissociateMailbox* c =
+ GetCmdSpace<webgpu::cmds::DissociateMailbox>();
+ if (c) {
+ c->Init(texture_id, texture_generation);
+ }
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_CMD_HELPER_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.cc b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
index c93978f507d..7d875ed907e 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.cc
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.cc
@@ -8,6 +8,7 @@
#include <vector>
#include "base/numerics/checked_math.h"
+#include "base/trace_event/trace_event.h"
#include "gpu/command_buffer/client/gpu_control.h"
#include "gpu/command_buffer/client/shared_memory_limits.h"
@@ -195,6 +196,14 @@ void WebGPUImplementation::OnSwapBufferPresented(
void WebGPUImplementation::OnGpuControlReturnData(
base::span<const uint8_t> data) {
#if BUILDFLAG(USE_DAWN)
+
+ static uint32_t return_trace_id = 0;
+ TRACE_EVENT_FLOW_END0(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "DawnReturnCommands", return_trace_id++);
+
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "WebGPUImplementation::OnGpuControlReturnData", "bytes",
+ data.size());
if (!wire_client_->HandleCommands(
reinterpret_cast<const char*>(data.data()), data.size())) {
// TODO(enga): Lose the context.
@@ -229,6 +238,8 @@ void* WebGPUImplementation::GetCmdSpace(size_t size) {
uint32_t allocation_size =
std::max(c2s_buffer_default_size_, static_cast<uint32_t>(size));
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "WebGPUImplementation::GetCmdSpace", "bytes", allocation_size);
c2s_buffer_.Reset(allocation_size);
c2s_put_offset_ = 0;
next_offset = size;
@@ -248,6 +259,14 @@ void* WebGPUImplementation::GetCmdSpace(size_t size) {
bool WebGPUImplementation::Flush() {
if (c2s_buffer_.valid()) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "WebGPUImplementation::Flush", "bytes", c2s_put_offset_);
+
+ TRACE_EVENT_FLOW_BEGIN0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.dawn"), "DawnCommands",
+ (static_cast<uint64_t>(c2s_buffer_.shm_id()) << 32) +
+ c2s_buffer_.offset());
+
c2s_buffer_.Shrink(c2s_put_offset_);
helper_->DawnCommands(c2s_buffer_.shm_id(), c2s_buffer_.offset(),
c2s_put_offset_);
@@ -278,5 +297,15 @@ DawnDevice WebGPUImplementation::GetDefaultDevice() {
#endif
}
+ReservedTexture WebGPUImplementation::ReserveTexture(DawnDevice device) {
+#if BUILDFLAG(USE_DAWN)
+ dawn_wire::ReservedTexture reservation = wire_client_->ReserveTexture(device);
+ return {reservation.texture, reservation.id, reservation.generation};
+#else
+ NOTREACHED();
+ return {};
+#endif
+}
+
} // namespace webgpu
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation.h b/chromium/gpu/command_buffer/client/webgpu_implementation.h
index 3032f9980e9..fe7adebd300 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation.h
@@ -114,9 +114,11 @@ class WEBGPU_EXPORT WebGPUImplementation final
const DawnProcTable& GetProcs() const override;
void FlushCommands() override;
DawnDevice GetDefaultDevice() override;
+ ReservedTexture ReserveTexture(DawnDevice device) override;
private:
const char* GetLogPrefix() const { return "webgpu"; }
+ void CheckGLError() {}
WebGPUCmdHelper* helper_;
#if BUILDFLAG(USE_DAWN)
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h b/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h
index fe65c4aa8ef..1e299de566d 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation_autogen.h
@@ -13,4 +13,13 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_AUTOGEN_H_
+void AssociateMailbox(GLuint device_id,
+ GLuint device_generation,
+ GLuint id,
+ GLuint generation,
+ GLuint usage,
+ const GLbyte* mailbox) override;
+
+void DissociateMailbox(GLuint texture_id, GLuint texture_generation) override;
+
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h
index 06f94d450ad..c5f555eca66 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation_impl_autogen.h
@@ -13,4 +13,31 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_IMPL_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_IMPL_AUTOGEN_H_
+void WebGPUImplementation::AssociateMailbox(GLuint device_id,
+ GLuint device_generation,
+ GLuint id,
+ GLuint generation,
+ GLuint usage,
+ const GLbyte* mailbox) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] wgAssociateMailbox(" << device_id
+ << ", " << device_generation << ", " << id << ", "
+ << generation << ", " << usage << ", "
+ << static_cast<const void*>(mailbox) << ")");
+ uint32_t count = 16;
+ for (uint32_t ii = 0; ii < count; ++ii)
+ GPU_CLIENT_LOG("value[" << ii << "]: " << mailbox[ii]);
+ helper_->AssociateMailboxImmediate(device_id, device_generation, id,
+ generation, usage, mailbox);
+ CheckGLError();
+}
+
+void WebGPUImplementation::DissociateMailbox(GLuint texture_id,
+ GLuint texture_generation) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] wgDissociateMailbox(" << texture_id
+ << ", " << texture_generation << ")");
+ helper_->DissociateMailbox(texture_id, texture_generation);
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h
index 12aa0d1f975..66a91a31401 100644
--- a/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h
@@ -13,4 +13,30 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
+TEST_F(WebGPUImplementationTest, AssociateMailbox) {
+ GLbyte data[16] = {0};
+ struct Cmds {
+ cmds::AssociateMailboxImmediate cmd;
+ GLbyte data[16];
+ };
+
+ for (int jj = 0; jj < 16; ++jj) {
+ data[jj] = static_cast<GLbyte>(jj);
+ }
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5, &data[0]);
+ gl_->AssociateMailbox(1, 2, 3, 4, 5, &data[0]);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(WebGPUImplementationTest, DissociateMailbox) {
+ struct Cmds {
+ cmds::DissociateMailbox cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->DissociateMailbox(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface.h b/chromium/gpu/command_buffer/client/webgpu_interface.h
index 480752a1771..5cdf6b5238f 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface.h
@@ -9,12 +9,15 @@
#include "gpu/command_buffer/client/interface_base.h"
-extern "C" typedef struct _ClientBuffer* ClientBuffer;
-extern "C" typedef struct _GLColorSpace* GLColorSpace;
-
namespace gpu {
namespace webgpu {
+struct ReservedTexture {
+ DawnTexture texture;
+ uint32_t id;
+ uint32_t generation;
+};
+
class WebGPUInterface : public InterfaceBase {
public:
WebGPUInterface() {}
@@ -23,6 +26,7 @@ class WebGPUInterface : public InterfaceBase {
virtual const DawnProcTable& GetProcs() const = 0;
virtual void FlushCommands() = 0;
virtual DawnDevice GetDefaultDevice() = 0;
+ virtual ReservedTexture ReserveTexture(DawnDevice device) = 0;
// Include the auto-generated part of this class. We split this because
// it means we can easily edit the non-auto generated parts right here in
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h b/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h
index e357cc2ae22..92784474d30 100644
--- a/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_autogen.h
@@ -13,4 +13,12 @@
#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_AUTOGEN_H_
+virtual void AssociateMailbox(GLuint device_id,
+ GLuint device_generation,
+ GLuint id,
+ GLuint generation,
+ GLuint usage,
+ const GLbyte* mailbox) = 0;
+virtual void DissociateMailbox(GLuint texture_id,
+ GLuint texture_generation) = 0;
#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
new file mode 100644
index 00000000000..c0163e33dfc
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.cc
@@ -0,0 +1,39 @@
+// Copyright (c) 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/client/webgpu_interface_stub.h"
+
+namespace gpu {
+namespace webgpu {
+
+WebGPUInterfaceStub::WebGPUInterfaceStub() = default;
+
+WebGPUInterfaceStub::~WebGPUInterfaceStub() = default;
+
+// InterfaceBase implementation.
+void WebGPUInterfaceStub::GenSyncTokenCHROMIUM(GLbyte* sync_token) {}
+void WebGPUInterfaceStub::GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) {}
+void WebGPUInterfaceStub::VerifySyncTokensCHROMIUM(GLbyte** sync_tokens,
+ GLsizei count) {}
+void WebGPUInterfaceStub::WaitSyncTokenCHROMIUM(const GLbyte* sync_token) {}
+
+// WebGPUInterface implementation
+const DawnProcTable& WebGPUInterfaceStub::GetProcs() const {
+ return null_procs_;
+}
+void WebGPUInterfaceStub::FlushCommands() {}
+DawnDevice WebGPUInterfaceStub::GetDefaultDevice() {
+ return nullptr;
+}
+ReservedTexture WebGPUInterfaceStub::ReserveTexture(DawnDevice device) {
+ return {nullptr, 0, 0};
+}
+
+// Include the auto-generated part of this class. We split this because
+// it means we can easily edit the non-auto generated parts right here in
+// this file instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h"
+
+} // namespace webgpu
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
new file mode 100644
index 00000000000..e16a57ec132
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_H_
+#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_H_
+
+#include "gpu/command_buffer/client/webgpu_interface.h"
+
+namespace gpu {
+namespace webgpu {
+
+// This class a stub to help with mocks for the WebGPUInterface class.
+class WebGPUInterfaceStub : public WebGPUInterface {
+ public:
+ WebGPUInterfaceStub();
+ ~WebGPUInterfaceStub() override;
+
+ // InterfaceBase implementation.
+ void GenSyncTokenCHROMIUM(GLbyte* sync_token) override;
+ void GenUnverifiedSyncTokenCHROMIUM(GLbyte* sync_token) override;
+ void VerifySyncTokensCHROMIUM(GLbyte** sync_tokens, GLsizei count) override;
+ void WaitSyncTokenCHROMIUM(const GLbyte* sync_token) override;
+
+ // WebGPUInterface implementation
+ const DawnProcTable& GetProcs() const override;
+ void FlushCommands() override;
+ DawnDevice GetDefaultDevice() override;
+ ReservedTexture ReserveTexture(DawnDevice device) override;
+
+// Include the auto-generated part of this class. We split this because
+// it means we can easily edit the non-auto generated parts right here in
+// this file instead of having to edit some template or the code generator.
+#include "gpu/command_buffer/client/webgpu_interface_stub_autogen.h"
+
+ private:
+ DawnProcTable null_procs_;
+};
+
+} // namespace webgpu
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h
new file mode 100644
index 00000000000..6d23a6873e3
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub_autogen.h
@@ -0,0 +1,22 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_webgpu_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_interface_stub.h.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_AUTOGEN_H_
+
+void AssociateMailbox(GLuint device_id,
+ GLuint device_generation,
+ GLuint id,
+ GLuint generation,
+ GLuint usage,
+ const GLbyte* mailbox) override;
+void DissociateMailbox(GLuint texture_id, GLuint texture_generation) override;
+#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h
new file mode 100644
index 00000000000..0d89b6896a4
--- /dev/null
+++ b/chromium/gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h
@@ -0,0 +1,23 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is auto-generated from
+// gpu/command_buffer/build_webgpu_cmd_buffer.py
+// It's formatted by clang-format using chromium coding style:
+// clang-format -i -style=chromium filename
+// DO NOT EDIT!
+
+// This file is included by gles2_interface_stub.cc.
+#ifndef GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_IMPL_AUTOGEN_H_
+#define GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_IMPL_AUTOGEN_H_
+
+void WebGPUInterfaceStub::AssociateMailbox(GLuint /* device_id */,
+ GLuint /* device_generation */,
+ GLuint /* id */,
+ GLuint /* generation */,
+ GLuint /* usage */,
+ const GLbyte* /* mailbox */) {}
+void WebGPUInterfaceStub::DissociateMailbox(GLuint /* texture_id */,
+ GLuint /* texture_generation */) {}
+#endif // GPU_COMMAND_BUFFER_CLIENT_WEBGPU_INTERFACE_STUB_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/BUILD.gn b/chromium/gpu/command_buffer/common/BUILD.gn
index 260d231d265..898861835f6 100644
--- a/chromium/gpu/command_buffer/common/BUILD.gn
+++ b/chromium/gpu/command_buffer/common/BUILD.gn
@@ -82,7 +82,6 @@ jumbo_source_set("common_sources") {
"gpu_memory_buffer_support.h",
"id_allocator.cc",
"id_allocator.h",
- "id_type.h",
"mailbox.cc",
"mailbox.h",
"mailbox_holder.cc",
@@ -107,6 +106,7 @@ jumbo_source_set("common_sources") {
configs += [ "//gpu:gpu_implementation" ]
public_deps = [
+ "//base/util/type_safety",
"//mojo/public/cpp/system",
"//ui/gfx:memory_buffer",
"//ui/gfx/geometry",
diff --git a/chromium/gpu/command_buffer/common/capabilities.h b/chromium/gpu/command_buffer/common/capabilities.h
index 0a03c8e6642..b9ea9011915 100644
--- a/chromium/gpu/command_buffer/common/capabilities.h
+++ b/chromium/gpu/command_buffer/common/capabilities.h
@@ -171,6 +171,7 @@ struct GPU_EXPORT Capabilities {
bool dc_layers = false;
bool use_dc_overlays_for_video = false;
bool protected_video_swap_chain = false;
+ bool gpu_vsync = false;
// When this parameter is true, a CHROMIUM image created with RGB format will
// actually have RGBA format. The client is responsible for handling most of
diff --git a/chromium/gpu/command_buffer/common/command_buffer_id.h b/chromium/gpu/command_buffer/common/command_buffer_id.h
index 99b5bb8621a..d3cacb2d18e 100644
--- a/chromium/gpu/command_buffer/common/command_buffer_id.h
+++ b/chromium/gpu/command_buffer/common/command_buffer_id.h
@@ -5,12 +5,12 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_ID_H_
#define GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_ID_H_
-#include "gpu/command_buffer/common/id_type.h"
+#include "base/util/type_safety/id_type.h"
namespace gpu {
class CommandBuffer;
-using CommandBufferId = gpu::IdTypeU64<CommandBuffer>;
+using CommandBufferId = util::IdTypeU64<CommandBuffer>;
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/command_buffer_shared.h b/chromium/gpu/command_buffer/common/command_buffer_shared.h
index fafe767bdb1..97dbed18699 100644
--- a/chromium/gpu/command_buffer/common/command_buffer_shared.h
+++ b/chromium/gpu/command_buffer/common/command_buffer_shared.h
@@ -5,6 +5,8 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_SHARED_H_
#define GPU_COMMAND_BUFFER_COMMON_COMMAND_BUFFER_SHARED_H_
+#include <atomic>
+
#include "command_buffer.h"
#include "base/atomicops.h"
@@ -31,7 +33,7 @@ public:
base::subtle::NoBarrier_Store(&latest_, 0);
base::subtle::NoBarrier_Store(&slots_[0], 0);
base::subtle::Release_Store(&slots_[1], 0);
- base::subtle::MemoryBarrier();
+ std::atomic_thread_fence(std::memory_order_seq_cst);
}
void Write(const T& state) {
@@ -40,16 +42,16 @@ public:
states_[towrite][index] = state;
base::subtle::Release_Store(&slots_[towrite], index);
base::subtle::Release_Store(&latest_, towrite);
- base::subtle::MemoryBarrier();
+ std::atomic_thread_fence(std::memory_order_seq_cst);
}
// Attempt to update the state, updating only if the generation counter is
// newer.
void Read(T* state) {
- base::subtle::MemoryBarrier();
+ std::atomic_thread_fence(std::memory_order_seq_cst);
int toread = !!base::subtle::Acquire_Load(&latest_);
base::subtle::Release_Store(&reading_, toread);
- base::subtle::MemoryBarrier();
+ std::atomic_thread_fence(std::memory_order_seq_cst);
int index = !!base::subtle::Acquire_Load(&slots_[toread]);
if (states_[toread][index].generation - state->generation < 0x80000000U)
*state = states_[toread][index];
diff --git a/chromium/gpu/command_buffer/common/context_creation_attribs.h b/chromium/gpu/command_buffer/common/context_creation_attribs.h
index 7c8ba03cde5..a6516327472 100644
--- a/chromium/gpu/command_buffer/common/context_creation_attribs.h
+++ b/chromium/gpu/command_buffer/common/context_creation_attribs.h
@@ -44,7 +44,7 @@ struct GPU_EXPORT ContextCreationAttribs {
ContextCreationAttribs& operator=(const ContextCreationAttribs& other);
gfx::Size offscreen_framebuffer_size;
- gl::GpuPreference gpu_preference = gl::PreferIntegratedGpu;
+ gl::GpuPreference gpu_preference = gl::GpuPreference::kLowPower;
// -1 if invalid or unspecified.
int32_t alpha_size = -1;
int32_t blue_size = -1;
@@ -65,7 +65,6 @@ struct GPU_EXPORT ContextCreationAttribs {
bool enable_raster_interface = false;
bool enable_oop_rasterization = false;
bool enable_swap_timestamps_if_supported = false;
- bool backed_by_surface_texture = false;
ContextType context_type = CONTEXT_TYPE_OPENGLES2;
ColorSpace color_space = COLOR_SPACE_UNSPECIFIED;
diff --git a/chromium/gpu/command_buffer/common/discardable_handle.h b/chromium/gpu/command_buffer/common/discardable_handle.h
index 33f2d859139..dbc1149df8b 100644
--- a/chromium/gpu/command_buffer/common/discardable_handle.h
+++ b/chromium/gpu/command_buffer/common/discardable_handle.h
@@ -6,7 +6,7 @@
#define GPU_COMMAND_BUFFER_COMMON_DISCARDABLE_HANDLE_H_
#include "base/memory/ref_counted.h"
-#include "gpu/command_buffer/common/id_type.h"
+#include "base/util/type_safety/id_type.h"
#include "gpu/gpu_export.h"
namespace gpu {
@@ -83,7 +83,7 @@ class GPU_EXPORT DiscardableHandleBase {
// handle (via the constructor), and can Lock an existing handle.
class GPU_EXPORT ClientDiscardableHandle : public DiscardableHandleBase {
public:
- using Id = IdType32<ClientDiscardableHandle>;
+ using Id = util::IdType32<ClientDiscardableHandle>;
ClientDiscardableHandle(); // Constructs an invalid handle.
ClientDiscardableHandle(scoped_refptr<Buffer> buffer,
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
index 4d57dceee99..20750bb6e80 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -1009,6 +1009,10 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_LATENCY_QUERY_CHROMIUM",
},
{
+ 0x6009,
+ "GL_PROGRAM_COMPLETION_QUERY_CHROMIUM",
+ },
+ {
0x78EC,
"GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM",
},
@@ -7046,14 +7050,9 @@ std::string GLES2Util::GetStringGLState(uint32_t value) {
{GL_TRANSFORM_FEEDBACK_BUFFER_BINDING,
"GL_TRANSFORM_FEEDBACK_BUFFER_BINDING"},
{GL_TRANSFORM_FEEDBACK_PAUSED, "GL_TRANSFORM_FEEDBACK_PAUSED"},
- {GL_TRANSFORM_FEEDBACK_BUFFER_SIZE, "GL_TRANSFORM_FEEDBACK_BUFFER_SIZE"},
- {GL_TRANSFORM_FEEDBACK_BUFFER_START,
- "GL_TRANSFORM_FEEDBACK_BUFFER_START"},
{GL_UNIFORM_BUFFER_BINDING, "GL_UNIFORM_BUFFER_BINDING"},
{GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT,
"GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT"},
- {GL_UNIFORM_BUFFER_SIZE, "GL_UNIFORM_BUFFER_SIZE"},
- {GL_UNIFORM_BUFFER_START, "GL_UNIFORM_BUFFER_START"},
{GL_UNPACK_IMAGE_HEIGHT, "GL_UNPACK_IMAGE_HEIGHT"},
{GL_UNPACK_ROW_LENGTH, "GL_UNPACK_ROW_LENGTH"},
{GL_UNPACK_SKIP_IMAGES, "GL_UNPACK_SKIP_IMAGES"},
@@ -7376,6 +7375,8 @@ std::string GLES2Util::GetStringQueryTarget(uint32_t value) {
{GL_COMMANDS_COMPLETED_CHROMIUM, "GL_COMMANDS_COMPLETED_CHROMIUM"},
{GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM,
"GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM"},
+ {GL_PROGRAM_COMPLETION_QUERY_CHROMIUM,
+ "GL_PROGRAM_COMPLETION_QUERY_CHROMIUM"},
};
return GLES2Util::GetQualifiedEnumString(string_table,
base::size(string_table), value);
diff --git a/chromium/gpu/command_buffer/common/id_type.h b/chromium/gpu/command_buffer/common/id_type.h
deleted file mode 100644
index e3efbfa36b2..00000000000
--- a/chromium/gpu/command_buffer/common/id_type.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_COMMAND_BUFFER_COMMON_ID_TYPE_H_
-#define GPU_COMMAND_BUFFER_COMMON_ID_TYPE_H_
-
-#include <stdint.h>
-#include <cstddef>
-#include <ostream>
-#include <type_traits>
-
-// IdType32<>, IdType64<>, etc. wrap an integer id in a custom, type-safe type.
-//
-// IdType32<Foo> is an alternative to int, for a class Foo with methods like:
-//
-// int GetId() { return id_; };
-// static Foo* FromId(int id) { return g_all_foos_by_id[id]; }
-//
-// Such methods are a standard means of safely referring to objects across
-// thread and process boundaries. But if a nearby class Bar also represents
-// its IDs as a bare int, horrific mixups are possible -- one example, of many,
-// is http://crrev.com/365437. IdType<> offers compile-time protection against
-// such mishaps, since IdType32<Foo> is incompatible with IdType32<Bar>, even
-// though both just compile down to an int32_t.
-//
-// Templates in this file:
-// IdType32<T> / IdTypeU32<T>: Signed / unsigned 32-bit IDs
-// IdType64<T> / IdTypeU64<T>: Signed / unsigned 64-bit IDs
-// IdType<>: For when you need a different underlying type or
-// a default/null value other than zero.
-//
-// IdType32<Foo> behaves just like an int32_t in the following aspects:
-// - it can be used as a key in std::map and/or std::unordered_map;
-// - it can be used as an argument to DCHECK_EQ or streamed to LOG(ERROR);
-// - it has the same memory footprint and runtime overhead as int32_t;
-// - it can be copied by memcpy.
-// - it can be used in IPC messages.
-//
-// IdType32<Foo> has the following differences from a bare int32_t:
-// - it forces coercions to go through GetUnsafeValue and FromUnsafeValue;
-// - it restricts the set of available operations (i.e. no multiplication);
-// - it ensures initialization to zero and allows checking against
-// default-initialized values via is_null method.
-
-namespace gpu {
-
-template <typename TypeMarker, typename WrappedType, WrappedType kInvalidValue>
-class IdType {
- public:
- IdType() : value_(kInvalidValue) {}
- bool is_null() const { return value_ == kInvalidValue; }
-
- static IdType FromUnsafeValue(WrappedType value) { return IdType(value); }
- WrappedType GetUnsafeValue() const { return value_; }
-
- IdType(const IdType& other) = default;
- IdType& operator=(const IdType& other) = default;
-
- bool operator==(const IdType& other) const { return value_ == other.value_; }
- bool operator!=(const IdType& other) const { return value_ != other.value_; }
- bool operator<(const IdType& other) const { return value_ < other.value_; }
- bool operator<=(const IdType& other) const { return value_ <= other.value_; }
-
- // Hasher to use in std::unordered_map, std::unordered_set, etc.
- struct Hasher {
- using argument_type = IdType;
- using result_type = std::size_t;
- result_type operator()(const argument_type& id) const {
- return std::hash<WrappedType>()(id.GetUnsafeValue());
- }
- };
-
- protected:
- explicit IdType(WrappedType val) : value_(val) {}
-
- private:
- // In theory WrappedType could be any type that supports ==, <, <<, std::hash,
- // etc., but to make things simpler (both for users and for maintainers) we
- // explicitly restrict the design space to integers. This means the users
- // can safely assume that IdType is relatively small and cheap to copy
- // and the maintainers don't have to worry about WrappedType being a complex
- // type (i.e. std::string or std::pair or a move-only type).
- using IntegralWrappedType =
- typename std::enable_if<std::is_integral<WrappedType>::value,
- WrappedType>::type;
- IntegralWrappedType value_;
-};
-
-// Type aliases for convenience:
-template <typename TypeMarker>
-using IdType32 = IdType<TypeMarker, int32_t, 0>;
-template <typename TypeMarker>
-using IdTypeU32 = IdType<TypeMarker, uint32_t, 0>;
-template <typename TypeMarker>
-using IdType64 = IdType<TypeMarker, int64_t, 0>;
-template <typename TypeMarker>
-using IdTypeU64 = IdType<TypeMarker, uint64_t, 0>;
-
-template <typename TypeMarker, typename WrappedType, WrappedType kInvalidValue>
-std::ostream& operator<<(
- std::ostream& stream,
- const IdType<TypeMarker, WrappedType, kInvalidValue>& id) {
- return stream << id.GetUnsafeValue();
-}
-
-} // namespace gpu
-
-#endif // CONTENT_COMMON_ID_TYPE_H_
diff --git a/chromium/gpu/command_buffer/common/id_type_unittest.cc b/chromium/gpu/command_buffer/common/id_type_unittest.cc
deleted file mode 100644
index a8f0b349e8b..00000000000
--- a/chromium/gpu/command_buffer/common/id_type_unittest.cc
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-#include <map>
-#include <sstream>
-#include <string>
-#include <type_traits>
-#include <unordered_map>
-
-#include "gpu/command_buffer/common/id_type.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace gpu {
-
-namespace {
-
-class Foo;
-using FooId = IdType<Foo, int, 0>;
-
-class Bar;
-using BarId = IdType<Bar, int, 0>;
-
-class AnotherIdMarker;
-class DerivedId : public IdType<AnotherIdMarker, int, 0> {
- public:
- explicit DerivedId(int unsafe_value)
- : IdType<AnotherIdMarker, int, 0>(unsafe_value) {}
-};
-
-} // namespace
-
-TEST(IdType, DefaultValueIsInvalid) {
- FooId foo_id;
- EXPECT_TRUE(foo_id.is_null());
-}
-
-TEST(IdType, NormalValueIsValid) {
- FooId foo_id = FooId::FromUnsafeValue(123);
- EXPECT_FALSE(foo_id.is_null());
-}
-
-TEST(IdType, OutputStreamTest) {
- FooId foo_id = FooId::FromUnsafeValue(123);
-
- std::ostringstream ss;
- ss << foo_id;
- EXPECT_EQ("123", ss.str());
-}
-
-TEST(IdType, IdType32) {
- IdType32<Foo> id;
-
- EXPECT_EQ(0, id.GetUnsafeValue());
- static_assert(sizeof(int32_t) == sizeof(id), "");
-}
-
-TEST(IdType, IdTypeU32) {
- IdTypeU32<Foo> id;
-
- EXPECT_EQ(0u, id.GetUnsafeValue());
- static_assert(sizeof(uint32_t) == sizeof(id), "");
-}
-
-TEST(IdType, IdType64) {
- IdType64<Foo> id;
-
- EXPECT_EQ(0, id.GetUnsafeValue());
- static_assert(sizeof(int64_t) == sizeof(id), "");
-}
-
-TEST(IdType, IdTypeU64) {
- IdTypeU64<Foo> id;
-
- EXPECT_EQ(0u, id.GetUnsafeValue());
- static_assert(sizeof(uint64_t) == sizeof(id), "");
-}
-
-TEST(IdType, DerivedClasses) {
- DerivedId derived_id(456);
-
- std::ostringstream ss;
- ss << derived_id;
- EXPECT_EQ("456", ss.str());
-
- std::map<DerivedId, std::string> ordered_map;
- ordered_map[derived_id] = "blah";
- EXPECT_EQ(ordered_map[derived_id], "blah");
-
- std::unordered_map<DerivedId, std::string, DerivedId::Hasher> unordered_map;
- unordered_map[derived_id] = "blah2";
- EXPECT_EQ(unordered_map[derived_id], "blah2");
-}
-
-TEST(IdType, StaticAsserts) {
- static_assert(!std::is_constructible<FooId, int>::value,
- "Should be impossible to construct FooId from a raw integer.");
- static_assert(!std::is_convertible<int, FooId>::value,
- "Should be impossible to convert a raw integer into FooId.");
-
- static_assert(!std::is_constructible<FooId, BarId>::value,
- "Should be impossible to construct FooId from a BarId.");
- static_assert(!std::is_convertible<BarId, FooId>::value,
- "Should be impossible to convert a BarId into FooId.");
-
- // The presence of a custom default constructor means that FooId is not a
- // "trivial" class and therefore is not a POD type (unlike an int32_t).
- // At the same time FooId has almost all of the properties of a POD type:
- // - is "trivially copyable" (i.e. is memcpy-able),
- // - has "standard layout" (i.e. interops with things expecting C layout).
- // See http://stackoverflow.com/a/7189821 for more info about these
- // concepts.
- static_assert(std::is_standard_layout<FooId>::value,
- "FooId should have standard layout. "
- "See http://stackoverflow.com/a/7189821 for more info.");
- static_assert(sizeof(FooId) == sizeof(int),
- "FooId should be the same size as the raw integer it wraps.");
- // TODO(lukasza): Enable these once <type_traits> supports all the standard
- // C++11 equivalents (i.e. std::is_trivially_copyable instead of the
- // non-standard std::has_trivial_copy_assign).
- // static_assert(std::has_trivial_copy_constructor<FooId>::value,
- // "FooId should have a trivial copy constructor.");
- // static_assert(std::has_trivial_copy_assign<FooId>::value,
- // "FooId should have a trivial copy assignment operator.");
- // static_assert(std::has_trivial_destructor<FooId>::value,
- // "FooId should have a trivial destructor.");
-}
-
-class IdTypeSpecificValueTest : public ::testing::TestWithParam<int> {
- protected:
- FooId test_id() { return FooId::FromUnsafeValue(GetParam()); }
-
- FooId other_id() {
- if (GetParam() != std::numeric_limits<int>::max())
- return FooId::FromUnsafeValue(GetParam() + 1);
- else
- return FooId::FromUnsafeValue(std::numeric_limits<int>::min());
- }
-};
-
-TEST_P(IdTypeSpecificValueTest, ComparisonToSelf) {
- EXPECT_TRUE(test_id() == test_id());
- EXPECT_FALSE(test_id() != test_id());
- EXPECT_FALSE(test_id() < test_id());
-}
-
-TEST_P(IdTypeSpecificValueTest, ComparisonToOther) {
- EXPECT_FALSE(test_id() == other_id());
- EXPECT_TRUE(test_id() != other_id());
-}
-
-TEST_P(IdTypeSpecificValueTest, UnsafeValueRoundtrips) {
- int original_value = GetParam();
- FooId id = FooId::FromUnsafeValue(original_value);
- int final_value = id.GetUnsafeValue();
- EXPECT_EQ(original_value, final_value);
-}
-
-TEST_P(IdTypeSpecificValueTest, Copying) {
- FooId original = test_id();
-
- FooId copy_via_constructor(original);
- EXPECT_EQ(original, copy_via_constructor);
-
- FooId copy_via_assignment;
- copy_via_assignment = original;
- EXPECT_EQ(original, copy_via_assignment);
-}
-
-TEST_P(IdTypeSpecificValueTest, StdUnorderedMap) {
- std::unordered_map<FooId, std::string, FooId::Hasher> map;
-
- map[test_id()] = "test_id";
- map[other_id()] = "other_id";
-
- EXPECT_EQ(map[test_id()], "test_id");
- EXPECT_EQ(map[other_id()], "other_id");
-}
-
-TEST_P(IdTypeSpecificValueTest, StdMap) {
- std::map<FooId, std::string> map;
-
- map[test_id()] = "test_id";
- map[other_id()] = "other_id";
-
- EXPECT_EQ(map[test_id()], "test_id");
- EXPECT_EQ(map[other_id()], "other_id");
-}
-
-INSTANTIATE_TEST_SUITE_P(,
- IdTypeSpecificValueTest,
- ::testing::Values(std::numeric_limits<int>::min(),
- -1,
- 0,
- 1,
- 123,
- std::numeric_limits<int>::max()));
-
-} // namespace gpu
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format.h
index 327d1f95858..09d6a09fe78 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_format.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format.h
@@ -13,6 +13,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "gpu/command_buffer/common/common_cmd_format.h"
+#include "gpu/command_buffer/common/gl2_types.h"
#include "gpu/command_buffer/common/webgpu_cmd_ids.h"
#include "ui/gfx/buffer_types.h"
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h
index fdb77cc04d8..55d67cf9524 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format_autogen.h
@@ -59,4 +59,109 @@ static_assert(offsetof(DawnCommands, commands_shm_offset) == 8,
static_assert(offsetof(DawnCommands, size) == 12,
"offset of DawnCommands size should be 12");
+struct AssociateMailboxImmediate {
+ typedef AssociateMailboxImmediate ValueType;
+ static const CommandId kCmdId = kAssociateMailboxImmediate;
+ static const cmd::ArgFlags kArgFlags = cmd::kAtLeastN;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeDataSize() {
+ return static_cast<uint32_t>(sizeof(GLbyte) * 16);
+ }
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType) + ComputeDataSize());
+ }
+
+ void SetHeader() { header.SetCmdByTotalSize<ValueType>(ComputeSize()); }
+
+ void Init(GLuint _device_id,
+ GLuint _device_generation,
+ GLuint _id,
+ GLuint _generation,
+ GLuint _usage,
+ const GLbyte* _mailbox) {
+ SetHeader();
+ device_id = _device_id;
+ device_generation = _device_generation;
+ id = _id;
+ generation = _generation;
+ usage = _usage;
+ memcpy(ImmediateDataAddress(this), _mailbox, ComputeDataSize());
+ }
+
+ void* Set(void* cmd,
+ GLuint _device_id,
+ GLuint _device_generation,
+ GLuint _id,
+ GLuint _generation,
+ GLuint _usage,
+ const GLbyte* _mailbox) {
+ static_cast<ValueType*>(cmd)->Init(_device_id, _device_generation, _id,
+ _generation, _usage, _mailbox);
+ const uint32_t size = ComputeSize();
+ return NextImmediateCmdAddressTotalSize<ValueType>(cmd, size);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t device_id;
+ uint32_t device_generation;
+ uint32_t id;
+ uint32_t generation;
+ uint32_t usage;
+};
+
+static_assert(sizeof(AssociateMailboxImmediate) == 24,
+ "size of AssociateMailboxImmediate should be 24");
+static_assert(offsetof(AssociateMailboxImmediate, header) == 0,
+ "offset of AssociateMailboxImmediate header should be 0");
+static_assert(offsetof(AssociateMailboxImmediate, device_id) == 4,
+ "offset of AssociateMailboxImmediate device_id should be 4");
+static_assert(
+ offsetof(AssociateMailboxImmediate, device_generation) == 8,
+ "offset of AssociateMailboxImmediate device_generation should be 8");
+static_assert(offsetof(AssociateMailboxImmediate, id) == 12,
+ "offset of AssociateMailboxImmediate id should be 12");
+static_assert(offsetof(AssociateMailboxImmediate, generation) == 16,
+ "offset of AssociateMailboxImmediate generation should be 16");
+static_assert(offsetof(AssociateMailboxImmediate, usage) == 20,
+ "offset of AssociateMailboxImmediate usage should be 20");
+
+struct DissociateMailbox {
+ typedef DissociateMailbox ValueType;
+ static const CommandId kCmdId = kDissociateMailbox;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(1);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _texture_id, GLuint _texture_generation) {
+ SetHeader();
+ texture_id = _texture_id;
+ texture_generation = _texture_generation;
+ }
+
+ void* Set(void* cmd, GLuint _texture_id, GLuint _texture_generation) {
+ static_cast<ValueType*>(cmd)->Init(_texture_id, _texture_generation);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t texture_id;
+ uint32_t texture_generation;
+};
+
+static_assert(sizeof(DissociateMailbox) == 12,
+ "size of DissociateMailbox should be 12");
+static_assert(offsetof(DissociateMailbox, header) == 0,
+ "offset of DissociateMailbox header should be 0");
+static_assert(offsetof(DissociateMailbox, texture_id) == 4,
+ "offset of DissociateMailbox texture_id should be 4");
+static_assert(offsetof(DissociateMailbox, texture_generation) == 8,
+ "offset of DissociateMailbox texture_generation should be 8");
+
#endif // GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_FORMAT_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h
index 61ac26656b6..8096fe5d7ea 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h
@@ -28,4 +28,55 @@ TEST_F(WebGPUFormatTest, DawnCommands) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(WebGPUFormatTest, AssociateMailboxImmediate) {
+ const int kSomeBaseValueToTestWith = 51;
+ static GLbyte data[] = {
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 0),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 1),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 2),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 3),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 4),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 5),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 6),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 7),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 8),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 9),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 10),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 11),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 12),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 13),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 14),
+ static_cast<GLbyte>(kSomeBaseValueToTestWith + 15),
+ };
+ cmds::AssociateMailboxImmediate& cmd =
+ *GetBufferAs<cmds::AssociateMailboxImmediate>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12),
+ static_cast<GLuint>(13), static_cast<GLuint>(14),
+ static_cast<GLuint>(15), data);
+ EXPECT_EQ(static_cast<uint32_t>(cmds::AssociateMailboxImmediate::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)),
+ cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.device_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.device_generation);
+ EXPECT_EQ(static_cast<GLuint>(13), cmd.id);
+ EXPECT_EQ(static_cast<GLuint>(14), cmd.generation);
+ EXPECT_EQ(static_cast<GLuint>(15), cmd.usage);
+ CheckBytesWrittenMatchesExpectedSize(
+ next_cmd, sizeof(cmd) + RoundSizeToMultipleOfEntries(sizeof(data)));
+}
+
+TEST_F(WebGPUFormatTest, DissociateMailbox) {
+ cmds::DissociateMailbox& cmd = *GetBufferAs<cmds::DissociateMailbox>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DissociateMailbox::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.texture_id);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.texture_generation);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
#endif // GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_FORMAT_TEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h
index 0c953a8a7cb..ff35315d2d1 100644
--- a/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/webgpu_cmd_ids_autogen.h
@@ -11,7 +11,10 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_IDS_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_COMMON_WEBGPU_CMD_IDS_AUTOGEN_H_
-#define WEBGPU_COMMAND_LIST(OP) OP(DawnCommands) /* 256 */
+#define WEBGPU_COMMAND_LIST(OP) \
+ OP(DawnCommands) /* 256 */ \
+ OP(AssociateMailboxImmediate) /* 257 */ \
+ OP(DissociateMailbox) /* 258 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index 8ff7c571352..5a84425376f 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -79,6 +79,7 @@ target(link_target_type, "service_sources") {
]
public_deps = [
+ "//base/util/type_safety",
"//gpu/command_buffer/common:common_sources",
"//url:url",
]
@@ -282,6 +283,7 @@ target(link_target_type, "gles2_sources") {
include_dirs = [ "//third_party/mesa_headers" ]
public_deps = [
+ "//base/util/type_safety",
"//cc/paint",
"//gpu/command_buffer/common",
"//gpu/command_buffer/common:gles2_sources",
@@ -345,6 +347,7 @@ target(link_target_type, "gles2_sources") {
}
if (is_mac) {
+ deps += [ "//components/viz/common:metal_context_provider" ]
sources += [
"shared_image_backing_factory_iosurface.h",
"shared_image_backing_factory_iosurface.mm",
@@ -354,6 +357,7 @@ target(link_target_type, "gles2_sources") {
libs = [
"Cocoa.framework",
"IOSurface.framework",
+ "Metal.framework",
"OpenGL.framework",
]
}
@@ -384,6 +388,13 @@ target(link_target_type, "gles2_sources") {
]
}
}
+
+ if (is_win) {
+ sources += [
+ "swap_chain_factory_dxgi.cc",
+ "swap_chain_factory_dxgi.h",
+ ]
+ }
}
proto_library("disk_cache_proto") {
diff --git a/chromium/gpu/command_buffer/service/DEPS b/chromium/gpu/command_buffer/service/DEPS
index 1373db162ff..00595bae252 100644
--- a/chromium/gpu/command_buffer/service/DEPS
+++ b/chromium/gpu/command_buffer/service/DEPS
@@ -1,6 +1,7 @@
include_rules = [
"+cc/paint",
"+third_party/skia",
+ "+components/viz/common/gpu/metal_context_provider.h",
"+components/viz/common/gpu/vulkan_context_provider.h",
"+components/viz/common/resources/resource_format.h",
"+components/viz/common/resources/resource_format_utils.h",
diff --git a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
index 2fe75b76725..f133e9a574d 100644
--- a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
@@ -24,8 +24,8 @@ AbstractTextureImplOnSharedContext::AbstractTextureImplOnSharedContext(
GLint border,
GLenum format,
GLenum type,
- gpu::SharedContextState* shared_context_state)
- : shared_context_state_(shared_context_state) {
+ scoped_refptr<gpu::SharedContextState> shared_context_state)
+ : shared_context_state_(std::move(shared_context_state)) {
DCHECK(shared_context_state_);
// The calling code which wants to create this abstract texture should have
@@ -113,7 +113,7 @@ void AbstractTextureImplOnSharedContext::OnContextLost() {
if (cleanup_cb_)
std::move(cleanup_cb_).Run(this);
shared_context_state_->RemoveContextLostObserver(this);
- shared_context_state_ = nullptr;
+ shared_context_state_.reset();
}
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h
index 78a9ae70083..f9b87579450 100644
--- a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h
+++ b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.h
@@ -31,7 +31,7 @@ class GPU_GLES2_EXPORT AbstractTextureImplOnSharedContext
GLint border,
GLenum format,
GLenum type,
- gpu::SharedContextState* shared_context_state);
+ scoped_refptr<gpu::SharedContextState> shared_context_state);
~AbstractTextureImplOnSharedContext() override;
// AbstractTexture implementation.
@@ -49,7 +49,7 @@ class GPU_GLES2_EXPORT AbstractTextureImplOnSharedContext
private:
Texture* texture_;
- SharedContextState* shared_context_state_ = nullptr;
+ scoped_refptr<SharedContextState> shared_context_state_;
CleanupCallback cleanup_cb_;
};
diff --git a/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc b/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc
index 9237e0b9c1a..fc94e3db841 100644
--- a/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc
+++ b/chromium/gpu/command_buffer/service/command_buffer_service_unittest.cc
@@ -17,6 +17,7 @@
namespace gpu {
using testing::_;
+using testing::DoAll;
using testing::Mock;
using testing::Return;
using testing::Sequence;
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
index 4c23e701e7f..7be2287e19a 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
@@ -5,13 +5,21 @@
#include "gpu/command_buffer/service/external_vk_image_backing.h"
#include <utility>
+#include <vector>
+#include "base/memory/unsafe_shared_memory_region.h"
#include "base/posix/eintr_wrapper.h"
+#include "base/system/sys_info.h"
#include "build/build_config.h"
+#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/vulkan/vulkan_command_buffer.h"
+#include "gpu/vulkan/vulkan_command_pool.h"
+#include "gpu/vulkan/vulkan_fence_helper.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/gl_context.h"
#if defined(OS_FUCHSIA)
@@ -24,6 +32,309 @@
namespace gpu {
+namespace {
+
+VkResult CreateExternalVkImage(SharedContextState* context_state,
+ VkFormat format,
+ const gfx::Size& size,
+ bool is_transfer_dst,
+ VkImage* image) {
+ VkExternalMemoryImageCreateInfoKHR external_info = {
+ .sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR,
+ .handleTypes = context_state->vk_context_provider()
+ ->GetVulkanImplementation()
+ ->GetExternalImageHandleType(),
+ };
+
+ auto usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (is_transfer_dst)
+ usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
+
+ VkImageCreateInfo create_info = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+ .pNext = &external_info,
+ .flags = 0,
+ .imageType = VK_IMAGE_TYPE_2D,
+ .format = format,
+ .extent = {size.width(), size.height(), 1},
+ .mipLevels = 1,
+ .arrayLayers = 1,
+ .samples = VK_SAMPLE_COUNT_1_BIT,
+ .tiling = VK_IMAGE_TILING_OPTIMAL,
+ .usage = usage,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ .queueFamilyIndexCount = 0,
+ .pQueueFamilyIndices = nullptr,
+ .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+ };
+
+ VkDevice device =
+ context_state->vk_context_provider()->GetDeviceQueue()->GetVulkanDevice();
+ return vkCreateImage(device, &create_info, nullptr, image);
+}
+
+void TransitionToColorAttachment(VkImage image,
+ SharedContextState* context_state,
+ VulkanCommandPool* command_pool) {
+ auto command_buffer = command_pool->CreatePrimaryCommandBuffer();
+ CHECK(command_buffer->Initialize());
+
+ {
+ ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
+ command_buffer->TransitionImageLayout(
+ image, VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ }
+
+ // TODO(penghuang): get rid of this submission if poosible.
+ command_buffer->Submit(0, nullptr, 0, nullptr);
+
+ context_state->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper()
+ ->EnqueueVulkanObjectCleanupForSubmittedWork(std::move(command_buffer));
+}
+
+uint32_t FindMemoryTypeIndex(SharedContextState* context_state,
+ const VkMemoryRequirements& requirements,
+ VkMemoryPropertyFlags flags) {
+ VkPhysicalDevice physical_device = context_state->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanPhysicalDevice();
+ VkPhysicalDeviceMemoryProperties properties;
+ vkGetPhysicalDeviceMemoryProperties(physical_device, &properties);
+ constexpr uint32_t kInvalidTypeIndex = 32;
+ for (uint32_t i = 0; i < kInvalidTypeIndex; i++) {
+ if (((1u << i) & requirements.memoryTypeBits) == 0)
+ continue;
+ if ((properties.memoryTypes[i].propertyFlags & flags) != flags)
+ continue;
+ return i;
+ }
+ NOTREACHED();
+ return kInvalidTypeIndex;
+}
+
+} // namespace
+
+// static
+std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
+ SharedContextState* context_state,
+ VulkanCommandPool* command_pool,
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data,
+ bool using_gmb) {
+ VkDevice device =
+ context_state->vk_context_provider()->GetDeviceQueue()->GetVulkanDevice();
+ VkFormat vk_format = ToVkFormat(format);
+ VkImage image;
+ bool is_transfer_dst = using_gmb || !pixel_data.empty();
+ VkResult result = CreateExternalVkImage(context_state, vk_format, size,
+ is_transfer_dst, &image);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "Failed to create external VkImage: " << result;
+ return nullptr;
+ }
+
+ VkMemoryRequirements requirements;
+ vkGetImageMemoryRequirements(device, image, &requirements);
+
+ if (!requirements.memoryTypeBits) {
+ DLOG(ERROR)
+ << "Unable to find appropriate memory type for external VkImage";
+ vkDestroyImage(device, image, nullptr);
+ return nullptr;
+ }
+
+ VkExportMemoryAllocateInfoKHR external_info = {
+ .sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR,
+ .handleTypes = context_state->vk_context_provider()
+ ->GetVulkanImplementation()
+ ->GetExternalImageHandleType(),
+ };
+
+ VkMemoryAllocateInfo mem_alloc_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .pNext = &external_info,
+ .allocationSize = requirements.size,
+ .memoryTypeIndex = FindMemoryTypeIndex(
+ context_state, requirements, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
+ };
+
+ VkDeviceMemory memory;
+ // TODO(crbug.com/932286): Allocating a separate piece of memory for every
+ // VkImage might have too much overhead. It is recommended that one large
+ // VkDeviceMemory be sub-allocated to multiple VkImages instead.
+ result = vkAllocateMemory(device, &mem_alloc_info, nullptr, &memory);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "Failed to allocate memory for external VkImage: " << result;
+ vkDestroyImage(device, image, nullptr);
+ return nullptr;
+ }
+
+ result = vkBindImageMemory(device, image, memory, 0);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "Failed to bind memory to external VkImage: " << result;
+ vkFreeMemory(device, memory, nullptr);
+ vkDestroyImage(device, image, nullptr);
+ return nullptr;
+ }
+
+ // TODO(penghuang): track image layout to avoid unnecessary image layout
+ // transition. https://crbug.com/965955
+ TransitionToColorAttachment(image, context_state, command_pool);
+
+ auto backing = base::WrapUnique(new ExternalVkImageBacking(
+ mailbox, format, size, color_space, usage, context_state, image, memory,
+ requirements.size, vk_format, command_pool));
+
+ if (!pixel_data.empty())
+ backing->WritePixels(pixel_data, 0);
+
+ return backing;
+}
+
+// static
+std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
+ SharedContextState* context_state,
+ VulkanCommandPool* command_pool,
+ const Mailbox& mailbox,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat buffer_format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ if (gfx::NumberOfPlanesForBufferFormat(buffer_format) != 1) {
+ DLOG(ERROR) << "Invalid image format.";
+ return nullptr;
+ }
+
+ if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) {
+ DLOG(ERROR) << "Invalid image size for format.";
+ return nullptr;
+ }
+
+ auto* vulkan_implementation =
+ context_state->vk_context_provider()->GetVulkanImplementation();
+ auto resource_format = viz::GetResourceFormat(buffer_format);
+ if (vulkan_implementation->CanImportGpuMemoryBuffer(handle.type)) {
+ VkDevice vk_device = context_state->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetVulkanDevice();
+ VkImage vk_image = VK_NULL_HANDLE;
+ VkImageCreateInfo vk_image_info = {VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO};
+ VkDeviceMemory vk_device_memory = VK_NULL_HANDLE;
+ VkDeviceSize memory_size = 0;
+
+ if (!vulkan_implementation->CreateImageFromGpuMemoryHandle(
+ vk_device, std::move(handle), size, &vk_image, &vk_image_info,
+ &vk_device_memory, &memory_size)) {
+ DLOG(ERROR) << "Failed to create VkImage from GpuMemoryHandle.";
+ return nullptr;
+ }
+
+ VkFormat expected_format = ToVkFormat(resource_format);
+ if (expected_format != vk_image_info.format) {
+ DLOG(ERROR) << "BufferFormat doesn't match the buffer";
+ vkFreeMemory(vk_device, vk_device_memory, nullptr);
+ vkDestroyImage(vk_device, vk_image, nullptr);
+ return nullptr;
+ }
+
+ // TODO(penghuang): track image layout to avoid unnecessary image layout
+ // transition. https://crbug.com/965955
+ TransitionToColorAttachment(vk_image, context_state, command_pool);
+
+ return base::WrapUnique(new ExternalVkImageBacking(
+ mailbox, viz::GetResourceFormat(buffer_format), size, color_space,
+ usage, context_state, vk_image, vk_device_memory, memory_size,
+ vk_image_info.format, command_pool));
+ }
+
+ DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER);
+ if (!base::IsValueInRangeForNumericType<size_t>(handle.stride))
+ return nullptr;
+
+ int32_t width_in_bytes = 0;
+ if (!viz::ResourceSizes::MaybeWidthInBytes(size.width(), resource_format,
+ &width_in_bytes)) {
+ DLOG(ERROR) << "ResourceSizes::MaybeWidthInBytes() failed.";
+ return nullptr;
+ }
+
+ if (handle.stride < width_in_bytes) {
+ DLOG(ERROR) << "Invalid GMB stride.";
+ return nullptr;
+ }
+
+ auto bits_per_pixel = viz::BitsPerPixel(resource_format);
+ switch (bits_per_pixel) {
+ case 64:
+ case 32:
+ case 16:
+ if (handle.stride % (bits_per_pixel / 8) != 0) {
+ DLOG(ERROR) << "Invalid GMB stride.";
+ return nullptr;
+ }
+ break;
+ case 8:
+ case 4:
+ break;
+ case 12:
+ // We are not supporting YVU420 and YUV_420_BIPLANAR format.
+ default:
+ NOTREACHED();
+ return nullptr;
+ }
+
+ if (!handle.region.IsValid()) {
+ DLOG(ERROR) << "Invalid GMB shared memory region.";
+ return nullptr;
+ }
+
+ base::CheckedNumeric<size_t> checked_size = handle.stride;
+ checked_size *= size.height();
+ if (!checked_size.IsValid()) {
+ DLOG(ERROR) << "Invalid GMB size.";
+ return nullptr;
+ }
+
+ // Minimize the amount of address space we use but make sure offset is a
+ // multiple of page size as required by MapAt().
+ size_t memory_offset =
+ handle.offset % base::SysInfo::VMAllocationGranularity();
+ size_t map_offset =
+ base::SysInfo::VMAllocationGranularity() *
+ (handle.offset / base::SysInfo::VMAllocationGranularity());
+ checked_size += memory_offset;
+ if (!checked_size.IsValid()) {
+ DLOG(ERROR) << "Invalid GMB size.";
+ return nullptr;
+ }
+
+ auto shared_memory_mapping = handle.region.MapAt(
+ static_cast<off_t>(map_offset), checked_size.ValueOrDie());
+
+ if (!shared_memory_mapping.IsValid()) {
+ DLOG(ERROR) << "Failed to map shared memory.";
+ return nullptr;
+ }
+
+ auto backing = Create(context_state, command_pool, mailbox, resource_format,
+ size, color_space, usage, base::span<const uint8_t>(),
+ true /* using_gmb */);
+ if (!backing)
+ return nullptr;
+
+ backing->InstallSharedMemory(std::move(shared_memory_mapping), handle.stride,
+ memory_offset);
+ return backing;
+}
+
ExternalVkImageBacking::ExternalVkImageBacking(
const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -34,7 +345,8 @@ ExternalVkImageBacking::ExternalVkImageBacking(
VkImage image,
VkDeviceMemory memory,
size_t memory_size,
- VkFormat vk_format)
+ VkFormat vk_format,
+ VulkanCommandPool* command_pool)
: SharedImageBacking(mailbox,
format,
size,
@@ -46,59 +358,36 @@ ExternalVkImageBacking::ExternalVkImageBacking(
image_(image),
memory_(memory),
memory_size_(memory_size),
- vk_format_(vk_format) {}
+ vk_format_(vk_format),
+ command_pool_(command_pool) {}
ExternalVkImageBacking::~ExternalVkImageBacking() {
- // Destroy() will do any necessary cleanup.
+ DCHECK(image_ == VK_NULL_HANDLE);
+ DCHECK(memory_ == VK_NULL_HANDLE);
}
bool ExternalVkImageBacking::BeginAccess(
bool readonly,
std::vector<SemaphoreHandle>* semaphore_handles) {
- DCHECK(semaphore_handles);
- DCHECK(semaphore_handles->empty());
- if (is_write_in_progress_) {
- LOG(ERROR) << "Unable to begin read or write access because another write "
- "access is in progress";
- return false;
- }
-
- if (reads_in_progress_ && !readonly) {
- LOG(ERROR)
- << "Unable to begin write access because a read access is in progress";
- return false;
- }
-
if (readonly) {
- ++reads_in_progress_;
- // A semaphore will become unsignaled, when it has been signaled and waited,
- // so it is not safe to reuse it.
- semaphore_handles->push_back(std::move(write_semaphore_handle_));
- } else {
- is_write_in_progress_ = true;
- *semaphore_handles = std::move(read_semaphore_handles_);
- read_semaphore_handles_.clear();
- if (write_semaphore_handle_.is_valid())
- semaphore_handles->push_back(std::move(write_semaphore_handle_));
+ if (reads_in_progress_ == 0 && shared_memory_mapping_.IsValid() &&
+ shared_memory_is_updated_) {
+ if (!WritePixels(
+ shared_memory_mapping_.GetMemoryAsSpan<const uint8_t>().subspan(
+ memory_offset_),
+ stride_))
+ return false;
+ shared_memory_is_updated_ = false;
+ }
}
- return true;
+ return BeginAccessInternal(readonly, semaphore_handles);
}
void ExternalVkImageBacking::EndAccess(bool readonly,
SemaphoreHandle semaphore_handle) {
- DCHECK(semaphore_handle.is_valid());
-
- if (readonly) {
- DCHECK_GT(reads_in_progress_, 0u);
- --reads_in_progress_;
- read_semaphore_handles_.push_back(std::move(semaphore_handle));
- } else {
- DCHECK(is_write_in_progress_);
- DCHECK(!write_semaphore_handle_.is_valid());
- DCHECK(read_semaphore_handles_.empty());
- is_write_in_progress_ = false;
- write_semaphore_handle_ = std::move(semaphore_handle);
- }
+ EndAccessInternal(readonly, std::move(semaphore_handle));
+ // TODO(penghuang): read pixels back from VkImage to shared memory GMB, if
+ // this feature is needed.
}
bool ExternalVkImageBacking::IsCleared() const {
@@ -109,21 +398,26 @@ void ExternalVkImageBacking::SetCleared() {
is_cleared_ = true;
}
-void ExternalVkImageBacking::Update() {}
+void ExternalVkImageBacking::Update() {
+ shared_memory_is_updated_ = true;
+}
void ExternalVkImageBacking::Destroy() {
- // TODO(crbug.com/932260): We call vkQueueWaitIdle to ensure all these objects
- // are no longer associated with any queue command that has not completed
- // execution yet. Remove this call once we have better alternatives.
- vkQueueWaitIdle(context_state()
- ->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanQueue());
- vkDestroyImage(device(), image_, nullptr);
- vkFreeMemory(device(), memory_, nullptr);
-
- if (texture_)
+ auto* fence_helper = context_state()
+ ->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper();
+ fence_helper->EnqueueImageCleanupForSubmittedWork(image_, memory_);
+ image_ = VK_NULL_HANDLE;
+ memory_ = VK_NULL_HANDLE;
+
+ if (texture_) {
+ // Ensure that a context is current before removing the ref and calling
+ // glDeleteTextures.
+ if (!context_state()->context()->IsCurrent(nullptr))
+ context_state()->context()->MakeCurrent(context_state()->surface());
texture_->RemoveLightweightRef(have_context());
+ }
}
bool ExternalVkImageBacking::ProduceLegacyMailbox(
@@ -137,6 +431,11 @@ bool ExternalVkImageBacking::ProduceLegacyMailbox(
std::unique_ptr<SharedImageRepresentationGLTexture>
ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
+ if (!(usage() & SHARED_IMAGE_USAGE_GLES2)) {
+ DLOG(ERROR) << "The backing is not created with GLES2 usage.";
+ return nullptr;
+ }
+
#if defined(OS_FUCHSIA)
NOTIMPLEMENTED_LOG_ONCE();
return nullptr;
@@ -151,7 +450,8 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
int memory_fd = -1;
vkGetMemoryFdKHR(device(), &get_fd_info, &memory_fd);
if (memory_fd < 0) {
- LOG(ERROR) << "Unable to extract file descriptor out of external VkImage";
+ DLOG(ERROR)
+ << "Unable to extract file descriptor out of external VkImage";
return nullptr;
}
@@ -223,9 +523,233 @@ ExternalVkImageBacking::ProduceSkia(
// This backing type is only used when vulkan is enabled, so SkiaRenderer
// should also be using Vulkan.
DCHECK_EQ(context_state_, context_state.get());
- DCHECK(context_state->use_vulkan_gr_context());
+ DCHECK(context_state->GrContextIsVulkan());
return std::make_unique<ExternalVkImageSkiaRepresentation>(manager, this,
tracker);
}
+void ExternalVkImageBacking::InstallSharedMemory(
+ base::WritableSharedMemoryMapping shared_memory_mapping,
+ size_t stride,
+ size_t memory_offset) {
+ DCHECK(!shared_memory_mapping_.IsValid());
+ DCHECK(shared_memory_mapping.IsValid());
+ shared_memory_mapping_ = std::move(shared_memory_mapping);
+ stride_ = stride;
+ memory_offset_ = memory_offset;
+ Update();
+}
+
+bool ExternalVkImageBacking::WritePixels(
+ const base::span<const uint8_t>& pixel_data,
+ size_t stride) {
+ DCHECK(stride == 0 || size().height() * stride <= pixel_data.size());
+ VkBufferCreateInfo buffer_create_info = {
+ .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ .size = pixel_data.size(),
+ .usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
+ .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ };
+ VkBuffer stage_buffer = VK_NULL_HANDLE;
+ // TODO: Consider reusing stage_buffer and stage_memory, if allocation causes
+ // performance issue.
+ VkResult result = vkCreateBuffer(device(), &buffer_create_info,
+ nullptr /* pAllocator */, &stage_buffer);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkCreateBuffer() failed." << result;
+ return false;
+ }
+
+ VkMemoryRequirements memory_requirements;
+ vkGetBufferMemoryRequirements(device(), stage_buffer, &memory_requirements);
+
+ VkMemoryAllocateInfo memory_allocate_info = {
+ .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ .allocationSize = memory_requirements.size,
+ .memoryTypeIndex =
+ FindMemoryTypeIndex(context_state_, memory_requirements,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT),
+
+ };
+ VkDeviceMemory stage_memory = VK_NULL_HANDLE;
+ result = vkAllocateMemory(device(), &memory_allocate_info,
+ nullptr /* pAllocator */, &stage_memory);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkAllocateMemory() failed. " << result;
+ vkDestroyBuffer(device(), stage_buffer, nullptr /* pAllocator */);
+ return false;
+ }
+
+ result = vkBindBufferMemory(device(), stage_buffer, stage_memory,
+ 0 /* memoryOffset */);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkBindBufferMemory() failed. " << result;
+ vkDestroyBuffer(device(), stage_buffer, nullptr /* pAllocator */);
+ vkFreeMemory(device(), stage_memory, nullptr /* pAllocator */);
+ return false;
+ }
+
+ void* data = nullptr;
+ result = vkMapMemory(device(), stage_memory, 0 /* memoryOffset */,
+ pixel_data.size(), 0, &data);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkMapMemory() failed. " << result;
+ vkDestroyBuffer(device(), stage_buffer, nullptr /* pAllocator */);
+ vkFreeMemory(device(), stage_memory, nullptr /* pAllocator */);
+ return false;
+ }
+ memcpy(data, pixel_data.data(), pixel_data.size());
+ vkUnmapMemory(device(), stage_memory);
+
+ auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
+ CHECK(command_buffer->Initialize());
+
+ {
+ ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
+
+ // TODO(penghuang): track image layout to avoid unnecessary image layout
+ // transition. https://crbug.com/965955
+ command_buffer->TransitionImageLayout(
+ image(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+ uint32_t buffer_width =
+ stride ? stride * 8 / BitsPerPixel(format()) : size().width();
+ command_buffer->CopyBufferToImage(stage_buffer, image(), buffer_width,
+ size().height(), size().width(),
+ size().height());
+
+ // TODO(penghuang): track image layout to avoid unnecessary image layout
+ // transition. https://crbug.com/965955
+ command_buffer->TransitionImageLayout(
+ image(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
+ }
+
+ std::vector<gpu::SemaphoreHandle> handles;
+ if (!BeginAccessInternal(false /* readonly */, &handles)) {
+ DLOG(ERROR) << "BeginAccess() failed.";
+ vkDestroyBuffer(device(), stage_buffer, nullptr /* pAllocator */);
+ vkFreeMemory(device(), stage_memory, nullptr /* pAllocator */);
+ return false;
+ }
+
+ if (!need_sychronization()) {
+ DCHECK(handles.empty());
+ command_buffer->Submit(0, nullptr, 0, nullptr);
+ EndAccessInternal(false /* readonly */, SemaphoreHandle());
+
+ auto* fence_helper = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper();
+ fence_helper->EnqueueVulkanObjectCleanupForSubmittedWork(
+ std::move(command_buffer));
+ fence_helper->EnqueueBufferCleanupForSubmittedWork(stage_buffer,
+ stage_memory);
+
+ return true;
+ }
+
+ std::vector<VkSemaphore> begin_access_semaphores;
+ begin_access_semaphores.reserve(handles.size() + 1);
+ for (auto& handle : handles) {
+ VkSemaphore semaphore = vulkan_implementation()->ImportSemaphoreHandle(
+ device(), std::move(handle));
+ begin_access_semaphores.emplace_back(semaphore);
+ }
+
+ VkSemaphore end_access_semaphore =
+ vulkan_implementation()->CreateExternalSemaphore(device());
+ command_buffer->Submit(begin_access_semaphores.size(),
+ begin_access_semaphores.data(), 1,
+ &end_access_semaphore);
+
+ auto end_access_semphore_handle = vulkan_implementation()->GetSemaphoreHandle(
+ device(), end_access_semaphore);
+ EndAccessInternal(false /* readonly */,
+ std::move(end_access_semphore_handle));
+
+ auto* fence_helper =
+ context_state_->vk_context_provider()->GetDeviceQueue()->GetFenceHelper();
+ fence_helper->EnqueueVulkanObjectCleanupForSubmittedWork(
+ std::move(command_buffer));
+ begin_access_semaphores.emplace_back(end_access_semaphore);
+ fence_helper->EnqueueSemaphoresCleanupForSubmittedWork(
+ begin_access_semaphores);
+ fence_helper->EnqueueBufferCleanupForSubmittedWork(stage_buffer,
+ stage_memory);
+
+ return true;
+}
+
+bool ExternalVkImageBacking::BeginAccessInternal(
+ bool readonly,
+ std::vector<SemaphoreHandle>* semaphore_handles) {
+ DCHECK(semaphore_handles);
+ DCHECK(semaphore_handles->empty());
+ if (is_write_in_progress_) {
+ DLOG(ERROR) << "Unable to begin read or write access because another write "
+ "access is in progress";
+ return false;
+ }
+
+ if (reads_in_progress_ && !readonly) {
+ DLOG(ERROR)
+ << "Unable to begin write access because a read access is in progress";
+ return false;
+ }
+
+ if (readonly) {
+ DLOG_IF(ERROR, reads_in_progress_)
+ << "Concurrent reading may cause problem.";
+ ++reads_in_progress_;
+ // If a shared image is read repeatedly without any write access,
+ // |read_semaphore_handles_| will never be consumed and released, and then
+ // chrome will run out of file descriptors. To avoid this problem, we wait
+ // on read semaphores for readonly access too. And in most cases, a shared
+ // image is only read from one vulkan device queue, so it should not have
+ // performance impact.
+ // TODO(penghuang): avoid waiting on read semaphores.
+ *semaphore_handles = std::move(read_semaphore_handles_);
+ read_semaphore_handles_.clear();
+
+ // A semaphore will become unsignaled, when it has been signaled and waited,
+ // so it is not safe to reuse it.
+ if (write_semaphore_handle_.is_valid())
+ semaphore_handles->push_back(std::move(write_semaphore_handle_));
+ } else {
+ is_write_in_progress_ = true;
+ *semaphore_handles = std::move(read_semaphore_handles_);
+ read_semaphore_handles_.clear();
+ if (write_semaphore_handle_.is_valid())
+ semaphore_handles->push_back(std::move(write_semaphore_handle_));
+ }
+ return true;
+}
+
+void ExternalVkImageBacking::EndAccessInternal(
+ bool readonly,
+ SemaphoreHandle semaphore_handle) {
+ if (readonly) {
+ DCHECK_GT(reads_in_progress_, 0u);
+ --reads_in_progress_;
+ } else {
+ DCHECK(is_write_in_progress_);
+ is_write_in_progress_ = false;
+ }
+
+ if (need_sychronization()) {
+ DCHECK(semaphore_handle.is_valid());
+ if (readonly) {
+ read_semaphore_handles_.push_back(std::move(semaphore_handle));
+ } else {
+ DCHECK(!write_semaphore_handle_.is_valid());
+ DCHECK(read_semaphore_handles_.empty());
+ write_semaphore_handle_ = std::move(semaphore_handle);
+ }
+ } else {
+ DCHECK(!semaphore_handle.is_valid());
+ }
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
index 173846feab1..e008b9e04b7 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
@@ -9,39 +9,62 @@
#include <vector>
#include "base/memory/scoped_refptr.h"
+#include "base/memory/shared_memory_mapping.h"
#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/vulkan/semaphore_handle.h"
#include "gpu/vulkan/vulkan_device_queue.h"
+#include "ui/gfx/gpu_memory_buffer.h"
namespace gpu {
+class VulkanCommandPool;
+
class ExternalVkImageBacking : public SharedImageBacking {
public:
- ExternalVkImageBacking(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- SharedContextState* context_state,
- VkImage image,
- VkDeviceMemory memory,
- size_t memory_size,
- VkFormat vk_format);
+ static std::unique_ptr<ExternalVkImageBacking> Create(
+ SharedContextState* context_state,
+ VulkanCommandPool* command_pool,
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ base::span<const uint8_t> pixel_data,
+ bool using_gmb = false);
+
+ static std::unique_ptr<ExternalVkImageBacking> CreateFromGMB(
+ SharedContextState* context_state,
+ VulkanCommandPool* command_pool,
+ const Mailbox& mailbox,
+ gfx::GpuMemoryBufferHandle handle,
+ gfx::BufferFormat buffer_format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage);
+
~ExternalVkImageBacking() override;
- VkImage image() { return image_; }
- VkDeviceMemory memory() { return memory_; }
- size_t memory_size() { return memory_size_; }
- VkFormat vk_format() { return vk_format_; }
- SharedContextState* context_state() { return context_state_; }
- VkDevice device() {
- return context_state_->vk_context_provider()
+ VkImage image() const { return image_; }
+ VkDeviceMemory memory() const { return memory_; }
+ size_t memory_size() const { return memory_size_; }
+ VkFormat vk_format() const { return vk_format_; }
+ SharedContextState* context_state() const { return context_state_; }
+ VulkanImplementation* vulkan_implementation() const {
+ return context_state()->vk_context_provider()->GetVulkanImplementation();
+ }
+ VkDevice device() const {
+ return context_state()
+ ->vk_context_provider()
->GetDeviceQueue()
->GetVulkanDevice();
}
+ bool need_sychronization() const {
+ return usage() & SHARED_IMAGE_USAGE_GLES2;
+ }
// Notifies the backing that an access will start. Return false if there is
// currently any other conflict access in progress. Otherwise, returns true
@@ -62,6 +85,10 @@ class ExternalVkImageBacking : public SharedImageBacking {
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override;
protected:
+ bool BeginAccessInternal(bool readonly,
+ std::vector<SemaphoreHandle>* semaphore_handles);
+ void EndAccessInternal(bool readonly, SemaphoreHandle semaphore_handle);
+
// SharedImageBacking implementation.
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
SharedImageManager* manager,
@@ -75,18 +102,46 @@ class ExternalVkImageBacking : public SharedImageBacking {
scoped_refptr<SharedContextState> context_state) override;
private:
+ ExternalVkImageBacking(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ SharedContextState* context_state,
+ VkImage image,
+ VkDeviceMemory memory,
+ size_t memory_size,
+ VkFormat vk_format,
+ VulkanCommandPool* command_pool);
+
+ // Install a shared memory GMB to the backing.
+ void InstallSharedMemory(
+ base::WritableSharedMemoryMapping shared_memory_mapping,
+ size_t stride,
+ size_t memory_offset);
+
+ bool WritePixels(const base::span<const uint8_t>& pixel_data, size_t stride);
+
SharedContextState* const context_state_;
- VkImage image_;
- VkDeviceMemory memory_;
+ VkImage image_ = VK_NULL_HANDLE;
+ VkDeviceMemory memory_ = VK_NULL_HANDLE;
SemaphoreHandle write_semaphore_handle_;
std::vector<SemaphoreHandle> read_semaphore_handles_;
- size_t memory_size_;
+ const size_t memory_size_;
bool is_cleared_ = false;
- VkFormat vk_format_;
+ const VkFormat vk_format_;
+ VulkanCommandPool* const command_pool_;
+
bool is_write_in_progress_ = false;
uint32_t reads_in_progress_ = 0;
gles2::Texture* texture_ = nullptr;
+ // GMB related stuff.
+ bool shared_memory_is_updated_ = false;
+ base::WritableSharedMemoryMapping shared_memory_mapping_;
+ size_t stride_ = 0;
+ size_t memory_offset_ = 0;
+
DISALLOW_COPY_AND_ASSIGN(ExternalVkImageBacking);
};
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory.cc b/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
index e0b733298e6..5831d6e6e87 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory.cc
@@ -7,27 +7,30 @@
#include <unistd.h>
#include "components/viz/common/gpu/vulkan_context_provider.h"
-#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/service/external_vk_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/vulkan/vulkan_command_buffer.h"
#include "gpu/vulkan/vulkan_command_pool.h"
#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_fence_helper.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_implementation.h"
-#include "third_party/skia/include/core/SkPromiseImageTexture.h"
-#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
namespace gpu {
ExternalVkImageFactory::ExternalVkImageFactory(
SharedContextState* context_state)
- : context_state_(context_state) {}
+ : context_state_(context_state),
+ command_pool_(context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->CreateCommandPool()) {}
ExternalVkImageFactory::~ExternalVkImageFactory() {
if (command_pool_) {
- command_pool_->Destroy();
- command_pool_.reset();
+ context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper()
+ ->EnqueueVulkanObjectCleanupForSubmittedWork(std::move(command_pool_));
}
}
@@ -39,75 +42,9 @@ std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
uint32_t usage,
bool is_thread_safe) {
DCHECK(!is_thread_safe);
- VkDevice device = context_state_->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanDevice();
-
- VkFormat vk_format = ToVkFormat(format);
- VkResult result;
- VkImage image;
- result = CreateExternalVkImage(vk_format, size, &image);
- if (result != VK_SUCCESS) {
- LOG(ERROR) << "Failed to create external VkImage: " << result;
- return nullptr;
- }
-
- VkMemoryRequirements requirements;
- vkGetImageMemoryRequirements(device, image, &requirements);
-
- if (!requirements.memoryTypeBits) {
- LOG(ERROR) << "Unable to find appropriate memory type for external VkImage";
- vkDestroyImage(device, image, nullptr);
- return nullptr;
- }
-
- constexpr uint32_t kInvalidTypeIndex = 32;
- uint32_t type_index = kInvalidTypeIndex;
- for (int i = 0; i < 32; i++) {
- if ((1u << i) & requirements.memoryTypeBits) {
- type_index = i;
- break;
- }
- }
- DCHECK_NE(kInvalidTypeIndex, type_index);
-
- VkExportMemoryAllocateInfoKHR external_info;
- external_info.sType = VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR;
- external_info.pNext = nullptr;
- external_info.handleTypes = context_state_->vk_context_provider()
- ->GetVulkanImplementation()
- ->GetExternalImageHandleType();
-
- VkMemoryAllocateInfo mem_alloc_info;
- mem_alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
- mem_alloc_info.pNext = &external_info;
- mem_alloc_info.allocationSize = requirements.size;
- mem_alloc_info.memoryTypeIndex = type_index;
-
- VkDeviceMemory memory;
- // TODO(crbug.com/932286): Allocating a separate piece of memory for every
- // VkImage might have too much overhead. It is recommended that one large
- // VkDeviceMemory be sub-allocated to multiple VkImages instead.
- result = vkAllocateMemory(device, &mem_alloc_info, nullptr, &memory);
- if (result != VK_SUCCESS) {
- LOG(ERROR) << "Failed to allocate memory for external VkImage: " << result;
- vkDestroyImage(device, image, nullptr);
- return nullptr;
- }
-
- result = vkBindImageMemory(device, image, memory, 0);
- if (result != VK_SUCCESS) {
- LOG(ERROR) << "Failed to bind memory to external VkImage: " << result;
- vkFreeMemory(device, memory, nullptr);
- vkDestroyImage(device, image, nullptr);
- return nullptr;
- }
-
- TransitionToColorAttachment(image);
-
- return std::make_unique<ExternalVkImageBacking>(
- mailbox, format, size, color_space, usage, context_state_, image, memory,
- requirements.size, vk_format);
+ return ExternalVkImageBacking::Create(context_state_, command_pool_.get(),
+ mailbox, format, size, color_space,
+ usage, base::span<const uint8_t>());
}
std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
@@ -117,195 +54,32 @@ std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
const gfx::ColorSpace& color_space,
uint32_t usage,
base::span<const uint8_t> pixel_data) {
- switch (format) {
- case viz::ETC1:
- case viz::RED_8:
- case viz::LUMINANCE_F16:
- case viz::R16_EXT:
- case viz::BGR_565:
- case viz::RG_88:
- case viz::BGRX_8888:
- case viz::RGBX_1010102:
- case viz::BGRX_1010102:
- case viz::YVU_420:
- case viz::YUV_420_BIPLANAR:
- case viz::UYVY_422:
- // TODO(https://crbug.com/945513): support all formats.
- LOG(ERROR) << "format " << format << " is not supported.";
- return nullptr;
- default:
- break;
- }
- auto sk_color_type = viz::ResourceFormatToClosestSkColorType(
- true /* gpu_compositing */, format);
- auto ii = SkImageInfo::Make(size.width(), size.height(), sk_color_type,
- kOpaque_SkAlphaType);
- // rows in pixel data are aligned with 4.
- size_t row_bytes = (ii.minRowBytes() + 3) & ~3;
- if (pixel_data.size() != ii.computeByteSize(row_bytes)) {
- LOG(ERROR) << "Initial data does not have expected size.";
- return nullptr;
- }
-
- auto backing = CreateSharedImage(mailbox, format, size, color_space, usage,
- false /* is_thread_safe */);
-
- if (!backing)
- return nullptr;
-
- ExternalVkImageBacking* vk_backing =
- static_cast<ExternalVkImageBacking*>(backing.get());
-
- std::vector<SemaphoreHandle> handles;
- if (!vk_backing->BeginAccess(false /* readonly */, &handles)) {
- LOG(ERROR) << "Failed to request write access of backing.";
- return nullptr;
- }
-
- DCHECK(handles.empty());
-
- // Create backend render target from the VkImage.
- GrVkAlloc alloc(vk_backing->memory(), 0 /* offset */,
- vk_backing->memory_size(), 0 /* flags */);
- GrVkImageInfo vk_image_info(vk_backing->image(), alloc,
- VK_IMAGE_TILING_OPTIMAL,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
- vk_backing->vk_format(), 1 /* levelCount */);
- GrBackendRenderTarget render_target(size.width(), size.height(),
- 1 /* sampleCnt */, vk_image_info);
- SkSurfaceProps surface_props(0, SkSurfaceProps::kLegacyFontHost_InitType);
- auto surface = SkSurface::MakeFromBackendRenderTarget(
- context_state_->gr_context(), render_target, kTopLeft_GrSurfaceOrigin,
- sk_color_type, nullptr, &surface_props);
- SkPixmap pixmap(ii, pixel_data.data(), row_bytes);
- surface->writePixels(pixmap, 0, 0);
-
- auto* vk_implementation =
- context_state_->vk_context_provider()->GetVulkanImplementation();
-
- VkSemaphore semaphore =
- vk_implementation->CreateExternalSemaphore(vk_backing->device());
- VkDevice device = context_state_->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanDevice();
- SemaphoreHandle semaphore_handle =
- vk_implementation->GetSemaphoreHandle(device, semaphore);
- if (!semaphore_handle.is_valid()) {
- LOG(ERROR) << "GetSemaphoreHandle() failed.";
- vkDestroySemaphore(device, semaphore, nullptr /* pAllocator */);
- return nullptr;
- }
-
- GrBackendSemaphore gr_semaphore;
- gr_semaphore.initVulkan(semaphore);
- if (surface->flushAndSignalSemaphores(1, &gr_semaphore) !=
- GrSemaphoresSubmitted::kYes) {
- LOG(ERROR) << "Failed to flush the surface.";
- vkDestroySemaphore(device, semaphore, nullptr /* pAllocator */);
- return nullptr;
- }
- vk_backing->EndAccess(false /* readonly */, std::move(semaphore_handle));
- VkQueue queue =
- context_state_->vk_context_provider()->GetDeviceQueue()->GetVulkanQueue();
- // TODO(https://crbug.com/932260): avoid blocking CPU thread.
- vkQueueWaitIdle(queue);
- vkDestroySemaphore(device, semaphore, nullptr /* pAllocator */);
-
- return backing;
+ return ExternalVkImageBacking::Create(context_state_, command_pool_.get(),
+ mailbox, format, size, color_space,
+ usage, pixel_data);
}
std::unique_ptr<SharedImageBacking> ExternalVkImageFactory::CreateSharedImage(
const Mailbox& mailbox,
int client_id,
gfx::GpuMemoryBufferHandle handle,
- gfx::BufferFormat format,
+ gfx::BufferFormat buffer_format,
SurfaceHandle surface_handle,
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
- // GpuMemoryBuffers supported is not implemented yet.
- NOTIMPLEMENTED();
- return nullptr;
-}
-
-VkResult ExternalVkImageFactory::CreateExternalVkImage(VkFormat format,
- const gfx::Size& size,
- VkImage* image) {
- VkDevice device = context_state_->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanDevice();
-
- VkExternalMemoryImageCreateInfoKHR external_info;
- external_info.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO_KHR;
- external_info.pNext = nullptr;
- external_info.handleTypes = context_state_->vk_context_provider()
- ->GetVulkanImplementation()
- ->GetExternalImageHandleType();
-
- VkImageCreateInfo create_info;
- create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
- create_info.pNext = &external_info;
- create_info.flags = 0;
- create_info.imageType = VK_IMAGE_TYPE_2D;
- create_info.format = format;
- create_info.extent = {size.width(), size.height(), 1};
- create_info.mipLevels = 1;
- create_info.arrayLayers = 1;
- create_info.samples = VK_SAMPLE_COUNT_1_BIT;
- create_info.tiling = VK_IMAGE_TILING_OPTIMAL;
- create_info.usage =
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
- create_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
- create_info.queueFamilyIndexCount = context_state_->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanQueueIndex();
- create_info.pQueueFamilyIndices = nullptr;
- create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-
- return vkCreateImage(device, &create_info, nullptr, image);
+ DCHECK(CanImportGpuMemoryBuffer(handle.type));
+ return ExternalVkImageBacking::CreateFromGMB(
+ context_state_, command_pool_.get(), mailbox, std::move(handle),
+ buffer_format, size, color_space, usage);
}
-void ExternalVkImageFactory::TransitionToColorAttachment(VkImage image) {
- if (!command_pool_) {
- command_pool_ = context_state_->vk_context_provider()
- ->GetDeviceQueue()
- ->CreateCommandPool();
- }
- std::unique_ptr<VulkanCommandBuffer> command_buffer =
- command_pool_->CreatePrimaryCommandBuffer();
- CHECK(command_buffer->Initialize());
- {
- ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
- VkImageMemoryBarrier image_memory_barrier;
- image_memory_barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
- image_memory_barrier.pNext = nullptr;
- image_memory_barrier.srcAccessMask = 0;
- image_memory_barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
- image_memory_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
- image_memory_barrier.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
- image_memory_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- image_memory_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
- image_memory_barrier.image = image;
- image_memory_barrier.subresourceRange.aspectMask =
- VK_IMAGE_ASPECT_COLOR_BIT;
- image_memory_barrier.subresourceRange.baseMipLevel = 0;
- image_memory_barrier.subresourceRange.levelCount = 1;
- image_memory_barrier.subresourceRange.baseArrayLayer = 0;
- image_memory_barrier.subresourceRange.layerCount = 1;
- vkCmdPipelineBarrier(recorder.handle(), VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
- VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, 0, 0,
- nullptr, 0, nullptr, 1, &image_memory_barrier);
- }
- command_buffer->Submit(0, nullptr, 0, nullptr);
- // TODO(crbug.com/932260): Remove blocking call to VkQueueWaitIdle once we
- // have a better approach for determining when |command_buffer| is safe to
- // destroy.
- vkQueueWaitIdle(context_state_->vk_context_provider()
- ->GetDeviceQueue()
- ->GetVulkanQueue());
- command_buffer->Destroy();
- command_buffer.reset();
+bool ExternalVkImageFactory::CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) {
+ return context_state_->vk_context_provider()
+ ->GetVulkanImplementation()
+ ->CanImportGpuMemoryBuffer(memory_buffer_type) ||
+ memory_buffer_type == gfx::SHARED_MEMORY_BUFFER;
}
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_factory.h b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
index d730a41e01f..383b2df6706 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_factory.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_factory.h
@@ -48,6 +48,8 @@ class ExternalVkImageFactory : public SharedImageBackingFactory {
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) override;
+ bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) override;
private:
VkResult CreateExternalVkImage(VkFormat format,
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
index d8b662fde50..935ae6ef46d 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
@@ -24,15 +24,20 @@ ExternalVkImageSkiaRepresentation::ExternalVkImageSkiaRepresentation(
}
ExternalVkImageSkiaRepresentation::~ExternalVkImageSkiaRepresentation() {
+ DCHECK_EQ(access_mode_, kNone) << "Previoud access hasn't end yet";
+ DCHECK(end_access_semaphore_ == VK_NULL_HANDLE);
}
sk_sp<SkSurface> ExternalVkImageSkiaRepresentation::BeginWriteAccess(
int final_msaa_count,
- const SkSurfaceProps& surface_props) {
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
DCHECK_EQ(access_mode_, kNone) << "Previous access hasn't ended yet";
DCHECK(!surface_);
- auto promise_texture = BeginAccess(false /* readonly */);
+ auto promise_texture =
+ BeginAccess(false /* readonly */, begin_semaphores, end_semaphores);
if (!promise_texture)
return nullptr;
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
@@ -51,17 +56,21 @@ void ExternalVkImageSkiaRepresentation::EndWriteAccess(
DCHECK_EQ(access_mode_, kWrite)
<< "EndWriteAccess is called before BeginWriteAccess";
DCHECK(surface_);
+
surface_ = nullptr;
EndAccess(false /* readonly */);
access_mode_ = kNone;
}
-sk_sp<SkPromiseImageTexture>
-ExternalVkImageSkiaRepresentation::BeginReadAccess() {
+sk_sp<SkPromiseImageTexture> ExternalVkImageSkiaRepresentation::BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ // TODO(penghuang): provide begin and end semaphores.
DCHECK_EQ(access_mode_, kNone) << "Previous access hasn't ended yet";
DCHECK(!surface_);
- auto promise_texture = BeginAccess(true /* readonly */);
+ auto promise_texture =
+ BeginAccess(true /* readonly */, begin_semaphores, end_semaphores);
if (!promise_texture)
return nullptr;
access_mode_ = kRead;
@@ -71,40 +80,39 @@ ExternalVkImageSkiaRepresentation::BeginReadAccess() {
void ExternalVkImageSkiaRepresentation::EndReadAccess() {
DCHECK_EQ(access_mode_, kRead)
<< "EndReadAccess is called before BeginReadAccess";
+
EndAccess(true /* readonly */);
access_mode_ = kNone;
}
sk_sp<SkPromiseImageTexture> ExternalVkImageSkiaRepresentation::BeginAccess(
- bool readonly) {
+ bool readonly,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
DCHECK_EQ(access_mode_, kNone);
+ DCHECK(end_access_semaphore_ == VK_NULL_HANDLE);
std::vector<SemaphoreHandle> handles;
if (!backing_impl()->BeginAccess(readonly, &handles))
return nullptr;
- std::vector<VkSemaphore> begin_access_semaphores;
for (auto& handle : handles) {
+ DCHECK(handle.is_valid());
VkSemaphore semaphore = vk_implementation()->ImportSemaphoreHandle(
vk_device(), std::move(handle));
- if (semaphore != VK_NULL_HANDLE)
- begin_access_semaphores.push_back(semaphore);
+ DCHECK(semaphore != VK_NULL_HANDLE);
+    // The ownership of the semaphore is passed to the caller.
+ begin_semaphores->emplace_back();
+ begin_semaphores->back().initVulkan(semaphore);
}
- if (!begin_access_semaphores.empty()) {
- // Submit wait semaphore to the queue. Note that Skia uses the same queue
- // exposed by vk_queue(), so this will work due to Vulkan queue ordering.
- if (!SubmitWaitVkSemaphores(vk_queue(), begin_access_semaphores)) {
- LOG(ERROR) << "Failed to wait on semaphore";
- // Since the semaphore was not actually sent to the queue, it is safe to
- // destroy the |begin_access_semaphores| here.
- DestroySemaphoresImmediate(std::move(begin_access_semaphores));
- return nullptr;
- }
-
- // Enqueue delayed cleanup of the semaphores.
- fence_helper()->EnqueueSemaphoresCleanupForSubmittedWork(
- std::move(begin_access_semaphores));
+ if (backing_impl()->need_sychronization()) {
+ // Create an |end_access_semaphore_| which will be signalled by the caller.
+ end_access_semaphore_ =
+ vk_implementation()->CreateExternalSemaphore(backing_impl()->device());
+ DCHECK(end_access_semaphore_ != VK_NULL_HANDLE);
+ end_semaphores->emplace_back();
+ end_semaphores->back().initVulkan(end_access_semaphore_);
}
// Create backend texture from the VkImage.
@@ -122,58 +130,23 @@ sk_sp<SkPromiseImageTexture> ExternalVkImageSkiaRepresentation::BeginAccess(
void ExternalVkImageSkiaRepresentation::EndAccess(bool readonly) {
DCHECK_NE(access_mode_, kNone);
- VkSemaphore end_access_semaphore =
- vk_implementation()->CreateExternalSemaphore(backing_impl()->device());
- VkFence end_access_fence = VK_NULL_HANDLE;
- if (end_access_semaphore != VK_NULL_HANDLE) {
- if (VK_SUCCESS != fence_helper()->GetFence(&end_access_fence)) {
- // Since the semaphore was not actually sent to the queue, it is safe to
- // destroy the |end_access_semaphore| here.
- DestroySemaphoreImmediate(end_access_semaphore);
- }
- // Submit wait semaphore to the queue. Note that Skia uses the same queue
- // exposed by vk_queue(), so this will work due to Vulkan queue ordering.
- if (!SubmitSignalVkSemaphore(vk_queue(), end_access_semaphore,
- end_access_fence)) {
- LOG(ERROR) << "Failed to wait on semaphore";
- // Since the semaphore was not actually sent to the queue, it is safe to
- // destroy the |end_access_semaphore| here.
- DestroySemaphoreImmediate(end_access_semaphore);
- // Same for the fence.
- vkDestroyFence(vk_device(), end_access_fence, nullptr);
- end_access_fence = VK_NULL_HANDLE;
- end_access_semaphore = VK_NULL_HANDLE;
- }
- }
-
SemaphoreHandle handle;
- if (end_access_semaphore != VK_NULL_HANDLE) {
+ if (backing_impl()->need_sychronization()) {
+ DCHECK(end_access_semaphore_ != VK_NULL_HANDLE);
+
handle = vk_implementation()->GetSemaphoreHandle(vk_device(),
- end_access_semaphore);
- if (!handle.is_valid())
- LOG(FATAL) << "Failed to get handle from a semaphore.";
+ end_access_semaphore_);
+ DCHECK(handle.is_valid());
// We're done with the semaphore, enqueue deferred cleanup.
fence_helper()->EnqueueSemaphoreCleanupForSubmittedWork(
- end_access_semaphore);
- fence_helper()->EnqueueFence(end_access_fence);
+ end_access_semaphore_);
+ end_access_semaphore_ = VK_NULL_HANDLE;
+ } else {
+ DCHECK(end_access_semaphore_ == VK_NULL_HANDLE);
}
- backing_impl()->EndAccess(readonly, std::move(handle));
-}
-void ExternalVkImageSkiaRepresentation::DestroySemaphoresImmediate(
- std::vector<VkSemaphore> semaphores) {
- if (semaphores.empty())
- return;
- for (VkSemaphore semaphore : semaphores)
- vkDestroySemaphore(vk_device(), semaphore, nullptr /* pAllocator */);
-}
-
-void ExternalVkImageSkiaRepresentation::DestroySemaphoreImmediate(
- VkSemaphore semaphore) {
- if (semaphore == VK_NULL_HANDLE)
- return;
- return DestroySemaphoresImmediate({semaphore});
+ backing_impl()->EndAccess(readonly, std::move(handle));
}
-} // namespace gpu
+} // namespace gpu \ No newline at end of file
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.h b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.h
index 516766fbc98..dbc57d4ed64 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.h
@@ -26,9 +26,13 @@ class ExternalVkImageSkiaRepresentation : public SharedImageRepresentationSkia {
// SharedImageRepresentationSkia implementation.
sk_sp<SkSurface> BeginWriteAccess(
int final_msaa_count,
- const SkSurfaceProps& surface_props) override;
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override;
void EndWriteAccess(sk_sp<SkSurface> surface) override;
- sk_sp<SkPromiseImageTexture> BeginReadAccess() override;
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override;
void EndReadAccess() override;
private:
@@ -67,12 +71,12 @@ class ExternalVkImageSkiaRepresentation : public SharedImageRepresentationSkia {
return static_cast<ExternalVkImageBacking*>(backing());
}
- sk_sp<SkPromiseImageTexture> BeginAccess(bool readonly);
- void EndAccess(bool readonly);
+ sk_sp<SkPromiseImageTexture> BeginAccess(
+ bool readonly,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores);
- // Functions used in error cases - immediately clean up semaphores.
- void DestroySemaphoresImmediate(std::vector<VkSemaphore> semaphores);
- void DestroySemaphoreImmediate(VkSemaphore semaphores);
+ void EndAccess(bool readonly);
enum AccessMode {
kNone = 0,
@@ -81,6 +85,7 @@ class ExternalVkImageSkiaRepresentation : public SharedImageRepresentationSkia {
};
AccessMode access_mode_ = kNone;
sk_sp<SkSurface> surface_;
+ VkSemaphore end_access_semaphore_ = VK_NULL_HANDLE;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index a2ed6bee1aa..817d686d984 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -221,6 +221,14 @@ FeatureInfo::FeatureInfo(
gpu_feature_info
.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] ==
gpu::kGpuFeatureStatusEnabled;
+
+#if defined(OS_CHROMEOS)
+ feature_flags_.chromium_image_ycbcr_420v = base::ContainsValue(
+ gpu_feature_info.supported_buffer_formats_for_allocation_and_texturing,
+ gfx::BufferFormat::YUV_420_BIPLANAR);
+#elif defined(OS_MACOSX)
+ feature_flags_.chromium_image_ycbcr_420v = true;
+#endif
}
void FeatureInfo::InitializeBasicState(const base::CommandLine* command_line) {
@@ -492,6 +500,7 @@ void FeatureInfo::InitializeFeatures() {
gfx::HasExtension(extensions, "GL_ANGLE_texture_compression_dxt5");
if (gfx::HasExtension(extensions, "GL_EXT_texture_compression_dxt1") ||
+ gfx::HasExtension(extensions, "GL_ANGLE_texture_compression_dxt1") ||
have_s3tc) {
enable_dxt1 = true;
}
@@ -505,7 +514,7 @@ void FeatureInfo::InitializeFeatures() {
if (enable_dxt1) {
feature_flags_.ext_texture_format_dxt1 = true;
- AddExtensionString("GL_EXT_texture_compression_dxt1");
+ AddExtensionString("GL_ANGLE_texture_compression_dxt1");
validators_.compressed_texture_format.AddValue(
GL_COMPRESSED_RGB_S3TC_DXT1_EXT);
validators_.compressed_texture_format.AddValue(
@@ -519,9 +528,9 @@ void FeatureInfo::InitializeFeatures() {
if (enable_dxt3) {
// The difference between GL_EXT_texture_compression_s3tc and
- // GL_CHROMIUM_texture_compression_dxt3 is that the former
+ // GL_ANGLE_texture_compression_dxt3 is that the former
// requires on the fly compression. The latter does not.
- AddExtensionString("GL_CHROMIUM_texture_compression_dxt3");
+ AddExtensionString("GL_ANGLE_texture_compression_dxt3");
validators_.compressed_texture_format.AddValue(
GL_COMPRESSED_RGBA_S3TC_DXT3_EXT);
validators_.texture_internal_format_storage.AddValue(
@@ -532,9 +541,9 @@ void FeatureInfo::InitializeFeatures() {
feature_flags_.ext_texture_format_dxt5 = true;
// The difference between GL_EXT_texture_compression_s3tc and
- // GL_CHROMIUM_texture_compression_dxt5 is that the former
+ // GL_ANGLE_texture_compression_dxt5 is that the former
// requires on the fly compression. The latter does not.
- AddExtensionString("GL_CHROMIUM_texture_compression_dxt5");
+ AddExtensionString("GL_ANGLE_texture_compression_dxt5");
validators_.compressed_texture_format.AddValue(
GL_COMPRESSED_RGBA_S3TC_DXT5_EXT);
validators_.texture_internal_format_storage.AddValue(
@@ -1094,13 +1103,8 @@ void FeatureInfo::InitializeFeatures() {
validators_.g_l_state.AddValue(GL_TEXTURE_BINDING_RECTANGLE_ARB);
}
-#if defined(OS_MACOSX) || defined(OS_CHROMEOS)
- // TODO(dcastagna): Determine ycbcr_420v_image on CrOS at runtime
- // querying minigbm. https://crbug.com/646148
- AddExtensionString("GL_CHROMIUM_ycbcr_420v_image");
- feature_flags_.chromium_image_ycbcr_420v = true;
-#endif
if (feature_flags_.chromium_image_ycbcr_420v) {
+ AddExtensionString("GL_CHROMIUM_ycbcr_420v_image");
feature_flags_.gpu_memory_buffer_formats.Add(
gfx::BufferFormat::YUV_420_BIPLANAR);
}
@@ -1541,6 +1545,9 @@ void FeatureInfo::InitializeFeatures() {
// https://crbug.com/881152
validators_.shader_parameter.AddValue(GL_COMPLETION_STATUS_KHR);
validators_.program_parameter.AddValue(GL_COMPLETION_STATUS_KHR);
+
+ AddExtensionString("GL_CHROMIUM_completion_query");
+ feature_flags_.chromium_completion_query = true;
}
if (gfx::HasExtension(extensions, "GL_KHR_robust_buffer_access_behavior")) {
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index 9ad2b789cab..80c36fe5384 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -146,6 +146,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool nv_internalformat_sample_query = false;
bool amd_framebuffer_multisample_advanced = false;
bool ext_float_blend = false;
+ bool chromium_completion_query = false;
};
FeatureInfo();
diff --git a/chromium/gpu/command_buffer/service/feature_info_unittest.cc b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
index ca61ad0c7c9..e28c3b2b264 100644
--- a/chromium/gpu/command_buffer/service/feature_info_unittest.cc
+++ b/chromium/gpu/command_buffer/service/feature_info_unittest.cc
@@ -277,11 +277,11 @@ TEST_P(FeatureInfoTest, InitializeNoExtensions) {
// Check a couple of random extensions that should not be there.
EXPECT_FALSE(gfx::HasExtension(info_->extensions(), "GL_OES_texture_npot"));
EXPECT_FALSE(gfx::HasExtension(info_->extensions(),
- "GL_EXT_texture_compression_dxt1"));
+ "GL_ANGLE_texture_compression_dxt1"));
EXPECT_FALSE(gfx::HasExtension(info_->extensions(),
- "GL_CHROMIUM_texture_compression_dxt3"));
+ "GL_ANGLE_texture_compression_dxt3"));
EXPECT_FALSE(gfx::HasExtension(info_->extensions(),
- "GL_CHROMIUM_texture_compression_dxt5"));
+ "GL_ANGLE_texture_compression_dxt5"));
EXPECT_FALSE(
gfx::HasExtension(info_->extensions(), "GL_ANGLE_texture_usage"));
EXPECT_FALSE(
@@ -408,9 +408,9 @@ TEST_P(FeatureInfoTest, InitializeNPOTExtensionGL) {
}
TEST_P(FeatureInfoTest, InitializeDXTExtensionGLES2) {
- SetupInitExpectations("GL_EXT_texture_compression_dxt1");
+ SetupInitExpectations("GL_ANGLE_texture_compression_dxt1");
EXPECT_TRUE(gfx::HasExtension(info_->extensions(),
- "GL_EXT_texture_compression_dxt1"));
+ "GL_ANGLE_texture_compression_dxt1"));
EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
GL_COMPRESSED_RGB_S3TC_DXT1_EXT));
EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
@@ -424,11 +424,11 @@ TEST_P(FeatureInfoTest, InitializeDXTExtensionGLES2) {
TEST_P(FeatureInfoTest, InitializeDXTExtensionGL) {
SetupInitExpectations("GL_EXT_texture_compression_s3tc");
EXPECT_TRUE(gfx::HasExtension(info_->extensions(),
- "GL_EXT_texture_compression_dxt1"));
+ "GL_ANGLE_texture_compression_dxt1"));
EXPECT_TRUE(gfx::HasExtension(info_->extensions(),
- "GL_CHROMIUM_texture_compression_dxt3"));
+ "GL_ANGLE_texture_compression_dxt3"));
EXPECT_TRUE(gfx::HasExtension(info_->extensions(),
- "GL_CHROMIUM_texture_compression_dxt5"));
+ "GL_ANGLE_texture_compression_dxt5"));
EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
GL_COMPRESSED_RGB_S3TC_DXT1_EXT));
EXPECT_TRUE(info_->validators()->compressed_texture_format.IsValid(
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager.cc b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
index f801bb44162..30a62773a3f 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager.cc
@@ -386,7 +386,8 @@ Framebuffer::Framebuffer(FramebufferManager* manager, GLuint service_id)
draw_buffer_bound_mask_(0u),
adjusted_draw_buffer_bound_mask_(0u),
last_color_attachment_id_(-1),
- read_buffer_(GL_COLOR_ATTACHMENT0) {
+ read_buffer_(GL_COLOR_ATTACHMENT0),
+ flip_y_(false) {
manager->StartTracking(this);
DCHECK_GT(manager->max_draw_buffers_, 0u);
draw_buffers_.reset(new GLenum[manager->max_draw_buffers_]);
@@ -539,7 +540,8 @@ void Framebuffer::RestoreDrawBuffers() const {
bool Framebuffer::ValidateAndAdjustDrawBuffers(
uint32_t fragment_output_type_mask, uint32_t fragment_output_written_mask) {
uint32_t mask = draw_buffer_bound_mask_ & fragment_output_written_mask;
- if ((mask & fragment_output_type_mask) != (mask & draw_buffer_type_mask_))
+ if (mask != draw_buffer_bound_mask_ ||
+ (mask & fragment_output_type_mask) != (mask & draw_buffer_type_mask_))
return false;
AdjustDrawBuffersImpl(mask);
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager.h b/chromium/gpu/command_buffer/service/framebuffer_manager.h
index 794df67ddac..1670640372d 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager.h
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager.h
@@ -236,6 +236,9 @@ class GPU_GLES2_EXPORT Framebuffer : public base::RefCounted<Framebuffer> {
void UnmarkAsComplete() { framebuffer_complete_state_count_id_ = 0; }
+ bool GetFlipY() const { return flip_y_; }
+ void SetFlipY(bool flip_y) { flip_y_ = flip_y; }
+
private:
friend class FramebufferManager;
friend class base::RefCounted<Framebuffer>;
@@ -315,6 +318,8 @@ class GPU_GLES2_EXPORT Framebuffer : public base::RefCounted<Framebuffer> {
GLenum read_buffer_;
+ bool flip_y_;
+
DISALLOW_COPY_AND_ASSIGN(Framebuffer);
};
diff --git a/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc b/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
index a19ffa39d1b..22be5a54c8c 100644
--- a/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/framebuffer_manager_unittest.cc
@@ -1250,25 +1250,15 @@ TEST_F(FramebufferInfoTest, DrawBufferMasks) {
// Test ValidateAndAdjustDrawBuffers().
// gl_FragColor situation.
- EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_TRUE(framebuffer_->ValidateAndAdjustDrawBuffers(0x3u, 0x3u));
+ EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _)).Times(0);
+ EXPECT_FALSE(framebuffer_->ValidateAndAdjustDrawBuffers(0x3u, 0x3u));
// gl_FragData situation.
EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
.Times(0);
EXPECT_FALSE(
framebuffer_->ValidateAndAdjustDrawBuffers(0xFFFFFFFFu, 0xFFFFFFFFu));
// User defined output variables, fully match.
- EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_TRUE(
- framebuffer_->ValidateAndAdjustDrawBuffers(0x31Bu, 0x33Fu));
- // Call it a second time - this test is critical, making sure we don't
- // call DrawBuffers() every draw call if program doesn't change.
- EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
- .Times(0);
+ EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _)).Times(0);
EXPECT_TRUE(
framebuffer_->ValidateAndAdjustDrawBuffers(0x31Bu, 0x33Fu));
// User defined output variables, fully on, one type mismatch.
@@ -1277,23 +1267,12 @@ TEST_F(FramebufferInfoTest, DrawBufferMasks) {
EXPECT_FALSE(
framebuffer_->ValidateAndAdjustDrawBuffers(0x32Bu, 0x33Fu));
// Empty output.
- EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_TRUE(
- framebuffer_->ValidateAndAdjustDrawBuffers(0u, 0u));
+ EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _)).Times(0);
+ EXPECT_FALSE(framebuffer_->ValidateAndAdjustDrawBuffers(0u, 0u));
// User defined output variables, some active buffers have no corresponding
// output variables, but if they do, types match.
- EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_TRUE(
- framebuffer_->ValidateAndAdjustDrawBuffers(0x310u, 0x330u));
- // Call it a second time - making sure DrawBuffers isn't triggered.
- EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _))
- .Times(0);
- EXPECT_TRUE(
- framebuffer_->ValidateAndAdjustDrawBuffers(0x310u, 0x330u));
+ EXPECT_CALL(*gl_, DrawBuffersARB(kMaxDrawBuffers, _)).Times(0);
+ EXPECT_FALSE(framebuffer_->ValidateAndAdjustDrawBuffers(0x310u, 0x330u));
}
class FramebufferInfoFloatTest : public FramebufferInfoTestBase {
diff --git a/chromium/gpu/command_buffer/service/gl_utils.cc b/chromium/gpu/command_buffer/service/gl_utils.cc
index fbfef93de14..ee8cd5d2303 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.cc
+++ b/chromium/gpu/command_buffer/service/gl_utils.cc
@@ -43,9 +43,10 @@ bool IsValidPVRTCSize(GLint level, GLsizei size) {
return GLES2Util::IsPOT(size);
}
-bool IsValidS3TCSizeForWebGL(GLint level, GLsizei size) {
- // WebGL only allows multiple-of-4 sizes, except for levels > 0 where it also
- // allows 1 or 2. See WEBGL_compressed_texture_s3tc.
+bool IsValidS3TCSizeForWebGLAndANGLE(GLint level, GLsizei size) {
+  // WebGL and ANGLE only allow multiple-of-4 sizes, except for levels > 0
+  // where they also allow 1 or 2. See WEBGL_compressed_texture_s3tc and
+  // ANGLE_compressed_texture_dxt*.
return (level && size == 1) || (level && size == 2) ||
!(size % kS3TCBlockWidth);
}
@@ -532,7 +533,6 @@ bool ValidateCompressedTexSubDimensions(GLenum target,
GLsizei depth,
GLenum format,
Texture* texture,
- bool restrict_for_webgl,
const char** error_message) {
if (xoffset < 0 || yoffset < 0 || zoffset < 0) {
*error_message = "x/y/z offset < 0";
@@ -655,8 +655,7 @@ bool ValidateCompressedTexSubDimensions(GLenum target,
return false;
}
return ValidateCompressedTexDimensions(target, level, width, height, 1,
- format, restrict_for_webgl,
- error_message);
+ format, error_message);
}
// ES3 formats
@@ -696,7 +695,6 @@ bool ValidateCompressedTexDimensions(GLenum target,
GLsizei height,
GLsizei depth,
GLenum format,
- bool restrict_for_webgl,
const char** error_message) {
switch (format) {
case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
@@ -708,8 +706,8 @@ bool ValidateCompressedTexDimensions(GLenum target,
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT:
case GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT:
DCHECK_EQ(1, depth); // 2D formats.
- if (restrict_for_webgl && (!IsValidS3TCSizeForWebGL(level, width) ||
- !IsValidS3TCSizeForWebGL(level, height))) {
+ if (!IsValidS3TCSizeForWebGLAndANGLE(level, width) ||
+ !IsValidS3TCSizeForWebGLAndANGLE(level, height)) {
*error_message = "width or height invalid for level";
return false;
}
@@ -903,23 +901,6 @@ CopyTextureMethod GetCopyTextureCHROMIUMMethod(const FeatureInfo* feature_info,
break;
}
- // CopyTex{Sub}Image2D() from GL_RGB10_A2 has issues on some Android chipsets.
- if (source_internal_format == GL_RGB10_A2) {
- if (feature_info->workarounds().disable_copy_tex_image_2d_rgb10_a2_tegra) {
- if (dest_internal_format == GL_RGBA4)
- return CopyTextureMethod::DIRECT_DRAW;
- return CopyTextureMethod::DRAW_AND_COPY;
- }
- if (feature_info->workarounds().disable_copy_tex_image_2d_rgb10_a2_adreno &&
- dest_internal_format != GL_RGB10_A2) {
- return CopyTextureMethod::DRAW_AND_COPY;
- }
- if (feature_info->workarounds().disable_copy_tex_image_2d_rgb10_a2_mali &&
- (dest_internal_format == GL_RGB || dest_internal_format == GL_RGBA)) {
- return CopyTextureMethod::DRAW_AND_COPY;
- }
- }
-
// CopyTexImage* should not allow internalformat of GL_BGRA_EXT and
// GL_BGRA8_EXT. https://crbug.com/663086.
bool copy_tex_image_format_valid =
@@ -931,6 +912,15 @@ CopyTextureMethod GetCopyTextureCHROMIUMMethod(const FeatureInfo* feature_info,
source_internal_format, source_type,
&output_error_msg);
+ // The ES3 spec is vague about whether or not glCopyTexImage2D from a
+ // GL_RGB10_A2 attachment to an unsized internal format is valid. Most drivers
+ // interpreted the explicit call out as not valid (and dEQP actually checks
+ // this), so avoid DIRECT_COPY in that case.
+ if (feature_info->gl_version_info().is_es &&
+ source_internal_format == GL_RGB10_A2 &&
+ dest_internal_format != source_internal_format)
+ copy_tex_image_format_valid = false;
+
// TODO(qiankun.miao@intel.com): for WebGL 2.0 or OpenGL ES 3.0, both
// DIRECT_DRAW path for dest_level > 0 and DIRECT_COPY path for source_level >
// 0 are not available due to a framebuffer completeness bug:
diff --git a/chromium/gpu/command_buffer/service/gl_utils.h b/chromium/gpu/command_buffer/service/gl_utils.h
index 60f0b808aba..28e38fca0e7 100644
--- a/chromium/gpu/command_buffer/service/gl_utils.h
+++ b/chromium/gpu/command_buffer/service/gl_utils.h
@@ -116,7 +116,6 @@ bool ValidateCompressedTexSubDimensions(GLenum target,
GLsizei depth,
GLenum format,
Texture* texture,
- bool restrict_for_webgl,
const char** error_message);
bool ValidateCompressedTexDimensions(GLenum target,
@@ -125,7 +124,6 @@ bool ValidateCompressedTexDimensions(GLenum target,
GLsizei height,
GLsizei depth,
GLenum format,
- bool restrict_for_webgl,
const char** error_message);
bool ValidateCopyTexFormatHelper(const FeatureInfo* feature_info,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
index cceecc6d6a6..d5492209fea 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.cc
@@ -26,6 +26,7 @@ ApplyFramebufferAttachmentCMAAINTELResourceManager::
supports_usampler_(true),
supports_r8_image_(true),
is_gles31_compatible_(false),
+ flip_y_(false),
frame_id_(0),
width_(0),
height_(0),
@@ -226,7 +227,7 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::
GLenum internal_format = attachment->internal_format();
// Resize internal structures - only if needed.
- OnSize(width, height);
+ OnSize(width, height, framebuffer->GetFlipY());
// CMAA internally expects GL_RGBA8 textures.
// Process using a GL_RGBA8 copy if this is not the case.
@@ -500,14 +501,16 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::ApplyCMAAEffectTexture(
}
void ApplyFramebufferAttachmentCMAAINTELResourceManager::OnSize(GLint width,
- GLint height) {
- if (height_ == height && width_ == width)
+ GLint height,
+ bool flip_y) {
+ if (height_ == height && width_ == width && flip_y_ == flip_y)
return;
ReleaseTextures();
height_ = height;
width_ = width;
+ flip_y_ = flip_y;
glGenTextures(1, &rgba8_texture_);
glBindTexture(GL_TEXTURE_2D, rgba8_texture_);
@@ -549,6 +552,10 @@ void ApplyFramebufferAttachmentCMAAINTELResourceManager::OnSize(GLint width,
// Create the FBO
glGenFramebuffersEXT(1, &cmaa_framebuffer_);
glBindFramebufferEXT(GL_FRAMEBUFFER, cmaa_framebuffer_);
+ if (flip_y_) {
+ glFramebufferParameteri(GL_FRAMEBUFFER, GL_FRAMEBUFFER_FLIP_Y_MESA,
+ GL_TRUE);
+ }
// We need to clear the textures before they are first used.
// The algorithm self-clears them later.
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
index 33bade31d60..11ded046340 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_apply_framebuffer_attachment_cmaa_intel.h
@@ -44,7 +44,7 @@ class GPU_GLES2_EXPORT ApplyFramebufferAttachmentCMAAINTELResourceManager {
GLuint dest_texture,
bool do_copy);
- void OnSize(GLint width, GLint height);
+ void OnSize(GLint width, GLint height, bool flip_y);
void ReleaseTextures();
GLuint CreateProgram(const char* defines,
@@ -58,6 +58,7 @@ class GPU_GLES2_EXPORT ApplyFramebufferAttachmentCMAAINTELResourceManager {
bool supports_usampler_;
bool supports_r8_image_;
bool is_gles31_compatible_;
+ bool flip_y_;
int frame_id_;
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index 71ae06b85d7..9d5d01c2879 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -4242,6 +4242,7 @@ Capabilities GLES2DecoderImpl::GetCapabilities() {
caps.dc_layers = supports_dc_layers_;
caps.use_dc_overlays_for_video = surface_->UseOverlaysForVideo();
caps.protected_video_swap_chain = surface_->SupportsProtectedVideo();
+ caps.gpu_vsync = surface_->SupportsGpuVSync();
caps.blend_equation_advanced =
feature_info_->feature_flags().blend_equation_advanced;
@@ -8258,12 +8259,19 @@ void GLES2DecoderImpl::DoFramebufferParameteri(GLenum target,
GLenum pname,
GLint param) {
const char* func_name = "glFramebufferParameteri";
+ DCHECK(pname == GL_FRAMEBUFFER_FLIP_Y_MESA);
Framebuffer* framebuffer = GetFramebufferInfoForTarget(target);
if (!framebuffer) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, func_name, "no framebuffer bound");
return;
}
+ if (param != GL_TRUE && param != GL_FALSE) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_VALUE, func_name,
+ "invalid parameter, only GL_TRUE or GL_FALSE accepted");
+ return;
+ }
api()->glFramebufferParameteriFn(target, pname, param);
+ framebuffer->SetFlipY(param == GL_TRUE);
}
void GLES2DecoderImpl::DoFramebufferRenderbuffer(
@@ -9971,6 +9979,9 @@ bool GLES2DecoderImpl::SupportsDrawBuffers() const {
}
bool GLES2DecoderImpl::ValidateAndAdjustDrawBuffers(const char* func_name) {
+ if (state_.GetEnabled(GL_RASTERIZER_DISCARD)) {
+ return true;
+ }
if (!SupportsDrawBuffers()) {
return true;
}
@@ -9978,6 +9989,10 @@ bool GLES2DecoderImpl::ValidateAndAdjustDrawBuffers(const char* func_name) {
if (!state_.current_program.get() || !framebuffer) {
return true;
}
+ if (!state_.color_mask_red && !state_.color_mask_green &&
+ !state_.color_mask_blue && !state_.color_mask_alpha) {
+ return true;
+ }
uint32_t fragment_output_type_mask =
state_.current_program->fragment_output_type_mask();
uint32_t fragment_output_written_mask =
@@ -14499,8 +14514,7 @@ bool GLES2DecoderImpl::ValidateCompressedTexDimensions(
GLsizei width, GLsizei height, GLsizei depth, GLenum format) {
const char* error_message = "";
if (!::gpu::gles2::ValidateCompressedTexDimensions(
- target, level, width, height, depth, format,
- feature_info_->IsWebGLContext(), &error_message)) {
+ target, level, width, height, depth, format, &error_message)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name, error_message);
return false;
}
@@ -14515,7 +14529,7 @@ bool GLES2DecoderImpl::ValidateCompressedTexSubDimensions(
const char* error_message = "";
if (!::gpu::gles2::ValidateCompressedTexSubDimensions(
target, level, xoffset, yoffset, zoffset, width, height, depth,
- format, texture, feature_info_->IsWebGLContext(), &error_message)) {
+ format, texture, &error_message)) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, function_name, error_message);
return false;
}
@@ -17206,6 +17220,13 @@ error::Error GLES2DecoderImpl::HandleBeginQueryEXT(
return error::kNoError;
}
break;
+ case GL_PROGRAM_COMPLETION_QUERY_CHROMIUM:
+ if (!features().chromium_completion_query) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginQueryEXT",
+ "not enabled for program completion queries");
+ return error::kNoError;
+ }
+ break;
case GL_SAMPLES_PASSED_ARB:
if (!features().occlusion_query) {
LOCAL_SET_GL_ERROR(
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index 6b9c176a0dc..15fd3f99101 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -752,6 +752,7 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
"GL_ANGLE_framebuffer_multisample",
"GL_ANGLE_instanced_arrays",
"GL_ANGLE_pack_reverse_row_order",
+ "GL_ANGLE_texture_compression_dxt1",
"GL_ANGLE_texture_compression_dxt3",
"GL_ANGLE_texture_compression_dxt5",
"GL_ANGLE_texture_usage",
@@ -1368,6 +1369,7 @@ gpu::Capabilities GLES2DecoderPassthroughImpl::GetCapabilities() {
caps.commit_overlay_planes = surface_->SupportsCommitOverlayPlanes();
caps.use_dc_overlays_for_video = surface_->UseOverlaysForVideo();
caps.protected_video_swap_chain = surface_->SupportsProtectedVideo();
+ caps.gpu_vsync = surface_->SupportsGpuVSync();
caps.texture_npot = feature_info_->feature_flags().npot_ok;
caps.chromium_gpu_fence = feature_info_->feature_flags().chromium_gpu_fence;
caps.chromium_nonblocking_readback = true;
@@ -1697,6 +1699,9 @@ void GLES2DecoderPassthroughImpl::BindOnePendingImage(
// However, for now, we only try once.
texture->set_is_bind_pending(false);
+ // Update any binding points that are currently bound for this texture.
+ RebindTexture(texture);
+
// No client ID available here, can this texture already be discardable?
UpdateTextureSizeFromTexturePassthrough(texture, 0);
}
@@ -2055,6 +2060,7 @@ bool GLES2DecoderPassthroughImpl::IsEmulatedQueryTarget(GLenum target) const {
case GL_LATENCY_QUERY_CHROMIUM:
case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
case GL_GET_ERROR_QUERY_CHROMIUM:
+ case GL_PROGRAM_COMPLETION_QUERY_CHROMIUM:
return true;
default:
@@ -2062,9 +2068,18 @@ bool GLES2DecoderPassthroughImpl::IsEmulatedQueryTarget(GLenum target) const {
}
}
+bool GLES2DecoderPassthroughImpl::OnlyHasPendingProgramCompletionQueries() {
+ return std::find_if(pending_queries_.begin(), pending_queries_.end(),
+ [](const auto& query) {
+ return query.target !=
+ GL_PROGRAM_COMPLETION_QUERY_CHROMIUM;
+ }) == pending_queries_.end();
+}
+
error::Error GLES2DecoderPassthroughImpl::ProcessQueries(bool did_finish) {
+ bool program_completion_query_deferred = false;
while (!pending_queries_.empty()) {
- const PendingQuery& query = pending_queries_.front();
+ PendingQuery& query = pending_queries_.front();
GLuint result_available = GL_FALSE;
GLuint64 result = 0;
switch (query.target) {
@@ -2124,6 +2139,35 @@ error::Error GLES2DecoderPassthroughImpl::ProcessQueries(bool did_finish) {
result = PopError();
break;
+ case GL_PROGRAM_COMPLETION_QUERY_CHROMIUM:
+ GLint status;
+ if (!api()->glIsProgramFn(query.program_service_id)) {
+ status = GL_TRUE;
+ } else {
+ api()->glGetProgramivFn(query.program_service_id,
+ GL_COMPLETION_STATUS_KHR, &status);
+ }
+ result_available = (status == GL_TRUE);
+ if (!result_available) {
+ // Move the query to the end of queue, so that other queries may have
+ // chance to be processed.
+ auto temp = std::move(query);
+ pending_queries_.pop_front();
+ pending_queries_.emplace_back(std::move(temp));
+ if (did_finish && !OnlyHasPendingProgramCompletionQueries()) {
+ continue;
+ } else {
+ program_completion_query_deferred = true;
+ }
+ result = 0;
+ } else {
+ GLint link_status = 0;
+ api()->glGetProgramivFn(query.program_service_id, GL_LINK_STATUS,
+ &link_status);
+ result = link_status;
+ }
+ break;
+
default:
DCHECK(!IsEmulatedQueryTarget(query.target));
if (did_finish) {
@@ -2158,7 +2202,8 @@ error::Error GLES2DecoderPassthroughImpl::ProcessQueries(bool did_finish) {
// If api()->glFinishFn() has been called, all of our queries should be
// completed.
- DCHECK(!did_finish || pending_queries_.empty());
+ DCHECK(!did_finish || pending_queries_.empty() ||
+ program_completion_query_deferred);
return error::kNoError;
}
@@ -2352,6 +2397,35 @@ void GLES2DecoderPassthroughImpl::UpdateTextureBinding(
}
}
+void GLES2DecoderPassthroughImpl::RebindTexture(TexturePassthrough* texture) {
+ DCHECK(texture != nullptr);
+ size_t cur_texture_unit = active_texture_unit_;
+ GLenum target = texture->target();
+ auto& target_bound_textures =
+ bound_textures_[static_cast<size_t>(GLenumToTextureTarget(target))];
+ for (size_t bound_texture_index = 0;
+ bound_texture_index < target_bound_textures.size();
+ bound_texture_index++) {
+ if (target_bound_textures[bound_texture_index].texture == texture) {
+ // Update the active texture unit if needed
+ if (bound_texture_index != cur_texture_unit) {
+ api()->glActiveTextureFn(
+ static_cast<GLenum>(GL_TEXTURE0 + bound_texture_index));
+ cur_texture_unit = bound_texture_index;
+ }
+
+ // Update the texture binding
+ api()->glBindTextureFn(target, texture->service_id());
+ }
+ }
+
+ // Reset the active texture unit if it was changed
+ if (cur_texture_unit != active_texture_unit_) {
+ api()->glActiveTextureFn(
+ static_cast<GLenum>(GL_TEXTURE0 + active_texture_unit_));
+ }
+}
+
void GLES2DecoderPassthroughImpl::UpdateTextureSizeFromTexturePassthrough(
TexturePassthrough* texture,
GLuint client_id) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index 7470c03bfc6..1fa45e8311c 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -446,6 +446,7 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
void UpdateTextureBinding(GLenum target,
GLuint client_id,
TexturePassthrough* texture);
+ void RebindTexture(TexturePassthrough* texture);
void UpdateTextureSizeFromTexturePassthrough(TexturePassthrough* texture,
GLuint client_id);
@@ -511,6 +512,8 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
}
}
+ bool OnlyHasPendingProgramCompletionQueries();
+
// A set of raw pointers to currently living PassthroughAbstractTextures
// which allow us to properly signal to them when we are destroyed.
base::flat_set<PassthroughAbstractTextureImpl*> abstract_textures_;
@@ -670,6 +673,7 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
std::vector<base::OnceClosure> callbacks;
std::unique_ptr<gl::GLFence> buffer_shadow_update_fence = nullptr;
BufferShadowUpdateMap buffer_shadow_updates;
+ GLuint program_service_id = 0u;
};
base::circular_deque<PendingQuery> pending_queries_;
@@ -849,6 +853,8 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
// get rescheduled.
std::vector<std::unique_ptr<gl::GLFence>> deschedule_until_finished_fences_;
+ GLuint linking_program_service_id_ = 0u;
+
base::WeakPtrFactory<GLES2DecoderPassthroughImpl> weak_ptr_factory_;
// Include the prototypes of all the doer functions from a separate header to
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index 643e0a1e8d8..c8113522fa4 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -9,6 +9,7 @@
#include "base/strings/string_number_conversions.h"
#include "gpu/command_buffer/common/discardable_handle.h"
#include "gpu/command_buffer/service/decoder_client.h"
+#include "gpu/command_buffer/service/gl_stream_texture_image.h"
#include "gpu/command_buffer/service/gpu_fence_manager.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
#include "gpu/command_buffer/service/multi_draw_manager.h"
@@ -2226,12 +2227,15 @@ error::Error GLES2DecoderPassthroughImpl::DoLineWidth(GLfloat width) {
error::Error GLES2DecoderPassthroughImpl::DoLinkProgram(GLuint program) {
TRACE_EVENT0("gpu", "GLES2DecoderPassthroughImpl::DoLinkProgram");
SCOPED_UMA_HISTOGRAM_TIMER("GPU.PassthroughDoLinkProgramTime");
- api()->glLinkProgramFn(GetProgramServiceID(program, resources_));
+ GLuint program_service_id = GetProgramServiceID(program, resources_);
+ api()->glLinkProgramFn(program_service_id);
// Program linking can be very slow. Exit command processing to allow for
// context preemption and GPU watchdog checks.
ExitCommandProcessingEarly();
+ linking_program_service_id_ = program_service_id;
+
return error::kNoError;
}
@@ -3360,6 +3364,9 @@ error::Error GLES2DecoderPassthroughImpl::DoBeginQueryEXT(
if (!sync)
return error::kOutOfBounds;
+ if (target == GL_PROGRAM_COMPLETION_QUERY_CHROMIUM) {
+ linking_program_service_id_ = 0u;
+ }
if (IsEmulatedQueryTarget(target)) {
if (active_queries_.find(target) != active_queries_.end()) {
InsertError(GL_INVALID_OPERATION, "Query already active on target.");
@@ -3456,6 +3463,10 @@ error::Error GLES2DecoderPassthroughImpl::DoEndQueryEXT(GLenum target,
buffer_shadow_updates_.clear();
break;
+ case GL_PROGRAM_COMPLETION_QUERY_CHROMIUM:
+ pending_query.program_service_id = linking_program_service_id_;
+ break;
+
default:
break;
}
@@ -4872,7 +4883,8 @@ error::Error GLES2DecoderPassthroughImpl::DoBindFragDataLocationIndexedEXT(
GLuint colorNumber,
GLuint index,
const char* name) {
- NOTIMPLEMENTED();
+ api()->glBindFragDataLocationIndexedFn(
+ GetProgramServiceID(program, resources_), colorNumber, index, name);
return error::kNoError;
}
@@ -4880,7 +4892,8 @@ error::Error GLES2DecoderPassthroughImpl::DoBindFragDataLocationEXT(
GLuint program,
GLuint colorNumber,
const char* name) {
- NOTIMPLEMENTED();
+ api()->glBindFragDataLocationFn(GetProgramServiceID(program, resources_),
+ colorNumber, name);
return error::kNoError;
}
@@ -4888,7 +4901,8 @@ error::Error GLES2DecoderPassthroughImpl::DoGetFragDataIndexEXT(
GLuint program,
const char* name,
GLint* index) {
- NOTIMPLEMENTED();
+ *index = api()->glGetFragDataIndexFn(GetProgramServiceID(program, resources_),
+ name);
return error::kNoError;
}
@@ -4896,8 +4910,38 @@ error::Error
GLES2DecoderPassthroughImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
GLint location,
GLboolean transpose,
- const volatile GLfloat* defaultValue) {
- NOTIMPLEMENTED();
+ const volatile GLfloat* transform) {
+ constexpr GLenum kTextureTarget = GL_TEXTURE_EXTERNAL_OES;
+ scoped_refptr<TexturePassthrough> bound_texture =
+ bound_textures_[static_cast<size_t>(
+ GLenumToTextureTarget(kTextureTarget))][active_texture_unit_]
+ .texture;
+ if (!bound_texture) {
+ InsertError(GL_INVALID_OPERATION, "no texture bound");
+ return error::kNoError;
+ }
+
+ float gl_matrix[16] = {};
+
+ GLStreamTextureImage* image =
+ bound_texture->GetStreamLevelImage(kTextureTarget, 0);
+ if (image) {
+ gfx::Transform st_transform(gfx::Transform::kSkipInitialization);
+ gfx::Transform pre_transform(gfx::Transform::kSkipInitialization);
+ image->GetTextureMatrix(gl_matrix);
+ st_transform.matrix().setColMajorf(gl_matrix);
+ // const_cast is safe, because setColMajorf only does a memcpy.
+ // TODO(piman): can we remove this assumption without having to introduce
+ // an extra copy?
+ pre_transform.matrix().setColMajorf(const_cast<const GLfloat*>(transform));
+ gfx::Transform(pre_transform, st_transform).matrix().asColMajorf(gl_matrix);
+ } else {
+ // Missing stream texture. Treat matrix as identity.
+ memcpy(gl_matrix, const_cast<const GLfloat*>(transform), sizeof(gl_matrix));
+ }
+
+ api()->glUniformMatrix4fvFn(location, 1, transpose, gl_matrix);
+
return error::kNoError;
}
@@ -4908,7 +4952,28 @@ error::Error GLES2DecoderPassthroughImpl::DoOverlayPromotionHintCHROMIUM(
GLint display_y,
GLint display_width,
GLint display_height) {
- NOTIMPLEMENTED();
+ if (texture == 0) {
+ return error::kNoError;
+ }
+
+ scoped_refptr<TexturePassthrough> passthrough_texture = nullptr;
+ if (!resources_->texture_object_map.GetServiceID(texture,
+ &passthrough_texture) ||
+ passthrough_texture == nullptr) {
+ InsertError(GL_INVALID_VALUE, "invalid texture id");
+ return error::kNoError;
+ }
+
+ GLStreamTextureImage* image =
+ passthrough_texture->GetStreamLevelImage(GL_TEXTURE_EXTERNAL_OES, 0);
+ if (!image) {
+ InsertError(GL_INVALID_OPERATION, "texture has no StreamTextureImage");
+ return error::kNoError;
+ }
+
+ image->NotifyPromotionHint(promotion_hint != GL_FALSE, display_x, display_y,
+ display_width, display_height);
+
return error::kNoError;
}
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
index 68ee75ff412..23d989601e7 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_textures.cc
@@ -1886,9 +1886,9 @@ TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DS3TC) {
// test non-block-size width.
bucket->SetSize(test.block_size * 2);
- DoCompressedTexImage2D(
- GL_TEXTURE_2D, 0, test.format, 5, 4, 0, test.block_size * 2, kBucketId);
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 5, 4, kBucketId);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
// test small height.
DoCompressedTexImage2D(
@@ -1896,10 +1896,10 @@ TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DS3TC) {
EXPECT_EQ(GL_NO_ERROR, GetGLError());
// test non-block-size height.
+ cmd.Init(GL_TEXTURE_2D, 0, test.format, 4, 5, kBucketId);
bucket->SetSize(test.block_size * 2);
- DoCompressedTexImage2D(
- GL_TEXTURE_2D, 0, test.format, 4, 5, 0, test.block_size * 2, kBucketId);
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_INVALID_OPERATION, GetGLError());
// test small for level 0.
DoCompressedTexImage2D(
@@ -2004,77 +2004,6 @@ TEST_P(GLES2DecoderManualInitTest, CompressedTexImage2DS3TC) {
}
}
}
-
- // Test a 13x13
- DoCompressedTexImage2D(GL_TEXTURE_2D,
- 0,
- test.format,
- 13,
- 13,
- 0,
- test.block_size * 4 * 4,
- kBucketId);
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-
- {
- // Accept non-multiple-of-4 width sub image if it aligns to the right
- GLint xoffset = 12;
- GLint width = 1;
- GLint yoffset = 0;
- GLint height = 4;
- bucket->SetSize(test.block_size);
-
- EXPECT_CALL(*gl_,
- CompressedTexSubImage2D(GL_TEXTURE_2D, 0, xoffset, yoffset,
- width, height, test.format, _, _))
- .Times(1)
- .RetiresOnSaturation();
- sub_cmd.Init(GL_TEXTURE_2D, 0, xoffset, yoffset, width, height,
- test.format, kBucketId);
- EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
- {
- // Accept non-multiple-of-4 height sub image if it aligns to the bottom
- GLint xoffset = 0;
- GLint width = 4;
- GLint yoffset = 12;
- GLint height = 1;
- bucket->SetSize(test.block_size);
-
- EXPECT_CALL(*gl_,
- CompressedTexSubImage2D(GL_TEXTURE_2D, 0, xoffset, yoffset,
- width, height, test.format, _, _))
- .Times(1)
- .RetiresOnSaturation();
- sub_cmd.Init(GL_TEXTURE_2D, 0, xoffset, yoffset, width, height,
- test.format, kBucketId);
- EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
- {
- // Check that partial blocks are still fully counted.
- // Those 25 pixels still need to use 4 blocks.
- GLint xoffset = 8;
- GLint width = 5;
- GLint yoffset = 8;
- GLint height = 5;
- bucket->SetSize(test.block_size * 3);
-
- sub_cmd.Init(GL_TEXTURE_2D, 0, xoffset, yoffset, width, height,
- test.format, kBucketId);
- EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
- EXPECT_EQ(GL_INVALID_VALUE, GetGLError());
-
- bucket->SetSize(test.block_size * 4);
- EXPECT_CALL(*gl_,
- CompressedTexSubImage2D(GL_TEXTURE_2D, 0, xoffset, yoffset,
- width, height, test.format, _, _))
- .Times(1)
- .RetiresOnSaturation();
- EXPECT_EQ(error::kNoError, ExecuteCmd(sub_cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
- }
}
}
@@ -3941,8 +3870,8 @@ TEST_P(GLES2DecoderManualInitTest, DrawWithGLImageExternal) {
scoped_refptr<MockGLImage> image(new MockGLImage);
group().texture_manager()->SetTarget(texture_ref, GL_TEXTURE_EXTERNAL_OES);
group().texture_manager()->SetLevelInfo(texture_ref, GL_TEXTURE_EXTERNAL_OES,
- 0, GL_RGBA, 0, 0, 1, 0, GL_RGBA,
- GL_UNSIGNED_BYTE, gfx::Rect());
+ 0, GL_RGBA, 1, 1, 1, 0, GL_RGBA,
+ GL_UNSIGNED_BYTE, gfx::Rect(1, 1));
group().texture_manager()->SetLevelImage(texture_ref, GL_TEXTURE_EXTERNAL_OES,
0, image.get(), Texture::BOUND);
@@ -5046,6 +4975,30 @@ TEST_P(GLES3DecoderTest, ImmutableTextureBaseLevelMaxLevelClamping) {
}
}
+TEST_P(GLES3DecoderTest, ClearRenderableLevelsWithOutOfRangeBaseLevel) {
+ // Regression test for https://crbug.com/983938
+ DoBindTexture(GL_TEXTURE_2D, client_texture_id_, kServiceTextureId);
+ DoTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 8, 8, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0,
+ 0);
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+
+ TextureManager* manager = group().texture_manager();
+ TextureRef* texture_ref = manager->GetTexture(client_texture_id_);
+ ASSERT_TRUE(texture_ref != nullptr);
+
+ {
+ EXPECT_CALL(*gl_, TexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 55));
+ TexParameteri cmd;
+ cmd.Init(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 55);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+ }
+
+ // The following call will trigger out-of-bounds access in asan build
+ // without fixing the bug.
+ manager->ClearRenderableLevels(GetDecoder(), texture_ref);
+}
+
// TODO(gman): Complete this test.
// TEST_P(GLES2DecoderTest, CompressedTexImage2DGLError) {
// }
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
index 1b0ea9291cb..e4e661189e3 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
@@ -430,12 +430,8 @@ static const GLenum valid_g_l_state_table_es3[] = {
GL_TRANSFORM_FEEDBACK_ACTIVE,
GL_TRANSFORM_FEEDBACK_BUFFER_BINDING,
GL_TRANSFORM_FEEDBACK_PAUSED,
- GL_TRANSFORM_FEEDBACK_BUFFER_SIZE,
- GL_TRANSFORM_FEEDBACK_BUFFER_START,
GL_UNIFORM_BUFFER_BINDING,
GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT,
- GL_UNIFORM_BUFFER_SIZE,
- GL_UNIFORM_BUFFER_START,
GL_UNPACK_IMAGE_HEIGHT,
GL_UNPACK_ROW_LENGTH,
GL_UNPACK_SKIP_IMAGES,
@@ -753,6 +749,7 @@ bool Validators::QueryTargetValidator::IsValid(const GLenum value) const {
case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
case GL_COMMANDS_COMPLETED_CHROMIUM:
case GL_READBACK_SHADOW_COPIES_UPDATED_CHROMIUM:
+ case GL_PROGRAM_COMPLETION_QUERY_CHROMIUM:
return true;
}
return false;
diff --git a/chromium/gpu/command_buffer/service/passthrough_abstract_texture_impl.cc b/chromium/gpu/command_buffer/service/passthrough_abstract_texture_impl.cc
index 4171fb81ede..d91a9412ccf 100644
--- a/chromium/gpu/command_buffer/service/passthrough_abstract_texture_impl.cc
+++ b/chromium/gpu/command_buffer/service/passthrough_abstract_texture_impl.cc
@@ -7,6 +7,7 @@
#include "gpu/command_buffer/service/abstract_texture.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/error_state.h"
+#include "gpu/command_buffer/service/gl_stream_texture_image.h"
#include "gpu/command_buffer/service/passthrough_abstract_texture_impl.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "ui/gl/gl_context.h"
@@ -73,7 +74,17 @@ void PassthroughAbstractTextureImpl::BindImage(gl::GLImage* image,
void PassthroughAbstractTextureImpl::BindStreamTextureImage(
GLStreamTextureImage* image,
GLuint service_id) {
- NOTREACHED();
+ DCHECK(image);
+ DCHECK(!decoder_managed_image_);
+
+ if (!texture_passthrough_)
+ return;
+
+ const GLuint target = texture_passthrough_->target();
+ const GLint level = 0;
+
+ texture_passthrough_->set_is_bind_pending(true);
+ texture_passthrough_->SetStreamLevelImage(target, level, image, service_id);
}
gl::GLImage* PassthroughAbstractTextureImpl::GetImage() const {
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index 896da608b3c..208fb4b5809 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -58,11 +58,13 @@
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "gpu/command_buffer/service/wrapped_sk_image.h"
+#include "gpu/vulkan/buildflags.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkDeferredDisplayListRecorder.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/core/SkSurfaceProps.h"
#include "third_party/skia/include/core/SkTypeface.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/GrContext.h"
#include "third_party/skia/include/gpu/GrTypes.h"
@@ -72,6 +74,11 @@
#include "ui/gl/gl_surface.h"
#include "ui/gl/gl_version_info.h"
+#if BUILDFLAG(ENABLE_VULKAN)
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
+#endif
+
// Local versions of the SET_GL_ERROR macros
#define LOCAL_SET_GL_ERROR(error, function_name, msg) \
ERRORSTATE_SET_GL_ERROR(error_state_.get(), error, function_name, msg)
@@ -345,6 +352,7 @@ class RasterDecoderImpl final : public RasterDecoder,
// ServiceFontManager::Client implementation.
scoped_refptr<Buffer> GetShmBuffer(uint32_t shm_id) override;
+ void ReportProgress() override;
private:
gles2::ContextState* state() const {
@@ -374,6 +382,8 @@ class RasterDecoderImpl final : public RasterDecoder,
void FlushToWorkAroundMacCrashes() {
#if defined(OS_MACOSX)
+ if (!shared_context_state_->GrContextIsGL())
+ return;
// This function does aggressive flushes to work around crashes in the
// macOS OpenGL driver.
// https://crbug.com/906453
@@ -554,6 +564,7 @@ class RasterDecoderImpl final : public RasterDecoder,
scoped_refptr<ServiceFontManager> font_manager_;
std::unique_ptr<SharedImageRepresentationSkia> shared_image_;
sk_sp<SkSurface> sk_surface_;
+ std::vector<GrBackendSemaphore> end_semaphores_;
std::unique_ptr<cc::ServicePaintCache> paint_cache_;
std::unique_ptr<SkDeferredDisplayListRecorder> recorder_;
@@ -679,10 +690,7 @@ RasterDecoderImpl::RasterDecoderImpl(
DCHECK(shared_context_state_);
}
-RasterDecoderImpl::~RasterDecoderImpl() {
- if (supports_oop_raster_)
- transfer_cache()->DeleteAllEntriesForDecoder(raster_decoder_id_);
-}
+RasterDecoderImpl::~RasterDecoderImpl() = default;
base::WeakPtr<DecoderContext> RasterDecoderImpl::AsWeakPtr() {
return weak_ptr_factory_.GetWeakPtr();
@@ -755,6 +763,10 @@ void RasterDecoderImpl::Destroy(bool have_context) {
DCHECK(!have_context || shared_context_state_->context()->IsCurrent(nullptr));
if (have_context) {
+ if (supports_oop_raster_) {
+ transfer_cache()->DeleteAllEntriesForDecoder(raster_decoder_id_);
+ }
+
if (copy_tex_image_blit_.get()) {
copy_tex_image_blit_->Destroy();
copy_tex_image_blit_.reset();
@@ -767,8 +779,23 @@ void RasterDecoderImpl::Destroy(bool have_context) {
// Make sure we flush any pending skia work on this context.
if (sk_surface_) {
- sk_surface_->flush();
- sk_surface_.reset();
+ GrFlushInfo flush_info = {
+ .fFlags = kNone_GrFlushFlags,
+ .fNumSemaphores = end_semaphores_.size(),
+ .fSignalSemaphores = end_semaphores_.data(),
+ };
+ AddVulkanCleanupTaskForSkiaFlush(
+ shared_context_state_->vk_context_provider(), &flush_info);
+ auto result = sk_surface_->flush(
+ SkSurface::BackendSurfaceAccess::kPresent, flush_info);
+ DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
+ end_semaphores_.clear();
+ if (shared_image_) {
+ shared_image_->EndWriteAccess(std::move(sk_surface_));
+ shared_image_.reset();
+ } else {
+ sk_surface_.reset();
+ }
}
if (gr_context()) {
gr_context()->flush();
@@ -796,7 +823,7 @@ void RasterDecoderImpl::Destroy(bool have_context) {
// Make this decoder's GL context current.
bool RasterDecoderImpl::MakeCurrent() {
- if (shared_context_state_->use_vulkan_gr_context())
+ if (!shared_context_state_->GrContextIsGL())
return true;
if (!context_.get())
@@ -849,7 +876,22 @@ Capabilities RasterDecoderImpl::GetCapabilities() {
caps.texture_storage_image =
feature_info()->feature_flags().chromium_texture_storage_image;
caps.texture_storage = feature_info()->feature_flags().ext_texture_storage;
- api()->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &caps.max_texture_size);
+ // TODO(piman): have a consistent limit in shared image backings.
+ // https://crbug.com/960588
+ if (shared_context_state_->GrContextIsGL()) {
+ api()->glGetIntegervFn(GL_MAX_TEXTURE_SIZE, &caps.max_texture_size);
+ } else if (shared_context_state_->GrContextIsVulkan()) {
+#if BUILDFLAG(ENABLE_VULKAN)
+ caps.max_texture_size = shared_context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->vk_physical_device_properties()
+ .limits.maxImageDimension2D;
+#else
+ NOTREACHED();
+#endif
+ } else {
+ NOTIMPLEMENTED();
+ }
if (feature_info()->workarounds().max_texture_size) {
caps.max_texture_size = std::min(
caps.max_texture_size, feature_info()->workarounds().max_texture_size);
@@ -1494,13 +1536,13 @@ error::Error RasterDecoderImpl::HandleEndQueryEXT(
}
void RasterDecoderImpl::DoFinish() {
- if (!shared_context_state_->use_vulkan_gr_context())
+ if (shared_context_state_->GrContextIsGL())
api()->glFinishFn();
ProcessPendingQueries(true);
}
void RasterDecoderImpl::DoFlush() {
- if (!shared_context_state_->use_vulkan_gr_context())
+ if (shared_context_state_->GrContextIsGL())
api()->glFlushFn();
ProcessPendingQueries(false);
}
@@ -2040,8 +2082,17 @@ void RasterDecoderImpl::DoBeginRasterCHROMIUM(
gr_context()->maxSurfaceSampleCountForColorType(sk_color_type))
final_msaa_count = 0;
- sk_surface_ =
- shared_image_->BeginWriteAccess(final_msaa_count, surface_props);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ DCHECK(end_semaphores_.empty());
+ sk_surface_ = shared_image_->BeginWriteAccess(
+ final_msaa_count, surface_props, &begin_semaphores, &end_semaphores_);
+
+ if (!begin_semaphores.empty()) {
+ bool result =
+ sk_surface_->wait(begin_semaphores.size(), begin_semaphores.data());
+ DCHECK(result);
+ }
+
if (!sk_surface_) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glBeginRasterCHROMIUM",
"failed to create surface");
@@ -2076,6 +2127,11 @@ scoped_refptr<Buffer> RasterDecoderImpl::GetShmBuffer(uint32_t shm_id) {
return GetSharedMemoryBuffer(shm_id);
}
+void RasterDecoderImpl::ReportProgress() {
+ if (shared_context_state_->progress_reporter())
+ shared_context_state_->progress_reporter()->ReportProgress();
+}
+
void RasterDecoderImpl::DoRasterCHROMIUM(GLuint raster_shm_id,
GLuint raster_shm_offset,
GLuint raster_shm_size,
@@ -2178,7 +2234,24 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
// hangs.
gl::ScopedProgressReporter report_progress(
shared_context_state_->progress_reporter());
- sk_surface_->flush();
+ GrFlushInfo flush_info = {
+ .fFlags = kNone_GrFlushFlags,
+ .fNumSemaphores = end_semaphores_.size(),
+ .fSignalSemaphores = end_semaphores_.data(),
+ };
+ AddVulkanCleanupTaskForSkiaFlush(
+ shared_context_state_->vk_context_provider(), &flush_info);
+ if (use_ddl_) {
+ // TODO(penghuang): Switch to sk_surface_->flush() when skia flush bug is
+ // fixed. https://crbug.com/958055
+ auto result = gr_context()->flush(flush_info);
+ DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
+ } else {
+ auto result = sk_surface_->flush(
+ SkSurface::BackendSurfaceAccess::kPresent, flush_info);
+ DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
+ }
+ end_semaphores_.clear();
}
if (!shared_image_) {
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
index 6837104f946..e6caf57d827 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest.cc
@@ -289,6 +289,7 @@ class RasterDecoderOOPTest : public testing::Test, DecoderClient {
context_state_->InitializeGL(GpuPreferences(), feature_info);
}
void TearDown() override {
+ context_state_->MakeCurrent(nullptr);
context_state_ = nullptr;
gl::init::ShutdownGL(false);
}
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
index ef5fcd2a7e2..323c8516286 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
@@ -278,6 +278,8 @@ void RasterDecoderTestBase::ResetDecoder() {
for (auto& image : shared_images_)
image->OnContextLost();
shared_images_.clear();
+ context_->GLContextStub::MakeCurrent(surface_.get());
+ shared_context_state_.reset();
::gl::MockGLInterface::SetGLInterface(nullptr);
gl_.reset();
gl::init::ShutdownGL(false);
diff --git a/chromium/gpu/command_buffer/service/sequence_id.h b/chromium/gpu/command_buffer/service/sequence_id.h
index a2302c2ae9e..e79ffc08ea0 100644
--- a/chromium/gpu/command_buffer/service/sequence_id.h
+++ b/chromium/gpu/command_buffer/service/sequence_id.h
@@ -5,12 +5,12 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_SEQUENCE_ID_H_
#define GPU_COMMAND_BUFFER_SERVICE_SEQUENCE_ID_H_
-#include "gpu/command_buffer/common/id_type.h"
+#include "base/util/type_safety/id_type.h"
namespace gpu {
class SyncPointOrderData;
-using SequenceId = gpu::IdTypeU32<SyncPointOrderData>;
+using SequenceId = util::IdTypeU32<SyncPointOrderData>;
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/service_font_manager.cc b/chromium/gpu/command_buffer/service/service_font_manager.cc
index 196c2934c66..3feef451a3f 100644
--- a/chromium/gpu/command_buffer/service/service_font_manager.cc
+++ b/chromium/gpu/command_buffer/service/service_font_manager.cc
@@ -118,6 +118,7 @@ class ServiceFontManager::SkiaDiscardableManager
ServiceFontManager::ServiceFontManager(Client* client)
: client_(client),
+ client_thread_id_(base::PlatformThread::CurrentId()),
strike_client_(std::make_unique<SkStrikeClient>(
sk_make_sp<SkiaDiscardableManager>(this))) {}
@@ -139,6 +140,7 @@ bool ServiceFontManager::Deserialize(
uint32_t memory_size,
std::vector<SkDiscardableHandleId>* locked_handles) {
base::AutoLock hold(lock_);
+ DCHECK_EQ(client_thread_id_, base::PlatformThread::CurrentId());
DCHECK(locked_handles->empty());
DCHECK(!destroyed_);
@@ -225,10 +227,23 @@ bool ServiceFontManager::DeleteHandle(SkDiscardableHandleId handle_id) {
if (destroyed_)
return true;
+ // If this method returns true, the strike associated with the handle will be
+ // deleted which deletes the memory for all glyphs cached by the strike. On
+ // mac this is resulting in hangs during strike deserialization when a bunch
+ // of strikes may be deleted in bulk. Try to avoid that by pinging the
+ // progress reporter before deleting each strike.
+ // Note that this method should generally only run on the Gpu main thread,
+ // where skia is used, except for single process webview where the renderer
+ // and GPU run in the same process.
+ const bool report_progress =
+ base::PlatformThread::CurrentId() == client_thread_id_;
+
auto it = discardable_handle_map_.find(handle_id);
if (it == discardable_handle_map_.end()) {
LOG(ERROR) << "Tried to delete invalid SkDiscardableHandleId: "
<< handle_id;
+ if (report_progress)
+ client_->ReportProgress();
return true;
}
@@ -237,6 +252,8 @@ bool ServiceFontManager::DeleteHandle(SkDiscardableHandleId handle_id) {
return false;
discardable_handle_map_.erase(it);
+ if (report_progress)
+ client_->ReportProgress();
return true;
}
diff --git a/chromium/gpu/command_buffer/service/service_font_manager.h b/chromium/gpu/command_buffer/service/service_font_manager.h
index 198c54fecb3..69a80876540 100644
--- a/chromium/gpu/command_buffer/service/service_font_manager.h
+++ b/chromium/gpu/command_buffer/service/service_font_manager.h
@@ -8,6 +8,7 @@
#include "base/containers/flat_map.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/lock.h"
+#include "base/threading/thread.h"
#include "gpu/command_buffer/common/discardable_handle.h"
#include "gpu/gpu_gles2_export.h"
#include "third_party/skia/src/core/SkRemoteGlyphCache.h"
@@ -22,6 +23,7 @@ class GPU_GLES2_EXPORT ServiceFontManager
public:
virtual ~Client() {}
virtual scoped_refptr<Buffer> GetShmBuffer(uint32_t shm_id) = 0;
+ virtual void ReportProgress() = 0;
};
ServiceFontManager(Client* client);
@@ -46,6 +48,7 @@ class GPU_GLES2_EXPORT ServiceFontManager
base::Lock lock_;
Client* client_;
+ const base::PlatformThreadId client_thread_id_;
std::unique_ptr<SkStrikeClient> strike_client_;
base::flat_map<SkDiscardableHandleId, ServiceDiscardableHandle>
discardable_handle_map_;
diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.cc b/chromium/gpu/command_buffer/service/service_transfer_cache.cc
index 56f7b63cb51..aaf3d0fd5a1 100644
--- a/chromium/gpu/command_buffer/service/service_transfer_cache.cc
+++ b/chromium/gpu/command_buffer/service/service_transfer_cache.cc
@@ -40,6 +40,10 @@ void DumpMemoryForImageTransferCacheEntry(
// Alias the image entry to its skia counterpart, taking ownership of the
// memory and preventing double counting.
+ //
+ // TODO(andrescj): if entry->image() is backed by multiple textures,
+ // getBackendTexture() would end up flattening them which is undesirable:
+ // figure out how to report memory usage for those cases.
DCHECK(entry->image());
GrBackendTexture image_backend_texture =
entry->image()->getBackendTexture(false /* flushPendingGrContextIO */);
@@ -207,14 +211,13 @@ void ServiceTransferCache::DeleteAllEntriesForDecoder(int decoder_id) {
}
}
-bool ServiceTransferCache::CreateLockedImageEntry(
+bool ServiceTransferCache::CreateLockedHardwareDecodedImageEntry(
int decoder_id,
uint32_t entry_id,
ServiceDiscardableHandle handle,
GrContext* context,
- base::span<const uint8_t> decoded_image,
- size_t row_bytes,
- const SkImageInfo& image_info,
+ std::vector<sk_sp<SkImage>> plane_images,
+ size_t buffer_byte_size,
bool needs_mips,
sk_sp<SkColorSpace> target_color_space) {
EntryKey key(decoder_id, cc::TransferCacheEntryType::kImage, entry_id);
@@ -222,12 +225,11 @@ bool ServiceTransferCache::CreateLockedImageEntry(
if (found != entries_.end())
return false;
- // Create the service-side image transfer cache entry. Note that this involves
- // uploading the image if it fits in GPU memory.
+ // Create the service-side image transfer cache entry.
auto entry = std::make_unique<cc::ServiceImageTransferCacheEntry>();
- if (!entry->BuildFromDecodedData(context, decoded_image, row_bytes,
- image_info, needs_mips,
- target_color_space)) {
+ if (!entry->BuildFromHardwareDecodedImage(context, std::move(plane_images),
+ buffer_byte_size, needs_mips,
+ std::move(target_color_space))) {
return false;
}
diff --git a/chromium/gpu/command_buffer/service/service_transfer_cache.h b/chromium/gpu/command_buffer/service/service_transfer_cache.h
index 8b2f16a1ab6..54ab52f847d 100644
--- a/chromium/gpu/command_buffer/service/service_transfer_cache.h
+++ b/chromium/gpu/command_buffer/service/service_transfer_cache.h
@@ -9,6 +9,7 @@
#include <stdint.h>
#include <memory>
+#include <vector>
#include "base/containers/mru_cache.h"
#include "base/containers/span.h"
@@ -21,7 +22,7 @@
class GrContext;
class SkColorSpace;
-struct SkImageInfo;
+class SkImage;
namespace gpu {
@@ -60,22 +61,21 @@ class GPU_GLES2_EXPORT ServiceTransferCache
cc::ServiceTransferCacheEntry* GetEntry(const EntryKey& key);
void DeleteAllEntriesForDecoder(int decoder_id);
- // Creates an image transfer cache entry using the decoded data in
- // |decoded_image|. The |context| will be used to upload the image (if it's
- // determined to fit in the GPU). |row_bytes| is the stride, and |image_info|
- // describes the decoded data. |decoder_id| and |entry_id| are used for
- // creating the ServiceTransferCache::EntryKey (assuming
- // cc::TransferCacheEntryType:kImage for the type). Returns true if the entry
- // could be created and inserted; false otherwise.
- bool CreateLockedImageEntry(int decoder_id,
- uint32_t entry_id,
- ServiceDiscardableHandle handle,
- GrContext* context,
- base::span<const uint8_t> decoded_image,
- size_t row_bytes,
- const SkImageInfo& image_info,
- bool needs_mips,
- sk_sp<SkColorSpace> target_color_space);
+ // Creates an image transfer cache entry using |plane_images| (refer to
+ // ServiceImageTransferCacheEntry::BuildFromHardwareDecodedImage() for
+ // details). |decoder_id| and |entry_id| are used for creating the
+ // ServiceTransferCache::EntryKey (assuming cc::TransferCacheEntryType:kImage
+ // for the type). Returns true if the entry could be created and inserted;
+ // false otherwise.
+ bool CreateLockedHardwareDecodedImageEntry(
+ int decoder_id,
+ uint32_t entry_id,
+ ServiceDiscardableHandle handle,
+ GrContext* context,
+ std::vector<sk_sp<SkImage>> plane_images,
+ size_t buffer_byte_size,
+ bool needs_mips,
+ sk_sp<SkColorSpace> target_color_space);
void PurgeMemory(
base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level);
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc
index f7d5e0630d2..2010eb46707 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/shared_context_state.cc
@@ -23,6 +23,10 @@
#include "components/viz/common/gpu/vulkan_context_provider.h"
#endif
+#if defined(OS_MACOSX)
+#include "components/viz/common/gpu/metal_context_provider.h"
+#endif
+
namespace {
static constexpr size_t kInitialScratchDeserializationBufferSize = 1024;
}
@@ -35,24 +39,32 @@ SharedContextState::SharedContextState(
scoped_refptr<gl::GLContext> context,
bool use_virtualized_gl_contexts,
base::OnceClosure context_lost_callback,
- viz::VulkanContextProvider* vulkan_context_provider)
+ viz::VulkanContextProvider* vulkan_context_provider,
+ viz::MetalContextProvider* metal_context_provider)
: use_virtualized_gl_contexts_(use_virtualized_gl_contexts),
context_lost_callback_(std::move(context_lost_callback)),
vk_context_provider_(vulkan_context_provider),
-#if BUILDFLAG(ENABLE_VULKAN)
- gr_context_(vk_context_provider_ ? vk_context_provider_->GetGrContext()
- : nullptr),
-#endif
- use_vulkan_gr_context_(!!vk_context_provider_),
+ metal_context_provider_(metal_context_provider),
share_group_(std::move(share_group)),
context_(context),
real_context_(std::move(context)),
surface_(std::move(surface)),
weak_ptr_factory_(this) {
- if (use_vulkan_gr_context_) {
+ if (GrContextIsVulkan()) {
+#if BUILDFLAG(ENABLE_VULKAN)
+ gr_context_ = vk_context_provider_->GetGrContext();
+#endif
+ use_virtualized_gl_contexts_ = false;
DCHECK(gr_context_);
+ }
+ if (GrContextIsMetal()) {
+#if defined(OS_MACOSX)
+ gr_context_ = metal_context_provider_->GetGrContext();
+#endif
use_virtualized_gl_contexts_ = false;
+ DCHECK(gr_context_);
}
+
if (base::ThreadTaskRunnerHandle::IsSet()) {
base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "SharedContextState", base::ThreadTaskRunnerHandle::Get());
@@ -63,8 +75,24 @@ SharedContextState::SharedContextState(
}
SharedContextState::~SharedContextState() {
- if (gr_context_)
- gr_context_->abandonContext();
+ // Delete the transfer cache first: that way, destruction callbacks for image
+ // entries can use *|this| to make the context current and do GPU clean up.
+ // The context should be current so that texture deletes that result from
+ // destroying the cache happen in the right context (unless the context is
+ // lost in which case we don't delete the textures).
+ DCHECK(IsCurrent(nullptr) || context_lost_);
+ transfer_cache_.reset();
+
+ // We should have the last ref on this GrContext to ensure we're not holding
+ // onto any skia objects using this context. Note that some tests don't run
+ // InitializeGrContext(), so |owned_gr_context_| is not expected to be
+ // initialized.
+ DCHECK(!owned_gr_context_ || owned_gr_context_->unique());
+
+ // Delete the GrContext. This will either do cleanup if the context is
+ // current, or the GrContext was already abandoned if the GLContext was lost.
+ owned_gr_context_.reset();
+
if (context_->IsCurrent(nullptr))
context_->ReleaseCurrent(nullptr);
base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
@@ -78,7 +106,12 @@ void SharedContextState::InitializeGrContext(
gl::ProgressReporter* progress_reporter) {
progress_reporter_ = progress_reporter;
- if (!use_vulkan_gr_context_) {
+#if defined(OS_MACOSX)
+ if (metal_context_provider_)
+ metal_context_provider_->SetProgressReporter(progress_reporter);
+#endif
+
+ if (GrContextIsGL()) {
DCHECK(context_->IsCurrent(nullptr));
sk_sp<GrGLInterface> interface(gl::init::CreateGrGLInterface(
*context_->GetVersionInfo(), workarounds.use_es2_for_oopr,
@@ -194,7 +227,7 @@ bool SharedContextState::InitializeGL(
}
bool SharedContextState::MakeCurrent(gl::GLSurface* surface) {
- if (use_vulkan_gr_context_)
+ if (!GrContextIsGL())
return true;
if (context_lost_)
@@ -208,6 +241,7 @@ bool SharedContextState::MakeCurrent(gl::GLSurface* surface) {
}
void SharedContextState::MarkContextLost() {
+ DCHECK(GrContextIsGL());
if (!context_lost_) {
scoped_refptr<SharedContextState> prevent_last_ref_drop = this;
context_lost_ = true;
@@ -223,8 +257,10 @@ void SharedContextState::MarkContextLost() {
}
bool SharedContextState::IsCurrent(gl::GLSurface* surface) {
- if (use_vulkan_gr_context_)
+ if (!GrContextIsGL())
return true;
+ if (context_lost_)
+ return false;
return context_->IsCurrent(surface);
}
@@ -281,7 +317,7 @@ void SharedContextState::PessimisticallyResetGrContext() const {
// Calling GrContext::resetContext() is very cheap, so we do it
// pessimistically. We could dirty less state if skia state setting
// performance becomes an issue.
- if (gr_context_ && !use_vulkan_gr_context_)
+ if (gr_context_ && GrContextIsGL())
gr_context_->resetContext();
}
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.h b/chromium/gpu/command_buffer/service/shared_context_state.h
index deef1ebeab7..5c57a6232ee 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.h
+++ b/chromium/gpu/command_buffer/service/shared_context_state.h
@@ -27,6 +27,7 @@ class GLSurface;
} // namespace gl
namespace viz {
+class MetalContextProvider;
class VulkanContextProvider;
} // namespace viz
@@ -54,12 +55,18 @@ class GPU_GLES2_EXPORT SharedContextState
scoped_refptr<gl::GLContext> context,
bool use_virtualized_gl_contexts,
base::OnceClosure context_lost_callback,
- viz::VulkanContextProvider* vulkan_context_provider = nullptr);
+ viz::VulkanContextProvider* vulkan_context_provider = nullptr,
+ viz::MetalContextProvider* metal_context_provider = nullptr);
void InitializeGrContext(const GpuDriverBugWorkarounds& workarounds,
GrContextOptions::PersistentCache* cache,
GpuProcessActivityFlags* activity_flags = nullptr,
gl::ProgressReporter* progress_reporter = nullptr);
+ bool GrContextIsGL() const {
+ return !vk_context_provider_ && !metal_context_provider_;
+ }
+ bool GrContextIsVulkan() const { return vk_context_provider_; }
+ bool GrContextIsMetal() const { return metal_context_provider_; }
bool InitializeGL(const GpuPreferences& gpu_preferences,
scoped_refptr<gles2::FeatureInfo> feature_info);
@@ -81,6 +88,9 @@ class GPU_GLES2_EXPORT SharedContextState
viz::VulkanContextProvider* vk_context_provider() {
return vk_context_provider_;
}
+ viz::MetalContextProvider* metal_context_provider() {
+ return metal_context_provider_;
+ }
gl::ProgressReporter* progress_reporter() const { return progress_reporter_; }
GrContext* gr_context() { return gr_context_; }
gles2::FeatureInfo* feature_info() { return feature_info_.get(); }
@@ -94,7 +104,6 @@ class GPU_GLES2_EXPORT SharedContextState
std::vector<uint8_t>* scratch_deserialization_buffer() {
return &scratch_deserialization_buffer_;
}
- bool use_vulkan_gr_context() const { return use_vulkan_gr_context_; }
size_t glyph_cache_max_texture_bytes() const {
return glyph_cache_max_texture_bytes_;
}
@@ -144,9 +153,9 @@ class GPU_GLES2_EXPORT SharedContextState
bool use_virtualized_gl_contexts_ = false;
base::OnceClosure context_lost_callback_;
- viz::VulkanContextProvider* vk_context_provider_ = nullptr;
+ viz::VulkanContextProvider* const vk_context_provider_;
+ viz::MetalContextProvider* const metal_context_provider_;
GrContext* gr_context_ = nullptr;
- const bool use_vulkan_gr_context_;
scoped_refptr<gl::GLShareGroup> share_group_;
scoped_refptr<gl::GLContext> context_;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.cc b/chromium/gpu/command_buffer/service/shared_image_backing.cc
index ab5b0a7be82..aed8536a4c5 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing.cc
@@ -35,6 +35,12 @@ void SharedImageBacking::OnContextLost() {
have_context_ = false;
}
+#if defined(OS_WIN)
+bool SharedImageBacking::PresentSwapChain() {
+ return false;
+}
+#endif // OS_WIN
+
std::unique_ptr<SharedImageRepresentationGLTexture>
SharedImageBacking::ProduceGLTexture(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing.h b/chromium/gpu/command_buffer/service/shared_image_backing.h
index 53387c6306c..88b29155d69 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing.h
@@ -13,6 +13,7 @@
#include "base/memory/scoped_refptr.h"
#include "base/optional.h"
#include "base/synchronization/lock.h"
+#include "build/build_config.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/gpu_gles2_export.h"
@@ -78,6 +79,12 @@ class GPU_GLES2_EXPORT SharedImageBacking {
// Destroys the underlying backing. Must be called before destruction.
virtual void Destroy() = 0;
+#if defined(OS_WIN)
+ // Swaps buffers of the swap chain associated with this backing. Returns true
+ // on success.
+ virtual bool PresentSwapChain();
+#endif // OS_WIN
+
// Allows the backing to attach additional data to the dump or dump
// additional sub paths.
virtual void OnMemoryDump(const std::string& dump_name,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
index 82ab25c2e62..406b13c85a3 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory.h
@@ -47,6 +47,11 @@ class SharedImageBackingFactory {
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) = 0;
+
+ // Returns true if the specified GpuMemoryBufferType can be imported using
+ // this factory.
+ virtual bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) = 0;
};
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
index 2def0b0ce11..0d597bb04ef 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
@@ -42,6 +42,7 @@
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
+#include "ui/gl/android/android_surface_control_compat.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_fence_android_native_fence_sync.h"
#include "ui/gl/gl_gl_api_implementation.h"
@@ -73,94 +74,6 @@ std::ostream& operator<<(std::ostream& os, RepresentationAccessMode mode) {
return os;
}
-bool BeginVulkanAccess(viz::VulkanContextProvider* context_provider,
- base::ScopedFD begin_sync_fd) {
- VkDevice vk_device = context_provider->GetDeviceQueue()->GetVulkanDevice();
- VulkanImplementation* vk_implementation =
- context_provider->GetVulkanImplementation();
- VulkanFenceHelper* fence_helper =
- context_provider->GetDeviceQueue()->GetFenceHelper();
- VkQueue vk_queue = context_provider->GetDeviceQueue()->GetVulkanQueue();
-
- // Wait on the provided |begin_sync_fd|.
- VkSemaphore semaphore = VK_NULL_HANDLE;
- if (begin_sync_fd.is_valid()) {
- semaphore = vk_implementation->ImportSemaphoreHandle(
- vk_device,
- SemaphoreHandle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
- std::move(begin_sync_fd)));
- if (semaphore == VK_NULL_HANDLE) {
- return false;
- }
-
- // Submit wait semaphore to the queue. Note that Skia uses the same queue
- // exposed by vk_queue(), so this will work due to Vulkan queue ordering.
- if (!SubmitWaitVkSemaphore(vk_queue, semaphore)) {
- vkDestroySemaphore(vk_device, semaphore, nullptr);
- return false;
- }
-
- // Enqueue destruction of the semaphore here.
- // TODO(ericrk): Don't worry about generating a fence above, we will
- // generate one in EndVulkanAccess. Refactoring will remove this path
- // soon.
- fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(semaphore);
- }
-
- return true;
-}
-
-void EndVulkanAccess(viz::VulkanContextProvider* context_provider,
- base::ScopedFD* sync_fd) {
- VulkanImplementation* vk_implementation =
- context_provider->GetVulkanImplementation();
- VkDevice vk_device = context_provider->GetDeviceQueue()->GetVulkanDevice();
- VkQueue vk_queue = context_provider->GetDeviceQueue()->GetVulkanQueue();
- VulkanFenceHelper* fence_helper =
- context_provider->GetDeviceQueue()->GetFenceHelper();
-
- // Create a vk semaphore which can be exported.
- VkExportSemaphoreCreateInfo export_info;
- export_info.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
- export_info.pNext = nullptr;
- export_info.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
-
- VkSemaphore vk_semaphore;
- VkSemaphoreCreateInfo sem_info;
- sem_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
- sem_info.pNext = &export_info;
- sem_info.flags = 0;
- VkResult result =
- vkCreateSemaphore(vk_device, &sem_info, nullptr, &vk_semaphore);
- if (result != VK_SUCCESS) {
- LOG(ERROR) << "vkCreateSemaphore failed";
- return;
- }
-
- VkFence vk_fence;
- result = fence_helper->GetFence(&vk_fence);
- if (result != VK_SUCCESS) {
- LOG(ERROR) << "Failed to create fence.";
- vkDestroySemaphore(vk_device, vk_semaphore, nullptr);
- return;
- }
-
- if (!SubmitSignalVkSemaphore(vk_queue, vk_semaphore, vk_fence)) {
- LOG(ERROR) << "Failed to wait on semaphore";
- vkDestroySemaphore(vk_device, vk_semaphore, nullptr);
- return;
- }
-
- // Export a sync fd from the semaphore.
- SemaphoreHandle semaphore_handle =
- vk_implementation->GetSemaphoreHandle(vk_device, vk_semaphore);
- *sync_fd = semaphore_handle.TakeHandle();
-
- // Enqueue cleanup of the semaphore based on the submission fence.
- fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(vk_semaphore);
- fence_helper->EnqueueFence(vk_fence);
-}
-
sk_sp<SkPromiseImageTexture> CreatePromiseTexture(
viz::VulkanContextProvider* context_provider,
base::android::ScopedHardwareBufferHandle ahb_handle,
@@ -377,21 +290,17 @@ class SharedImageRepresentationSkiaGLAHB
}
~SharedImageRepresentationSkiaGLAHB() override {
- if (mode_ == RepresentationAccessMode::kRead) {
- EndReadAccess();
- } else if (mode_ == RepresentationAccessMode::kWrite) {
- EndWriteAccessInternal();
- }
-
+ DCHECK_EQ(RepresentationAccessMode::kNone, mode_);
DCHECK(!surface_);
-
if (texture_)
texture_->RemoveLightweightRef(has_context());
}
sk_sp<SkSurface> BeginWriteAccess(
int final_msaa_count,
- const SkSurfaceProps& surface_props) override {
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
CheckContext();
@@ -406,9 +315,8 @@ class SharedImageRepresentationSkiaGLAHB
if (!InsertEglFenceAndWait(std::move(sync_fd)))
return nullptr;
- if (!promise_texture_) {
+ if (!promise_texture_)
return nullptr;
- }
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
@@ -426,13 +334,12 @@ class SharedImageRepresentationSkiaGLAHB
DCHECK(surface_);
DCHECK_EQ(surface.get(), surface_);
DCHECK(surface->unique());
- // TODO(ericrk): Keep the surface around for re-use.
- surface_ = nullptr;
-
- EndWriteAccessInternal();
+ EndAccess(false /* readonly */);
}
- sk_sp<SkPromiseImageTexture> BeginReadAccess() override {
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
CheckContext();
@@ -450,11 +357,7 @@ class SharedImageRepresentationSkiaGLAHB
void EndReadAccess() override {
DCHECK_EQ(mode_, RepresentationAccessMode::kRead);
CheckContext();
-
- base::ScopedFD sync_fd = CreateEglFenceAndExportFd();
- ahb_backing()->EndRead(this, std::move(sync_fd));
-
- mode_ = RepresentationAccessMode::kNone;
+ EndAccess(true /* readonly */);
}
private:
@@ -468,20 +371,23 @@ class SharedImageRepresentationSkiaGLAHB
#endif
}
- void EndWriteAccessInternal() {
+ void EndAccess(bool readonly) {
CheckContext();
- DCHECK_EQ(RepresentationAccessMode::kWrite, mode_);
// Insert a gl fence to signal the write completion.
base::ScopedFD sync_fd = CreateEglFenceAndExportFd();
- ahb_backing()->EndWrite(std::move(sync_fd));
+ if (readonly)
+ ahb_backing()->EndRead(this, std::move(sync_fd));
+ else
+ ahb_backing()->EndWrite(std::move(sync_fd));
- if (texture_) {
+ if (texture_ && !readonly) {
if (texture_->IsLevelCleared(texture_->target(), 0))
backing()->SetCleared();
}
mode_ = RepresentationAccessMode::kNone;
+ surface_ = nullptr;
}
scoped_refptr<SharedContextState> context_state_;
@@ -521,19 +427,14 @@ class SharedImageRepresentationSkiaVkAHB
sk_sp<SkSurface> BeginWriteAccess(
int final_msaa_count,
- const SkSurfaceProps& surface_props) override {
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
- // If previous access has not ended.
DCHECK(!surface_);
- base::ScopedFD sync_fd;
- if (!ahb_backing()->BeginWrite(&sync_fd))
- return nullptr;
-
- if (!BeginVulkanAccess(context_state_->vk_context_provider(),
- std::move(sync_fd))) {
+ if (!BeginAccess(false /* readonly */, begin_semaphores, end_semaphores))
return nullptr;
- }
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
@@ -541,14 +442,8 @@ class SharedImageRepresentationSkiaVkAHB
context_state_->gr_context(), promise_texture_->backendTexture(),
kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type, nullptr,
&surface_props);
-
- // Cache the sk surface in the representation so that it can be used in the
- // EndWriteAccess. Also make sure previous surface_ have been consumed by
- // EndWriteAccess() call.
+ DCHECK(surface);
surface_ = surface.get();
-
- mode_ = RepresentationAccessMode::kWrite;
-
return surface;
}
@@ -556,19 +451,19 @@ class SharedImageRepresentationSkiaVkAHB
DCHECK_EQ(mode_, RepresentationAccessMode::kWrite);
DCHECK_EQ(surface.get(), surface_);
DCHECK(surface->unique());
- EndWriteAccessInternal();
+
+ EndAccess(false /* readonly */);
+ surface_ = nullptr;
}
- sk_sp<SkPromiseImageTexture> BeginReadAccess() override {
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
DCHECK_EQ(mode_, RepresentationAccessMode::kNone);
+ DCHECK(!surface_);
- // Synchronise the read access with the writes.
- base::ScopedFD sync_fd;
- if (!ahb_backing()->BeginRead(this, &sync_fd))
+ if (!BeginAccess(true /* readonly */, begin_semaphores, end_semaphores))
return nullptr;
-
- mode_ = RepresentationAccessMode::kRead;
-
return promise_texture_;
}
@@ -576,12 +471,7 @@ class SharedImageRepresentationSkiaVkAHB
DCHECK_EQ(mode_, RepresentationAccessMode::kRead);
DCHECK(!surface_);
- base::ScopedFD sync_fd;
- EndVulkanAccess(context_state_->vk_context_provider(), &sync_fd);
- // pass this sync fd to the backing.
- ahb_backing()->EndRead(this, std::move(sync_fd));
-
- mode_ = RepresentationAccessMode::kNone;
+ EndAccess(true /* readonly */);
}
private:
@@ -611,16 +501,77 @@ class SharedImageRepresentationSkiaVkAHB
->GetVulkanQueue();
}
- void EndWriteAccessInternal() {
- // There should be a surface_ from the BeginWriteAccess().
- DCHECK_EQ(RepresentationAccessMode::kWrite, mode_);
- DCHECK(surface_);
+ bool BeginAccess(bool readonly,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ DCHECK(begin_semaphores);
+ DCHECK(end_semaphores);
+ DCHECK(end_access_semaphore_ == VK_NULL_HANDLE);
+ // Synchronise the read access with the writes.
base::ScopedFD sync_fd;
- EndVulkanAccess(context_state_->vk_context_provider(), &sync_fd);
- surface_ = nullptr;
- ahb_backing()->EndWrite(std::move(sync_fd));
+ if (readonly) {
+ if (!ahb_backing()->BeginRead(this, &sync_fd))
+ return false;
+ } else {
+ if (!ahb_backing()->BeginWrite(&sync_fd))
+ return false;
+ }
+
+ VkSemaphore begin_access_semaphore = VK_NULL_HANDLE;
+ if (sync_fd.is_valid()) {
+ begin_access_semaphore = vk_implementation()->ImportSemaphoreHandle(
+ vk_device(),
+ SemaphoreHandle(VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT,
+ std::move(sync_fd)));
+ if (begin_access_semaphore == VK_NULL_HANDLE) {
+ DLOG(ERROR) << "Failed to import semaphore from sync_fd.";
+ return false;
+ }
+ }
+
+ end_access_semaphore_ =
+ vk_implementation()->CreateExternalSemaphore(vk_device());
+
+ if (end_access_semaphore_ == VK_NULL_HANDLE) {
+ DLOG(ERROR) << "Failed to create the external semaphore.";
+ if (begin_access_semaphore != VK_NULL_HANDLE) {
+ vkDestroySemaphore(vk_device(), begin_access_semaphore,
+ nullptr /* pAllocator */);
+ }
+ return false;
+ }
+
+ if (begin_access_semaphore != VK_NULL_HANDLE) {
+ begin_semaphores->emplace_back();
+ begin_semaphores->back().initVulkan(begin_access_semaphore);
+ }
+ end_semaphores->emplace_back();
+ end_semaphores->back().initVulkan(end_access_semaphore_);
+
+ mode_ = readonly ? RepresentationAccessMode::kRead
+ : RepresentationAccessMode::kWrite;
+ return true;
+ }
+ void EndAccess(bool readonly) {
+ // There should be a surface_ from the BeginWriteAccess().
+ DCHECK(end_access_semaphore_ != VK_NULL_HANDLE);
+
+ SemaphoreHandle semaphore_handle = vk_implementation()->GetSemaphoreHandle(
+ vk_device(), end_access_semaphore_);
+ auto sync_fd = semaphore_handle.TakeHandle();
+ DCHECK(sync_fd.is_valid());
+ if (readonly)
+ ahb_backing()->EndRead(this, std::move(sync_fd));
+ else
+ ahb_backing()->EndWrite(std::move(sync_fd));
+ VulkanFenceHelper* fence_helper = context_state_->vk_context_provider()
+ ->GetDeviceQueue()
+ ->GetFenceHelper();
+ fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(
+ end_access_semaphore_);
+ end_access_semaphore_ = VK_NULL_HANDLE;
mode_ = RepresentationAccessMode::kNone;
}
@@ -628,6 +579,7 @@ class SharedImageRepresentationSkiaVkAHB
RepresentationAccessMode mode_ = RepresentationAccessMode::kNone;
SkSurface* surface_ = nullptr;
scoped_refptr<SharedContextState> context_state_;
+ VkSemaphore end_access_semaphore_ = VK_NULL_HANDLE;
};
SharedImageBackingAHB::SharedImageBackingAHB(
@@ -726,7 +678,7 @@ SharedImageBackingAHB::ProduceSkia(
// Check whether we are in Vulkan mode OR GL mode and accordingly create
// Skia representation.
- if (context_state->use_vulkan_gr_context()) {
+ if (context_state->GrContextIsVulkan()) {
sk_sp<SkPromiseImageTexture> promise_texture = CreatePromiseTexture(
context_state->vk_context_provider(), GetAhbHandle(), size(), format());
if (!promise_texture)
@@ -735,6 +687,7 @@ SharedImageBackingAHB::ProduceSkia(
manager, this, std::move(context_state), std::move(promise_texture),
tracker);
}
+ DCHECK(context_state->GrContextIsGL());
auto* texture = GenGLTexture();
if (!texture)
@@ -1036,6 +989,8 @@ SharedImageBackingFactoryAHB::CreateSharedImage(
// flags based on the usage params in the current function call.
hwb_desc.usage = AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE |
AHARDWAREBUFFER_USAGE_GPU_COLOR_OUTPUT;
+ if (usage & SHARED_IMAGE_USAGE_SCANOUT)
+ hwb_desc.usage |= gl::SurfaceControl::RequiredUsage();
// Number of images in an image array.
hwb_desc.layers = 1;
@@ -1071,6 +1026,11 @@ SharedImageBackingFactoryAHB::CreateSharedImage(
return nullptr;
}
+bool SharedImageBackingFactoryAHB::CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) {
+ return false;
+}
+
SharedImageBackingFactoryAHB::FormatInfo::FormatInfo() = default;
SharedImageBackingFactoryAHB::FormatInfo::~FormatInfo() = default;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
index a1cec3595dd..49cf7fea364 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.h
@@ -55,6 +55,8 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryAHB
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) override;
+ bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) override;
private:
struct FormatInfo {
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
index 24cac235233..cbffb4fdb49 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer_unittest.cc
@@ -22,6 +22,7 @@
#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gfx/color_space.h"
#include "ui/gl/gl_bindings.h"
@@ -120,13 +121,22 @@ TEST_F(SharedImageBackingFactoryAHBTest, Basic) {
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
gl_legacy_shared_image.mailbox(), context_state_.get());
EXPECT_TRUE(skia_representation);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
auto surface = skia_representation->BeginWriteAccess(
- 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry), &begin_semaphores,
+ &end_semaphores);
EXPECT_TRUE(surface);
EXPECT_EQ(gl_legacy_shared_image.size().width(), surface->width());
EXPECT_EQ(gl_legacy_shared_image.size().height(), surface->height());
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
+
skia_representation->EndWriteAccess(std::move(surface));
- auto promise_texture = skia_representation->BeginReadAccess();
+ auto promise_texture =
+ skia_representation->BeginReadAccess(&begin_semaphores, &end_semaphores);
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
EXPECT_TRUE(promise_texture);
if (promise_texture) {
GrBackendTexture backend_texture = promise_texture->backendTexture();
@@ -187,7 +197,12 @@ TEST_F(SharedImageBackingFactoryAHBTest, GLSkiaGL) {
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
mailbox, context_state_.get());
EXPECT_TRUE(skia_representation);
- auto promise_texture = skia_representation->BeginReadAccess();
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ auto promise_texture =
+ skia_representation->BeginReadAccess(&begin_semaphores, &end_semaphores);
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
EXPECT_TRUE(promise_texture);
if (promise_texture) {
GrBackendTexture backend_texture = promise_texture->backendTexture();
@@ -295,11 +310,26 @@ TEST_F(SharedImageBackingFactoryAHBTest, OnlyOneWriter) {
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
gl_legacy_shared_image.mailbox(), context_state_.get());
+
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
auto surface = skia_representation->BeginWriteAccess(
- 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry), &begin_semaphores,
+ &end_semaphores);
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
- EXPECT_FALSE(skia_representation->BeginWriteAccess(
- 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry)));
+ auto skia_representation2 = shared_image_representation_factory_->ProduceSkia(
+ gl_legacy_shared_image.mailbox(), context_state_.get());
+ std::vector<GrBackendSemaphore> begin_semaphores2;
+ std::vector<GrBackendSemaphore> end_semaphores2;
+
+ EXPECT_FALSE(skia_representation2->BeginWriteAccess(
+ 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry), &begin_semaphores2,
+ &end_semaphores2));
+ EXPECT_EQ(0u, begin_semaphores2.size());
+ EXPECT_EQ(0u, end_semaphores2.size());
+ skia_representation2.reset();
skia_representation->EndWriteAccess(std::move(surface));
skia_representation.reset();
@@ -320,8 +350,17 @@ TEST_F(SharedImageBackingFactoryAHBTest, CanHaveMultipleReaders) {
auto skia_representation2 = shared_image_representation_factory_->ProduceSkia(
gl_legacy_shared_image.mailbox(), context_state_.get());
- EXPECT_TRUE(skia_representation->BeginReadAccess());
- EXPECT_TRUE(skia_representation2->BeginReadAccess());
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ EXPECT_TRUE(
+ skia_representation->BeginReadAccess(&begin_semaphores, &end_semaphores));
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
+
+ EXPECT_TRUE(skia_representation2->BeginReadAccess(&begin_semaphores,
+ &end_semaphores));
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
skia_representation2->EndReadAccess();
skia_representation2.reset();
@@ -329,9 +368,8 @@ TEST_F(SharedImageBackingFactoryAHBTest, CanHaveMultipleReaders) {
skia_representation.reset();
}
-// Test to check that we cannot begin reading twice on the same representation
-TEST_F(SharedImageBackingFactoryAHBTest,
- CannotReadMultipleTimesOnSameRepresentation) {
+// Test to check that a context cannot write while another context is reading
+TEST_F(SharedImageBackingFactoryAHBTest, CannotWriteWhileReading) {
if (!base::AndroidHardwareBufferCompat::IsSupportAvailable())
return;
@@ -342,29 +380,25 @@ TEST_F(SharedImageBackingFactoryAHBTest,
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
gl_legacy_shared_image.mailbox(), context_state_.get());
- EXPECT_TRUE(skia_representation->BeginReadAccess());
- EXPECT_FALSE(skia_representation->BeginReadAccess());
-
- skia_representation->EndReadAccess();
- skia_representation.reset();
-}
-
-// Test to check that a context cannot write while another context is reading
-TEST_F(SharedImageBackingFactoryAHBTest, CannotWriteWhileReading) {
- if (!base::AndroidHardwareBufferCompat::IsSupportAvailable())
- return;
- GlLegacySharedImage gl_legacy_shared_image{
- backing_factory_.get(), true /* is_thread_safe */,
- &mailbox_manager_, &shared_image_manager_,
- memory_type_tracker_.get(), shared_image_representation_factory_.get()};
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ EXPECT_TRUE(
+ skia_representation->BeginReadAccess(&begin_semaphores, &end_semaphores));
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
- auto skia_representation = shared_image_representation_factory_->ProduceSkia(
+ auto skia_representation2 = shared_image_representation_factory_->ProduceSkia(
gl_legacy_shared_image.mailbox(), context_state_.get());
- EXPECT_TRUE(skia_representation->BeginReadAccess());
- EXPECT_FALSE(skia_representation->BeginWriteAccess(
- 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry)));
+ std::vector<GrBackendSemaphore> begin_semaphores2;
+ std::vector<GrBackendSemaphore> end_semaphores2;
+ EXPECT_FALSE(skia_representation2->BeginWriteAccess(
+ 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry), &begin_semaphores2,
+ &end_semaphores2));
+ EXPECT_EQ(0u, begin_semaphores2.size());
+ EXPECT_EQ(0u, end_semaphores2.size());
+ skia_representation2.reset();
skia_representation->EndReadAccess();
skia_representation.reset();
@@ -382,10 +416,24 @@ TEST_F(SharedImageBackingFactoryAHBTest, CannotReadWhileWriting) {
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
gl_legacy_shared_image.mailbox(), context_state_.get());
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
auto surface = skia_representation->BeginWriteAccess(
- 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry), &begin_semaphores,
+ &end_semaphores);
+ EXPECT_EQ(0u, begin_semaphores.size());
+ EXPECT_EQ(0u, end_semaphores.size());
- EXPECT_FALSE(skia_representation->BeginReadAccess());
+ auto skia_representation2 = shared_image_representation_factory_->ProduceSkia(
+ gl_legacy_shared_image.mailbox(), context_state_.get());
+ std::vector<GrBackendSemaphore> begin_semaphores2;
+ std::vector<GrBackendSemaphore> end_semaphores2;
+
+ EXPECT_FALSE(skia_representation2->BeginReadAccess(&begin_semaphores2,
+ &end_semaphores2));
+ EXPECT_EQ(0u, begin_semaphores2.size());
+ EXPECT_EQ(0u, end_semaphores2.size());
+ skia_representation2.reset();
skia_representation->EndWriteAccess(std::move(surface));
skia_representation.reset();
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
index 9daf649f355..0126a74c8a7 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
@@ -287,7 +287,9 @@ class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
sk_sp<SkSurface> BeginWriteAccess(
int final_msaa_count,
- const SkSurfaceProps& surface_props) override {
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
CheckContext();
if (write_surface_)
return nullptr;
@@ -313,7 +315,9 @@ class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
write_surface_ = nullptr;
}
- sk_sp<SkPromiseImageTexture> BeginReadAccess() override {
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
CheckContext();
static_cast<SharedImageBackingWithReadAccess*>(backing())
->BeginReadAccess();
@@ -805,8 +809,7 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
const char* error_message = "unspecified";
if (!gles2::ValidateCompressedTexDimensions(
target, 0 /* level */, size.width(), size.height(), 1 /* depth */,
- format_info.image_internal_format, false /* restrict_for_webgl */,
- &error_message)) {
+ format_info.image_internal_format, &error_message)) {
LOG(ERROR) << "CreateSharedImage: "
"ValidateCompressedTexDimensionsFailed with error: "
<< error_message;
@@ -1042,6 +1045,14 @@ scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLTexture::MakeGLImage(
std::move(handle), size, format, client_id, surface_handle);
}
+bool SharedImageBackingFactoryGLTexture::CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) {
+ // SharedImageFactory may call CanImportGpuMemoryBuffer() in all other
+ // SharedImageBackingFactory implementations except this one.
+ NOTREACHED();
+ return true;
+}
+
std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryGLTexture::MakeBacking(
bool passthrough,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
index 7dba8b7bfcf..d077ef449d4 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
@@ -71,6 +71,8 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) override;
+ bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) override;
static std::unique_ptr<SharedImageBacking> CreateSharedImageForTest(
const Mailbox& mailbox,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
index e84928e62b4..b2cbef5e4ef 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
@@ -25,6 +25,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/color_space.h"
@@ -180,14 +181,22 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Basic) {
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
mailbox, context_state_.get());
EXPECT_TRUE(skia_representation);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
auto surface = skia_representation->BeginWriteAccess(
- 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry), &begin_semaphores,
+ &end_semaphores);
EXPECT_TRUE(surface);
EXPECT_EQ(size.width(), surface->width());
EXPECT_EQ(size.height(), surface->height());
+ EXPECT_TRUE(begin_semaphores.empty());
+ EXPECT_TRUE(end_semaphores.empty());
skia_representation->EndWriteAccess(std::move(surface));
- auto promise_texture = skia_representation->BeginReadAccess();
+ auto promise_texture =
+ skia_representation->BeginReadAccess(&begin_semaphores, &end_semaphores);
EXPECT_TRUE(promise_texture);
+ EXPECT_TRUE(begin_semaphores.empty());
+ EXPECT_TRUE(end_semaphores.empty());
if (promise_texture) {
GrBackendTexture backend_texture = promise_texture->backendTexture();
EXPECT_TRUE(backend_texture.isValid());
@@ -279,14 +288,20 @@ TEST_P(SharedImageBackingFactoryGLTextureTest, Image) {
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
mailbox, context_state_.get());
EXPECT_TRUE(skia_representation);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
auto surface = skia_representation->BeginWriteAccess(
- 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry), &begin_semaphores,
+ &end_semaphores);
EXPECT_TRUE(surface);
EXPECT_EQ(size.width(), surface->width());
EXPECT_EQ(size.height(), surface->height());
skia_representation->EndWriteAccess(std::move(surface));
- auto promise_texture = skia_representation->BeginReadAccess();
+ auto promise_texture =
+ skia_representation->BeginReadAccess(&begin_semaphores, &end_semaphores);
EXPECT_TRUE(promise_texture);
+ EXPECT_TRUE(begin_semaphores.empty());
+ EXPECT_TRUE(end_semaphores.empty());
if (promise_texture) {
GrBackendTexture backend_texture = promise_texture->backendTexture();
EXPECT_TRUE(backend_texture.isValid());
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
index f049fc85a7b..c59b4c91f39 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
@@ -30,7 +30,8 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryIOSurface
: public SharedImageBackingFactory {
public:
SharedImageBackingFactoryIOSurface(const GpuDriverBugWorkarounds& workarounds,
- const GpuFeatureInfo& gpu_feature_info);
+ const GpuFeatureInfo& gpu_feature_info,
+ bool use_gl);
~SharedImageBackingFactoryIOSurface() override;
// SharedImageBackingFactory implementation.
@@ -57,9 +58,14 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryIOSurface
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) override;
+ bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) override;
private:
+ void CollectGLFormatInfo(const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info);
bool format_supported_by_gl_[viz::RESOURCE_FORMAT_MAX + 1];
+ bool use_gl_ = false;
DISALLOW_COPY_AND_ASSIGN(SharedImageBackingFactoryIOSurface);
};
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
index f0301b3ce5f..3d78e84a77e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
@@ -5,8 +5,10 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h"
#include "base/mac/scoped_cftyperef.h"
+#include "base/mac/scoped_nsobject.h"
#include "base/memory/scoped_refptr.h"
#include "base/optional.h"
+#include "components/viz/common/gpu/metal_context_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
@@ -21,6 +23,8 @@
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_image_io_surface.h"
+#import <Metal/Metal.h>
+
// Usage of BUILDFLAG(USE_DAWN) needs to be after the include for
// ui/gl/buildflags.h
#if BUILDFLAG(USE_DAWN)
@@ -100,6 +104,72 @@ base::Optional<DawnTextureFormat> GetDawnFormat(viz::ResourceFormat format) {
}
}
+base::Optional<DawnTextureFormat> GetDawnFormat(gfx::BufferFormat format) {
+ switch (format) {
+ case gfx::BufferFormat::R_8:
+ return DAWN_TEXTURE_FORMAT_R8_UNORM;
+ case gfx::BufferFormat::RG_88:
+ return DAWN_TEXTURE_FORMAT_R8_G8_UNORM;
+ case gfx::BufferFormat::RGBX_8888:
+ case gfx::BufferFormat::RGBA_8888:
+ case gfx::BufferFormat::BGRX_8888:
+ return DAWN_TEXTURE_FORMAT_B8_G8_R8_A8_UNORM;
+ default:
+ return {};
+ }
+}
+
+base::scoped_nsprotocol<id<MTLTexture>> API_AVAILABLE(macos(10.11))
+ CreateMetalTexture(id<MTLDevice> mtl_device,
+ IOSurfaceRef io_surface,
+ const gfx::Size& size,
+ viz::ResourceFormat format) {
+ base::scoped_nsprotocol<id<MTLTexture>> mtl_texture;
+ MTLPixelFormat mtl_pixel_format;
+ switch (format) {
+ case viz::RED_8:
+ case viz::ALPHA_8:
+ case viz::LUMINANCE_8:
+ mtl_pixel_format = MTLPixelFormatR8Unorm;
+ break;
+ case viz::RG_88:
+ mtl_pixel_format = MTLPixelFormatRG8Unorm;
+ break;
+ case viz::RGBA_8888:
+ mtl_pixel_format = MTLPixelFormatRGBA8Unorm;
+ break;
+ case viz::BGRA_8888:
+ mtl_pixel_format = MTLPixelFormatBGRA8Unorm;
+ break;
+ default:
+ // TODO(https://crbug.com/952063): Add support for all formats supported
+ // by GLImageIOSurface.
+ DLOG(ERROR) << "Resource format not yet supported in Metal.";
+ return mtl_texture;
+ }
+ base::scoped_nsobject<MTLTextureDescriptor> mtl_tex_desc(
+ [MTLTextureDescriptor new]);
+ [mtl_tex_desc setTextureType:MTLTextureType2D];
+ [mtl_tex_desc
+ setUsage:MTLTextureUsageShaderRead | MTLTextureUsageRenderTarget];
+ [mtl_tex_desc setPixelFormat:mtl_pixel_format];
+ [mtl_tex_desc setWidth:size.width()];
+ [mtl_tex_desc setHeight:size.height()];
+ [mtl_tex_desc setDepth:1];
+ [mtl_tex_desc setMipmapLevelCount:1];
+ [mtl_tex_desc setArrayLength:1];
+ [mtl_tex_desc setSampleCount:1];
+ // TODO(https://crbug.com/952063): For zero-copy resources that are populated
+ // on the CPU (e.g, video frames), it may be that MTLStorageModeManaged will
+ // be more appropriate.
+ [mtl_tex_desc setStorageMode:MTLStorageModePrivate];
+ mtl_texture.reset([mtl_device newTextureWithDescriptor:mtl_tex_desc
+ iosurface:io_surface
+ plane:0]);
+ DCHECK(mtl_texture);
+ return mtl_texture;
+}
+
} // anonymous namespace
// Representation of a SharedImageBackingIOSurface as a GL Texture.
@@ -141,22 +211,24 @@ class SharedImageRepresentationSkiaIOSurface
scoped_refptr<SharedContextState> context_state,
sk_sp<SkPromiseImageTexture> promise_texture,
MemoryTypeTracker* tracker,
- gles2::Texture* texture)
+ gles2::Texture* gles2_texture)
: SharedImageRepresentationSkia(manager, backing, tracker),
context_state_(std::move(context_state)),
promise_texture_(std::move(promise_texture)),
- texture_(texture) {
- DCHECK(texture_);
+ gles2_texture_(gles2_texture) {
DCHECK(promise_texture_);
}
~SharedImageRepresentationSkiaIOSurface() override {
- texture_->RemoveLightweightRef(has_context());
+ if (gles2_texture_)
+ gles2_texture_->RemoveLightweightRef(has_context());
}
sk_sp<SkSurface> BeginWriteAccess(
int final_msaa_count,
- const SkSurfaceProps& surface_props) override {
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
@@ -167,23 +239,30 @@ class SharedImageRepresentationSkiaIOSurface
}
void EndWriteAccess(sk_sp<SkSurface> surface) override {
- FlushIOSurfaceGLOperations();
+ if (context_state_->GrContextIsGL())
+ FlushIOSurfaceGLOperations();
- if (texture_->IsLevelCleared(texture_->target(), 0)) {
+ if (gles2_texture_ &&
+ gles2_texture_->IsLevelCleared(gles2_texture_->target(), 0)) {
backing()->SetCleared();
}
}
- sk_sp<SkPromiseImageTexture> BeginReadAccess() override {
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
return promise_texture_;
}
- void EndReadAccess() override { FlushIOSurfaceGLOperations(); }
+ void EndReadAccess() override {
+ if (context_state_->GrContextIsGL())
+ FlushIOSurfaceGLOperations();
+ }
private:
scoped_refptr<SharedContextState> context_state_;
sk_sp<SkPromiseImageTexture> promise_texture_;
- gles2::Texture* texture_;
+ gles2::Texture* const gles2_texture_;
};
// Representation of a SharedImageBackingIOSurface as a Dawn Texture.
@@ -201,12 +280,20 @@ class SharedImageRepresentationDawnIOSurface
: SharedImageRepresentationDawn(manager, backing, tracker),
io_surface_(std::move(io_surface)),
device_(device),
- dawn_format_(dawn_format) {
+ dawn_format_(dawn_format),
+ dawn_procs_(dawn_native::GetProcs()) {
DCHECK(device_);
DCHECK(io_surface_);
+
+ // Keep a reference to the device so that it stays valid (it might become
+ // lost in which case operations will be noops).
+ dawn_procs_.deviceReference(device_);
}
- ~SharedImageRepresentationDawnIOSurface() override {}
+ ~SharedImageRepresentationDawnIOSurface() override {
+ EndAccess();
+ dawn_procs_.deviceRelease(device_);
+ }
DawnTexture BeginAccess(DawnTextureUsageBit usage) final {
DawnTextureDescriptor desc;
@@ -219,11 +306,37 @@ class SharedImageRepresentationDawnIOSurface
desc.mipLevelCount = 1;
desc.sampleCount = 1;
- return dawn_native::metal::WrapIOSurface(device_, &desc, io_surface_.get(),
- 0);
+ texture_ =
+ dawn_native::metal::WrapIOSurface(device_, &desc, io_surface_.get(), 0);
+
+ if (texture_) {
+ // Keep a reference to the texture so that it stays valid (its content
+ // might be destroyed).
+ dawn_procs_.textureReference(texture_);
+
+ // Assume that the user of this representation will write to the texture
+ // so set the cleared flag so that other representations don't overwrite
+ // the result.
+ // TODO(cwallez@chromium.org): This is incorrect and allows reading
+ // uninitialized data. When !IsCleared we should tell dawn_native to
+ // consider the texture lazy-cleared.
+ SetCleared();
+ }
+
+ return texture_;
}
void EndAccess() final {
+ if (!texture_) {
+ return;
+ }
+ // TODO(cwallez@chromium.org): query dawn_native to know if the texture was
+ // cleared and set IsCleared appropriately.
+
+ // All further operations on the textures are errors (they would be racy
+ // with other backings).
+ dawn_procs_.textureDestroy(texture_);
+
// macOS has a global GPU command queue so synchronization between APIs and
// devices is automatic. However on Metal, dawnQueueSubmit "commits" the
// Metal command buffers but they aren't "scheduled" in the global queue
@@ -233,12 +346,20 @@ class SharedImageRepresentationDawnIOSurface
// This is a blocking call but should be almost instant.
TRACE_EVENT0("gpu", "SharedImageRepresentationDawnIOSurface::EndAccess");
dawn_native::metal::WaitForCommandsToBeScheduled(device_);
+
+ dawn_procs_.textureRelease(texture_);
+ texture_ = nullptr;
}
private:
base::ScopedCFTypeRef<IOSurfaceRef> io_surface_;
DawnDevice device_;
+ DawnTexture texture_ = nullptr;
DawnTextureFormat dawn_format_;
+
+ // TODO(cwallez@chromium.org): Load procs only once when the factory is
+ // created and pass a pointer to them around?
+ DawnProcTable dawn_procs_;
};
#endif // BUILDFLAG(USE_DAWN)
@@ -316,20 +437,39 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
SharedImageManager* manager,
MemoryTypeTracker* tracker,
scoped_refptr<SharedContextState> context_state) override {
- gles2::Texture* texture = GenGLTexture();
- if (!texture) {
- return nullptr;
+ gles2::Texture* gles2_texture = nullptr;
+ GrBackendTexture gr_backend_texture;
+ if (context_state->GrContextIsGL()) {
+ gles2_texture = GenGLTexture();
+ if (!gles2_texture)
+ return nullptr;
+ GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(),
+ gles2_texture->target(), size(),
+ gles2_texture->service_id(), format(),
+ &gr_backend_texture);
+ }
+ if (context_state->GrContextIsMetal()) {
+ if (@available(macOS 10.11, *)) {
+ id<MTLDevice> mtl_device =
+ context_state->metal_context_provider()->GetMTLDevice();
+ base::scoped_nsprotocol<id<MTLTexture>> mtl_texture =
+ CreateMetalTexture(mtl_device, io_surface_, size(), format());
+ DCHECK(mtl_texture);
+ // GrBackendTexture will take ownership of the MTLTexture passed in the
+ // GrMtlTextureInfo argument, so pass in a retained pointer.
+ GrMtlTextureInfo info;
+ info.fTexture = [mtl_texture retain];
+ gr_backend_texture = GrBackendTexture(size().width(), size().height(),
+ GrMipMapped::kNo, info);
+ } else {
+ return nullptr;
+ }
}
-
- GrBackendTexture backend_texture;
- GetGrBackendTexture(gl::GLContext::GetCurrent()->GetVersionInfo(),
- texture->target(), size(), texture->service_id(),
- format(), &backend_texture);
sk_sp<SkPromiseImageTexture> promise_texture =
- SkPromiseImageTexture::Make(backend_texture);
+ SkPromiseImageTexture::Make(gr_backend_texture);
return std::make_unique<SharedImageRepresentationSkiaIOSurface>(
manager, this, std::move(context_state), promise_texture, tracker,
- texture);
+ gles2_texture);
}
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
@@ -431,6 +571,16 @@ class SharedImageBackingIOSurface : public SharedImageBacking {
// SharedImageBackings wrapping IOSurfaces.
SharedImageBackingFactoryIOSurface::SharedImageBackingFactoryIOSurface(
const GpuDriverBugWorkarounds& workarounds,
+ const GpuFeatureInfo& gpu_feature_info,
+ bool use_gl)
+ : use_gl_(use_gl) {
+ if (use_gl_) {
+ CollectGLFormatInfo(workarounds, gpu_feature_info);
+ }
+}
+
+void SharedImageBackingFactoryIOSurface::CollectGLFormatInfo(
+ const GpuDriverBugWorkarounds& workarounds,
const GpuFeatureInfo& gpu_feature_info) {
scoped_refptr<gles2::FeatureInfo> feature_info =
new gles2::FeatureInfo(workarounds, gpu_feature_info);
@@ -465,7 +615,7 @@ SharedImageBackingFactoryIOSurface::CreateSharedImage(
DCHECK(!is_thread_safe);
// Check the format is supported and for simplicity always require it to be
// supported for GL.
- if (!format_supported_by_gl_[format]) {
+ if (use_gl_ && !format_supported_by_gl_[format]) {
LOG(ERROR) << "viz::ResourceFormat " << format
<< " not supported by IOSurfaces";
return nullptr;
@@ -503,6 +653,7 @@ SharedImageBackingFactoryIOSurface::CreateSharedImage(
NOTIMPLEMENTED();
return nullptr;
}
+
std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryIOSurface::CreateSharedImage(
const Mailbox& mailbox,
@@ -513,8 +664,34 @@ SharedImageBackingFactoryIOSurface::CreateSharedImage(
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
- NOTIMPLEMENTED();
- return nullptr;
+ if (handle.type != gfx::GpuMemoryBufferType::IO_SURFACE_BUFFER) {
+ NOTIMPLEMENTED();
+ return nullptr;
+ }
+
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface(
+ IOSurfaceLookupFromMachPort(handle.mach_port.get()));
+ if (!io_surface) {
+ DLOG(ERROR) << "IOSurfaceLookupFromMachPort failed.";
+ return nullptr;
+ }
+
+ viz::ResourceFormat resource_format = viz::GetResourceFormat(format);
+ size_t estimated_size = 0;
+ if (!viz::ResourceSizes::MaybeSizeInBytes(size, resource_format,
+ &estimated_size)) {
+ DLOG(ERROR) << "Failed to calculate SharedImage size";
+ return nullptr;
+ }
+
+ return std::make_unique<SharedImageBackingIOSurface>(
+ mailbox, resource_format, size, color_space, usage, std::move(io_surface),
+ GetDawnFormat(format), estimated_size);
+}
+
+bool SharedImageBackingFactoryIOSurface::CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) {
+ return false;
}
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
index 7a4411f8dc9..112707483bc 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface_unittest.cc
@@ -21,6 +21,7 @@
#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/core/SkPromiseImageTexture.h"
#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "ui/gl/buildflags.h"
#include "ui/gl/gl_context.h"
@@ -57,7 +58,7 @@ class SharedImageBackingFactoryIOSurfaceTest : public testing::Test {
context_state_->InitializeGL(GpuPreferences(), std::move(feature_info));
backing_factory_ = std::make_unique<SharedImageBackingFactoryIOSurface>(
- workarounds, GpuFeatureInfo());
+ workarounds, GpuFeatureInfo(), /*use_gl*/ true);
memory_type_tracker_ = std::make_unique<MemoryTypeTracker>(nullptr);
shared_image_representation_factory_ =
@@ -135,13 +136,18 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Basic) {
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
mailbox, context_state_);
EXPECT_TRUE(skia_representation);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
auto surface = skia_representation->BeginWriteAccess(
- 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry));
+ 0, SkSurfaceProps(0, kUnknown_SkPixelGeometry), &begin_semaphores,
+ &end_semaphores);
EXPECT_TRUE(surface);
EXPECT_EQ(size.width(), surface->width());
EXPECT_EQ(size.height(), surface->height());
+ EXPECT_TRUE(begin_semaphores.empty());
+ EXPECT_TRUE(end_semaphores.empty());
skia_representation->EndWriteAccess(std::move(surface));
- auto promise_texture = skia_representation->BeginReadAccess();
+ auto promise_texture = skia_representation->BeginReadAccess(nullptr, nullptr);
EXPECT_TRUE(promise_texture);
if (promise_texture) {
GrBackendTexture backend_texture = promise_texture->backendTexture();
@@ -202,7 +208,7 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, GL_SkiaGL) {
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
mailbox, context_state_);
EXPECT_TRUE(skia_representation);
- auto promise_texture = skia_representation->BeginReadAccess();
+ auto promise_texture = skia_representation->BeginReadAccess(nullptr, nullptr);
EXPECT_TRUE(promise_texture);
if (promise_texture) {
GrBackendTexture backend_texture = promise_texture->backendTexture();
@@ -282,18 +288,19 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) {
dawn::Texture::Acquire(dawn_representation->BeginAccess(
DAWN_TEXTURE_USAGE_BIT_OUTPUT_ATTACHMENT));
- dawn::RenderPassColorAttachmentDescriptor colorDesc;
- colorDesc.attachment = texture.CreateDefaultView();
- colorDesc.resolveTarget = nullptr;
- colorDesc.loadOp = dawn::LoadOp::Clear;
- colorDesc.storeOp = dawn::StoreOp::Store;
- colorDesc.clearColor = {0, 255, 0, 255};
+ dawn::RenderPassColorAttachmentDescriptor color_desc;
+ color_desc.attachment = texture.CreateDefaultView();
+ color_desc.resolveTarget = nullptr;
+ color_desc.loadOp = dawn::LoadOp::Clear;
+ color_desc.storeOp = dawn::StoreOp::Store;
+ color_desc.clearColor = {0, 255, 0, 255};
- dawn::RenderPassColorAttachmentDescriptor* colorAttachmentsPtr = &colorDesc;
+ dawn::RenderPassColorAttachmentDescriptor* color_attachments_ptr =
+ &color_desc;
dawn::RenderPassDescriptor renderPassDesc;
renderPassDesc.colorAttachmentCount = 1;
- renderPassDesc.colorAttachments = &colorAttachmentsPtr;
+ renderPassDesc.colorAttachments = &color_attachments_ptr;
renderPassDesc.depthStencilAttachment = nullptr;
dawn::CommandEncoder encoder = device.CreateCommandEncoder();
@@ -311,7 +318,7 @@ TEST_F(SharedImageBackingFactoryIOSurfaceTest, Dawn_SkiaGL) {
auto skia_representation = shared_image_representation_factory_->ProduceSkia(
mailbox, context_state_);
EXPECT_TRUE(skia_representation);
- auto promise_texture = skia_representation->BeginReadAccess();
+ auto promise_texture = skia_representation->BeginReadAccess(nullptr, nullptr);
EXPECT_TRUE(promise_texture);
if (promise_texture) {
GrBackendTexture backend_texture = promise_texture->backendTexture();
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc
index f4fca8c0f95..399b7151dad 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc
@@ -24,6 +24,7 @@
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/wrapped_sk_image.h"
#include "gpu/config/gpu_preferences.h"
+#include "ui/gl/gl_implementation.h"
#include "ui/gl/trace_util.h"
#if (defined(USE_X11) || defined(OS_FUCHSIA)) && BUILDFLAG(ENABLE_VULKAN)
@@ -34,6 +35,10 @@
#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h"
#endif
+#if defined(OS_WIN)
+#include "gpu/command_buffer/service/swap_chain_factory_dxgi.h"
+#endif // OS_WIN
+
namespace gpu {
// Overrides for flat_set lookups:
@@ -63,13 +68,17 @@ SharedImageFactory::SharedImageFactory(
SharedImageManager* shared_image_manager,
ImageFactory* image_factory,
MemoryTracker* memory_tracker,
- bool is_using_skia_renderer)
+ bool enable_wrapped_sk_image)
: mailbox_manager_(mailbox_manager),
shared_image_manager_(shared_image_manager),
memory_tracker_(std::make_unique<MemoryTypeTracker>(memory_tracker)),
- using_vulkan_(context_state && context_state->use_vulkan_gr_context()) {
- gl_backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>(
- gpu_preferences, workarounds, gpu_feature_info, image_factory);
+ using_vulkan_(context_state && context_state->GrContextIsVulkan()),
+ using_metal_(context_state && context_state->GrContextIsMetal()) {
+ bool use_gl = gl::GetGLImplementation() != gl::kGLImplementationNone;
+ if (use_gl) {
+ gl_backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>(
+ gpu_preferences, workarounds, gpu_feature_info, image_factory);
+ }
// For X11
#if (defined(USE_X11) || defined(OS_FUCHSIA)) && BUILDFLAG(ENABLE_VULKAN)
if (using_vulkan_) {
@@ -84,19 +93,23 @@ SharedImageFactory::SharedImageFactory(
// OSX
DCHECK(!using_vulkan_);
interop_backing_factory_ =
- std::make_unique<SharedImageBackingFactoryIOSurface>(workarounds,
- gpu_feature_info);
+ std::make_unique<SharedImageBackingFactoryIOSurface>(
+ workarounds, gpu_feature_info, use_gl);
#else
// Others
DCHECK(!using_vulkan_);
#endif
- // Certain test suites may enable UseSkiaRenderer feature flag, but never
- // create a SkiaRenderer. In this case context_state is nullptr and we should
- // not create a WrappedSkImageFactory.
- if (is_using_skia_renderer && context_state) {
+ if (enable_wrapped_sk_image && context_state) {
wrapped_sk_image_factory_ =
std::make_unique<raster::WrappedSkImageFactory>(context_state);
}
+
+#if defined(OS_WIN)
+ // For Windows
+ bool use_passthrough = gpu_preferences.use_passthrough_cmd_decoder &&
+ gles2::PassthroughCommandDecoderSupported();
+ swap_chain_factory_ = std::make_unique<SwapChainFactoryDXGI>(use_passthrough);
+#endif // OS_WIN
}
SharedImageFactory::~SharedImageFactory() {
@@ -163,15 +176,7 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
// TODO(piman): depending on handle.type, choose platform-specific backing
// factory, e.g. SharedImageBackingFactoryAHB.
bool allow_legacy_mailbox = false;
- SharedImageBackingFactory* factory = nullptr;
- if (!using_vulkan_) {
- // GMB is only supported by gl backing factory when gl is being used.
- allow_legacy_mailbox = true;
- factory = gl_backing_factory_.get();
- } else {
- // TODO(penghuang): support GMB for vulkan.
- NOTIMPLEMENTED() << "GMB is not supported for vulkan.";
- }
+ auto* factory = GetFactoryByUsage(usage, &allow_legacy_mailbox, handle.type);
if (!factory)
return false;
auto backing =
@@ -208,6 +213,35 @@ void SharedImageFactory::DestroyAllSharedImages(bool have_context) {
shared_images_.clear();
}
+#if defined(OS_WIN)
+bool SharedImageFactory::CreateSwapChain(const Mailbox& front_buffer_mailbox,
+ const Mailbox& back_buffer_mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ DCHECK(swap_chain_factory_);
+ bool allow_legacy_mailbox = true;
+ auto backings = swap_chain_factory_->CreateSwapChain(
+ front_buffer_mailbox, back_buffer_mailbox, format, size, color_space,
+ usage);
+ return RegisterBacking(std::move(backings.front_buffer),
+ allow_legacy_mailbox) &&
+ RegisterBacking(std::move(backings.back_buffer), allow_legacy_mailbox);
+}
+
+bool SharedImageFactory::PresentSwapChain(const Mailbox& mailbox) {
+ DCHECK(swap_chain_factory_);
+ auto it = shared_images_.find(mailbox);
+ if (it == shared_images_.end()) {
+ DLOG(ERROR) << "PresentSwapChain: Could not find shared image mailbox";
+ return false;
+ }
+ (*it)->PresentSwapChain();
+ return true;
+}
+#endif // OS_WIN
+
// TODO(ericrk): Move this entirely to SharedImageManager.
bool SharedImageFactory::OnMemoryDump(
const base::trace_event::MemoryDumpArgs& args,
@@ -231,14 +265,18 @@ bool SharedImageFactory::IsSharedBetweenThreads(uint32_t usage) {
SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
uint32_t usage,
- bool* allow_legacy_mailbox) {
+ bool* allow_legacy_mailbox,
+ gfx::GpuMemoryBufferType gmb_type) {
bool using_dawn = usage & SHARED_IMAGE_USAGE_WEBGPU;
bool vulkan_usage = using_vulkan_ && (usage & SHARED_IMAGE_USAGE_DISPLAY);
bool gl_usage = usage & SHARED_IMAGE_USAGE_GLES2;
+ bool share_between_gl_metal =
+ using_metal_ && (usage & SHARED_IMAGE_USAGE_OOP_RASTERIZATION);
bool share_between_threads = IsSharedBetweenThreads(usage);
bool share_between_gl_vulkan = gl_usage && vulkan_usage;
- bool using_interop_factory =
- share_between_threads || share_between_gl_vulkan || using_dawn;
+ bool using_interop_factory = share_between_threads ||
+ share_between_gl_vulkan || using_dawn ||
+ share_between_gl_metal;
// wrapped_sk_image_factory_ is only used for OOPR and supports
// a limited number of flags (e.g. no SHARED_IMAGE_USAGE_SCANOUT).
constexpr auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER |
@@ -249,6 +287,23 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
!using_interop_factory;
using_interop_factory |= vulkan_usage && !using_wrapped_sk_image;
+ if (gmb_type != gfx::EMPTY_BUFFER) {
+ bool interop_factory_supports_gmb =
+ interop_backing_factory_ &&
+ interop_backing_factory_->CanImportGpuMemoryBuffer(gmb_type);
+
+ if (using_wrapped_sk_image ||
+ (using_interop_factory && !interop_backing_factory_)) {
+      LOG(ERROR) << "Unable to create SharedImage backing: no support for the "
+ "requested GpuMemoryBufferType.";
+ return nullptr;
+ }
+
+  // If |interop_backing_factory_| supports the supplied GMB type, then use it
+ // instead of |gl_backing_factory_|.
+ using_interop_factory |= interop_factory_supports_gmb;
+ }
+
*allow_legacy_mailbox =
!using_wrapped_sk_image && !using_interop_factory && !using_vulkan_;
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h
index 6c2a29ecd10..e5401df8623 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.h
@@ -9,6 +9,7 @@
#include "base/containers/flat_set.h"
#include "base/memory/scoped_refptr.h"
+#include "build/build_config.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
@@ -30,6 +31,10 @@ struct GpuFeatureInfo;
struct GpuPreferences;
class MemoryTracker;
+#if defined(OS_WIN)
+class SwapChainFactoryDXGI;
+#endif // OS_WIN
+
namespace raster {
class WrappedSkImageFactory;
} // namespace raster
@@ -46,7 +51,7 @@ class GPU_GLES2_EXPORT SharedImageFactory {
SharedImageManager* manager,
ImageFactory* image_factory,
MemoryTracker* tracker,
- bool is_using_skia_renderer);
+ bool enable_wrapped_sk_image);
~SharedImageFactory();
bool CreateSharedImage(const Mailbox& mailbox,
@@ -72,6 +77,17 @@ class GPU_GLES2_EXPORT SharedImageFactory {
bool DestroySharedImage(const Mailbox& mailbox);
bool HasImages() const { return !shared_images_.empty(); }
void DestroyAllSharedImages(bool have_context);
+
+#if defined(OS_WIN)
+ bool CreateSwapChain(const Mailbox& front_buffer_mailbox,
+ const Mailbox& back_buffer_mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage);
+ bool PresentSwapChain(const Mailbox& mailbox);
+#endif // OS_WIN
+
bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
base::trace_event::ProcessMemoryDump* pmd,
int client_id,
@@ -81,12 +97,15 @@ class GPU_GLES2_EXPORT SharedImageFactory {
private:
bool IsSharedBetweenThreads(uint32_t usage);
- SharedImageBackingFactory* GetFactoryByUsage(uint32_t usage,
- bool* allow_legacy_mailbox);
+ SharedImageBackingFactory* GetFactoryByUsage(
+ uint32_t usage,
+ bool* allow_legacy_mailbox,
+ gfx::GpuMemoryBufferType gmb_type = gfx::EMPTY_BUFFER);
MailboxManager* mailbox_manager_;
SharedImageManager* shared_image_manager_;
std::unique_ptr<MemoryTypeTracker> memory_tracker_;
const bool using_vulkan_;
+ const bool using_metal_;
// The set of SharedImages which have been created (and are being kept alive)
// by this factory.
@@ -102,6 +121,11 @@ class GPU_GLES2_EXPORT SharedImageFactory {
// Non-null if compositing with SkiaRenderer.
std::unique_ptr<raster::WrappedSkImageFactory> wrapped_sk_image_factory_;
+
+#if defined(OS_WIN)
+ // Used for creating DXGI Swap Chain.
+ std::unique_ptr<SwapChainFactoryDXGI> swap_chain_factory_;
+#endif // OS_WIN
};
class GPU_GLES2_EXPORT SharedImageRepresentationFactory {
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
index c6cfb092b68..4bac263600c 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory_unittest.cc
@@ -41,7 +41,7 @@ class SharedImageFactoryTest : public testing::Test {
factory_ = std::make_unique<SharedImageFactory>(
preferences, workarounds, GpuFeatureInfo(), nullptr, &mailbox_manager_,
&shared_image_manager_, &image_factory_, nullptr,
- /*is_using_skia_renderer=*/false);
+ /*enable_wrapped_sk_image=*/false);
}
void TearDown() override {
@@ -91,7 +91,7 @@ TEST_F(SharedImageFactoryTest, DuplicateMailbox) {
auto other_factory = std::make_unique<SharedImageFactory>(
preferences, workarounds, GpuFeatureInfo(), nullptr, &mailbox_manager_,
&shared_image_manager_, &image_factory_, nullptr,
- /*is_using_skia_renderer=*/false);
+ /*enable_wrapped_sk_image=*/false);
EXPECT_FALSE(other_factory->CreateSharedImage(mailbox, format, size,
color_space, usage));
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc
index 68ecb464314..6efe9cce6d0 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc
@@ -135,6 +135,9 @@ SharedImageManager::ProduceGLTexture(const Mailbox& mailbox,
std::unique_ptr<SharedImageRepresentationGLTexture>
SharedImageManager::ProduceRGBEmulationGLTexture(const Mailbox& mailbox,
MemoryTypeTracker* tracker) {
+ CALLED_ON_VALID_THREAD();
+
+ AutoLock autolock(this);
auto found = images_.find(mailbox);
if (found == images_.end()) {
LOG(ERROR) << "SharedImageManager::ProduceRGBEmulationGLTexture: Trying to "
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.h b/chromium/gpu/command_buffer/service/shared_image_representation.h
index 27c1841512e..35f409d48b9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.h
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.h
@@ -8,6 +8,7 @@
#include <dawn/dawn.h>
#include "base/callback_helpers.h"
+#include "build/build_config.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
@@ -69,6 +70,9 @@ class SharedImageRepresentationFactoryRef : public SharedImageRepresentation {
const Mailbox& mailbox() const { return backing()->mailbox(); }
void Update() { backing()->Update(); }
+#if defined(OS_WIN)
+ void PresentSwapChain() { backing()->PresentSwapChain(); }
+#endif // OS_WIN
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) {
return backing()->ProduceLegacyMailbox(mailbox_manager);
}
@@ -150,11 +154,30 @@ class SharedImageRepresentationSkia : public SharedImageRepresentation {
MemoryTypeTracker* tracker)
: SharedImageRepresentation(manager, backing, tracker) {}
+ // Begin the write access. The implementations should insert semaphores into
+ // begin_semaphores vector which client will wait on before writing the
+ // backing. The ownership of begin_semaphores will be passed to client.
+ // The implementations should also insert semaphores into end_semaphores,
+ // client must submit them with drawing operations which use the backing.
+ // The ownership of end_semaphores are not passed to client. And client must
+ // submit the end_semaphores before calling EndWriteAccess().
virtual sk_sp<SkSurface> BeginWriteAccess(
int final_msaa_count,
- const SkSurfaceProps& surface_props) = 0;
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) = 0;
virtual void EndWriteAccess(sk_sp<SkSurface> surface) = 0;
- virtual sk_sp<SkPromiseImageTexture> BeginReadAccess() = 0;
+
+ // Begin the read access. The implementations should insert semaphores into
+ // begin_semaphores vector which client will wait on before reading the
+ // backing. The ownership of begin_semaphores will be passed to client.
+ // The implementations should also insert semaphores into end_semaphores,
+ // client must submit them with drawing operations which use the backing.
+ // The ownership of end_semaphores are not passed to client. And client must
+ // submit the end_semaphores before calling EndReadAccess().
+ virtual sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) = 0;
virtual void EndReadAccess() = 0;
};
@@ -165,6 +188,8 @@ class SharedImageRepresentationDawn : public SharedImageRepresentation {
MemoryTypeTracker* tracker)
: SharedImageRepresentation(manager, backing, tracker) {}
+ // This can return null in case of a Dawn validation error, for example if
+ // usage is invalid.
virtual DawnTexture BeginAccess(DawnTextureUsageBit usage) = 0;
virtual void EndAccess() = 0;
};
diff --git a/chromium/gpu/command_buffer/service/skia_utils.cc b/chromium/gpu/command_buffer/service/skia_utils.cc
index 5c4466b1619..ec81f696f4e 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.cc
+++ b/chromium/gpu/command_buffer/service/skia_utils.cc
@@ -5,6 +5,7 @@
#include "gpu/command_buffer/service/skia_utils.h"
#include "base/logging.h"
+#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/gl/GrGLTypes.h"
@@ -13,15 +14,38 @@
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/gl_version_info.h"
+#if BUILDFLAG(ENABLE_VULKAN)
+#include "gpu/vulkan/vulkan_device_queue.h"
+#include "gpu/vulkan/vulkan_fence_helper.h"
+#endif
+
namespace gpu {
+namespace {
+
+struct FlushCleanupContext {
+ std::vector<base::OnceClosure> cleanup_tasks;
+};
+
+void CleanupAfterSkiaFlush(void* context) {
+ FlushCleanupContext* flush_context =
+ static_cast<FlushCleanupContext*>(context);
+ for (auto& task : flush_context->cleanup_tasks) {
+ std::move(task).Run();
+ }
+ delete flush_context;
+}
+
+} // namespace
+
bool GetGrBackendTexture(const gl::GLVersionInfo* version_info,
GLenum target,
const gfx::Size& size,
GLuint service_id,
viz::ResourceFormat resource_format,
GrBackendTexture* gr_texture) {
- if (target != GL_TEXTURE_2D && target != GL_TEXTURE_RECTANGLE_ARB) {
+ if (target != GL_TEXTURE_2D && target != GL_TEXTURE_RECTANGLE_ARB &&
+ target != GL_TEXTURE_EXTERNAL_OES) {
LOG(ERROR) << "GetGrBackendTexture: invalid texture target.";
return false;
}
@@ -36,4 +60,34 @@ bool GetGrBackendTexture(const gl::GLVersionInfo* version_info,
return true;
}
+void AddCleanupTaskForSkiaFlush(base::OnceClosure task,
+ GrFlushInfo* flush_info) {
+ FlushCleanupContext* context;
+ if (!flush_info->fFinishedProc) {
+ DCHECK(!flush_info->fFinishedContext);
+ flush_info->fFinishedProc = &CleanupAfterSkiaFlush;
+ context = new FlushCleanupContext();
+ flush_info->fFinishedContext = context;
+ } else {
+ DCHECK_EQ(flush_info->fFinishedProc, &CleanupAfterSkiaFlush);
+ DCHECK(flush_info->fFinishedContext);
+ context = static_cast<FlushCleanupContext*>(flush_info->fFinishedContext);
+ }
+ context->cleanup_tasks.push_back(std::move(task));
+}
+
+void AddVulkanCleanupTaskForSkiaFlush(
+ viz::VulkanContextProvider* context_provider,
+ GrFlushInfo* flush_info) {
+#if BUILDFLAG(ENABLE_VULKAN)
+ if (context_provider) {
+ auto task = context_provider->GetDeviceQueue()
+ ->GetFenceHelper()
+ ->CreateExternalCallback();
+ if (task)
+ AddCleanupTaskForSkiaFlush(std::move(task), flush_info);
+ }
+#endif
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/skia_utils.h b/chromium/gpu/command_buffer/service/skia_utils.h
index 166314616bc..3edf8b47187 100644
--- a/chromium/gpu/command_buffer/service/skia_utils.h
+++ b/chromium/gpu/command_buffer/service/skia_utils.h
@@ -5,8 +5,11 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_SKIA_UTILS_H_
#define GPU_COMMAND_BUFFER_SERVICE_SKIA_UTILS_H_
+#include "base/callback_forward.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/gpu_gles2_export.h"
+#include "third_party/skia/include/core/SkSurface.h"
+#include "third_party/skia/include/gpu/GrTypes.h"
// Forwardly declare a few GL types to avoid including GL header files.
typedef int GLint;
@@ -23,6 +26,10 @@ namespace gl {
struct GLVersionInfo;
} // namespace gl
+namespace viz {
+class VulkanContextProvider;
+} // namespace viz
+
namespace gpu {
// Creates a GrBackendTexture from a service ID. Skia does not take ownership.
// Returns true on success.
@@ -33,6 +40,16 @@ GPU_GLES2_EXPORT bool GetGrBackendTexture(const gl::GLVersionInfo* version_info,
viz::ResourceFormat resource_format,
GrBackendTexture* gr_texture);
+// Adds a task to be executed when the flush in |flush_info| is complete.
+GPU_GLES2_EXPORT void AddCleanupTaskForSkiaFlush(base::OnceClosure task,
+ GrFlushInfo* flush_info);
+
+// Helper which associates cleanup callbacks with a Skia GrFlushInfo's callback.
+// Is a no-op if |context_provider| is null.
+GPU_GLES2_EXPORT void AddVulkanCleanupTaskForSkiaFlush(
+ viz::VulkanContextProvider* context_provider,
+ GrFlushInfo* flush_info);
+
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_SERVICE_SKIA_UTILS_H_
diff --git a/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.cc b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.cc
new file mode 100644
index 00000000000..00447c1d48b
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.cc
@@ -0,0 +1,438 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/swap_chain_factory_dxgi.h"
+
+#include <d3d11.h>
+
+#include "base/trace_event/memory_dump_manager.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/service/mailbox_manager.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "ui/gl/gl_angle_util_win.h"
+#include "ui/gl/gl_image_dxgi_swap_chain.h"
+#include "ui/gl/trace_util.h"
+
+namespace gpu {
+
+namespace {
+
+GLuint MakeTextureAndSetParameters(gl::GLApi* api, GLenum target) {
+ GLuint service_id = 0;
+ api->glGenTexturesFn(1, &service_id);
+ api->glBindTextureFn(target, service_id);
+ api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_USAGE_ANGLE,
+ GL_FRAMEBUFFER_ATTACHMENT_ANGLE);
+ return service_id;
+}
+
+bool ClearBackBuffer(Microsoft::WRL::ComPtr<IDXGISwapChain1>& swap_chain,
+ Microsoft::WRL::ComPtr<ID3D11Device>& d3d11_device) {
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture;
+ HRESULT hr = swap_chain->GetBuffer(0, IID_PPV_ARGS(&d3d11_texture));
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "GetBuffer failed with error " << std::hex << hr;
+ return false;
+ }
+ DCHECK(d3d11_texture);
+
+ Microsoft::WRL::ComPtr<ID3D11RenderTargetView> render_target;
+ hr = d3d11_device->CreateRenderTargetView(d3d11_texture.Get(), nullptr,
+ &render_target);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "CreateRenderTargetView failed with error " << std::hex
+ << hr;
+ return false;
+ }
+ DCHECK(render_target);
+
+ Microsoft::WRL::ComPtr<ID3D11DeviceContext> d3d11_device_context;
+ d3d11_device->GetImmediateContext(&d3d11_device_context);
+ DCHECK(d3d11_device_context);
+
+ float color_rgba[4] = {0.0f, 0.0f, 0.0f, 1.0f};
+ d3d11_device_context->ClearRenderTargetView(render_target.Get(), color_rgba);
+ return true;
+}
+
+bool SupportedFormat(viz::ResourceFormat format) {
+ switch (format) {
+ case viz::RGBA_F16:
+ case viz::RGBA_8888:
+ case viz::RGBX_8888:
+ return true;
+ default:
+ return false;
+ };
+}
+
+} // anonymous namespace
+
+// Representation of a SharedImageBackingDXGISwapChain as a GL Texture.
+class SharedImageRepresentationGLTextureDXGISwapChain
+ : public SharedImageRepresentationGLTexture {
+ public:
+ SharedImageRepresentationGLTextureDXGISwapChain(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture)
+ : SharedImageRepresentationGLTexture(manager, backing, tracker),
+ texture_(texture) {}
+
+ gles2::Texture* GetTexture() override { return texture_; }
+
+ private:
+ gles2::Texture* const texture_;
+};
+
+// Representation of a SharedImageBackingDXGISwapChain as a GL
+// TexturePassthrough.
+class SharedImageRepresentationGLTexturePassthroughDXGISwapChain
+ : public SharedImageRepresentationGLTexturePassthrough {
+ public:
+ SharedImageRepresentationGLTexturePassthroughDXGISwapChain(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough)
+ : SharedImageRepresentationGLTexturePassthrough(manager,
+ backing,
+ tracker),
+ texture_passthrough_(std::move(texture_passthrough)) {}
+
+ const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
+ override {
+ return texture_passthrough_;
+ }
+
+ private:
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
+};
+
+// Implementation of SharedImageBacking that holds buffer (front buffer/back
+// buffer of swap chain) texture (as gles2::Texture/gles2::TexturePassthrough)
+// and a reference to created swap chain.
+class SharedImageBackingDXGISwapChain : public SharedImageBacking {
+ public:
+ SharedImageBackingDXGISwapChain(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain,
+ gles2::Texture* texture,
+ scoped_refptr<gles2::TexturePassthrough> passthrough_texture)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ texture ? texture->estimated_size()
+ : passthrough_texture->estimated_size(),
+ false /* is_thread_safe */),
+ swap_chain_(swap_chain),
+ texture_(texture),
+ texture_passthrough_(std::move(passthrough_texture)) {
+ DCHECK(swap_chain_);
+ DCHECK((texture_ && !texture_passthrough_) ||
+ (!texture_ && texture_passthrough_));
+ }
+
+ ~SharedImageBackingDXGISwapChain() override {
+ DCHECK(!swap_chain_);
+ DCHECK(!texture_);
+ DCHECK(!texture_passthrough_);
+ }
+
+ // Texture is cleared on initialization.
+ bool IsCleared() const override { return true; }
+
+ void SetCleared() override {}
+
+ void Update() override {
+ DLOG(ERROR) << "SharedImageBackingDXGISwapChain::Update : Trying to update "
+ "Shared Images associated with swap chain.";
+ }
+
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
+ if (texture_)
+ mailbox_manager->ProduceTexture(mailbox(), texture_);
+ else {
+ DCHECK(texture_passthrough_);
+ mailbox_manager->ProduceTexture(mailbox(), texture_passthrough_.get());
+ }
+ return true;
+ }
+
+ void Destroy() override {
+ if (texture_) {
+ texture_->RemoveLightweightRef(have_context());
+ texture_ = nullptr;
+ } else {
+ DCHECK(texture_passthrough_);
+ if (!have_context())
+ texture_passthrough_->MarkContextLost();
+ texture_passthrough_.reset();
+ }
+
+ DCHECK(swap_chain_);
+ swap_chain_.Reset();
+ }
+
+ void OnMemoryDump(const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) override {
+ // Add a |service_guid| which expresses shared ownership between the
+ // various GPU dumps.
+ auto client_guid = GetSharedImageGUIDForTracing(mailbox());
+ base::trace_event::MemoryAllocatorDumpGuid service_guid;
+ if (texture_)
+ service_guid =
+ gl::GetGLTextureServiceGUIDForTracing(texture_->service_id());
+ else {
+ DCHECK(texture_passthrough_);
+ service_guid = gl::GetGLTextureServiceGUIDForTracing(
+ texture_passthrough_->service_id());
+ }
+ pmd->CreateSharedGlobalAllocatorDump(service_guid);
+
+ // TODO(piman): coalesce constant with TextureManager::DumpTextureRef.
+ int importance = 2; // This client always owns the ref.
+
+ pmd->AddOwnershipEdge(client_guid, service_guid, importance);
+
+ // Dump all sub-levels held by the texture. They will appear below the
+ // main gl/textures/client_X/mailbox_Y dump.
+ texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name);
+ }
+
+ bool PresentSwapChain() override {
+ DXGI_PRESENT_PARAMETERS params = {};
+ params.DirtyRectsCount = 0;
+ params.pDirtyRects = nullptr;
+ HRESULT hr =
+ swap_chain_->Present1(0 /* interval */, 0 /* flags */, &params);
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "Present1 failed with error " << std::hex << hr;
+ return false;
+ }
+
+ gl::GLImage* image;
+ unsigned target = GL_TEXTURE_2D;
+ gles2::Texture::ImageState image_state;
+ if (texture_) {
+ image = texture_->GetLevelImage(target, 0, &image_state);
+ } else {
+ DCHECK(texture_passthrough_);
+ image = texture_passthrough_->GetLevelImage(target, 0);
+ }
+ DCHECK(image);
+ DCHECK_EQ(image_state, gles2::Texture::BOUND);
+
+ if (!image->BindTexImage(target)) {
+ DLOG(ERROR) << "Failed to rebind texture to new surface.";
+ return false;
+ }
+ return true;
+ }
+
+ protected:
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override {
+ DCHECK(texture_);
+ return std::make_unique<SharedImageRepresentationGLTextureDXGISwapChain>(
+ manager, this, tracker, texture_);
+ }
+
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override {
+ DCHECK(texture_passthrough_);
+ return std::make_unique<
+ SharedImageRepresentationGLTexturePassthroughDXGISwapChain>(
+ manager, this, tracker, texture_passthrough_);
+ }
+
+ private:
+ Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain_;
+ gles2::Texture* texture_ = nullptr;
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
+ DISALLOW_COPY_AND_ASSIGN(SharedImageBackingDXGISwapChain);
+};
+
+SwapChainFactoryDXGI::SwapChainFactoryDXGI(bool use_passthrough)
+ : use_passthrough_(use_passthrough) {}
+
+SwapChainFactoryDXGI::~SwapChainFactoryDXGI() = default;
+
+SwapChainFactoryDXGI::SwapChainBackings::SwapChainBackings(
+ std::unique_ptr<SharedImageBacking> front_buffer,
+ std::unique_ptr<SharedImageBacking> back_buffer)
+ : front_buffer(std::move(front_buffer)),
+ back_buffer(std::move(back_buffer)) {}
+
+SwapChainFactoryDXGI::SwapChainBackings::~SwapChainBackings() = default;
+
+SwapChainFactoryDXGI::SwapChainBackings::SwapChainBackings(
+ SwapChainFactoryDXGI::SwapChainBackings&&) = default;
+
+SwapChainFactoryDXGI::SwapChainBackings&
+SwapChainFactoryDXGI::SwapChainBackings::operator=(
+ SwapChainFactoryDXGI::SwapChainBackings&&) = default;
+
+std::unique_ptr<SharedImageBacking> SwapChainFactoryDXGI::MakeBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const Microsoft::WRL::ComPtr<IDXGISwapChain1>& swap_chain,
+ int buffer_index) {
+ Microsoft::WRL::ComPtr<ID3D11Texture2D> d3d11_texture;
+ HRESULT hr =
+ swap_chain->GetBuffer(buffer_index, IID_PPV_ARGS(&d3d11_texture));
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "GetBuffer failed with error " << std::hex << hr;
+ return nullptr;
+ }
+ DCHECK(d3d11_texture);
+
+ const unsigned target = GL_TEXTURE_2D;
+ gl::GLApi* api = gl::g_current_gl_context;
+ const GLuint service_id = MakeTextureAndSetParameters(api, target);
+
+ auto image = base::MakeRefCounted<gl::GLImageDXGISwapChain>(
+ size, viz::BufferFormat(format), d3d11_texture, swap_chain);
+ if (!image->BindTexImage(target)) {
+ DLOG(ERROR) << "Failed to bind image to swap chain D3D11 texture.";
+ return nullptr;
+ }
+
+ gles2::Texture* texture = nullptr;
+ scoped_refptr<gles2::TexturePassthrough> passthrough_texture;
+
+ if (use_passthrough_) {
+ passthrough_texture =
+ base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
+ passthrough_texture->SetLevelImage(target, 0, image.get());
+ GLint texture_memory_size = 0;
+ api->glGetTexParameterivFn(target, GL_MEMORY_SIZE_ANGLE,
+ &texture_memory_size);
+ passthrough_texture->SetEstimatedSize(texture_memory_size);
+ } else {
+ GLuint internal_format = viz::GLInternalFormat(format);
+ DCHECK_EQ(internal_format, image->GetInternalFormat());
+ GLenum gl_format =
+ gles2::TextureManager::ExtractFormatFromStorageFormat(internal_format);
+ GLenum gl_type =
+ gles2::TextureManager::ExtractTypeFromStorageFormat(internal_format);
+
+ texture = new gles2::Texture(service_id);
+ texture->SetLightweightRef();
+ texture->SetTarget(target, 1);
+ texture->sampler_state_.min_filter = GL_LINEAR;
+ texture->sampler_state_.mag_filter = GL_LINEAR;
+ texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture->SetLevelInfo(target, 0, internal_format, size.width(),
+ size.height(), 1, 0, gl_format, gl_type,
+ gfx::Rect(size));
+ texture->SetLevelImage(target, 0, image.get(), gles2::Texture::BOUND);
+ texture->SetImmutable(true);
+ }
+
+ return std::make_unique<SharedImageBackingDXGISwapChain>(
+ mailbox, format, size, color_space, usage, swap_chain, texture,
+ passthrough_texture);
+}
+
+SwapChainFactoryDXGI::SwapChainBackings SwapChainFactoryDXGI::CreateSwapChain(
+ const Mailbox& front_buffer_mailbox,
+ const Mailbox& back_buffer_mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ if (!SupportedFormat(format)) {
+ DLOG(ERROR) << format << " format is not supported by swap chain.";
+ return {nullptr, nullptr};
+ }
+
+ Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
+ gl::QueryD3D11DeviceObjectFromANGLE();
+ DCHECK(d3d11_device);
+ Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
+ d3d11_device.As(&dxgi_device);
+ DCHECK(dxgi_device);
+ Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
+ dxgi_device->GetAdapter(&dxgi_adapter);
+ DCHECK(dxgi_adapter);
+ Microsoft::WRL::ComPtr<IDXGIFactory2> dxgi_factory;
+ dxgi_adapter->GetParent(IID_PPV_ARGS(&dxgi_factory));
+ DCHECK(dxgi_factory);
+
+ DXGI_FORMAT output_format = format == viz::RGBA_F16
+ ? DXGI_FORMAT_R16G16B16A16_FLOAT
+ : DXGI_FORMAT_B8G8R8A8_UNORM;
+
+ DXGI_SWAP_CHAIN_DESC1 desc = {};
+ desc.Width = size.width();
+ desc.Height = size.height();
+ desc.Format = output_format;
+ desc.Stereo = FALSE;
+ desc.SampleDesc.Count = 1;
+ desc.BufferCount = 2;
+ desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
+ desc.Scaling = DXGI_SCALING_STRETCH;
+ desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
+ desc.Flags = 0;
+
+ Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain;
+
+ HRESULT hr = dxgi_factory->CreateSwapChainForComposition(
+ d3d11_device.Get(), &desc, nullptr, &swap_chain);
+
+ if (FAILED(hr)) {
+ DLOG(ERROR) << "CreateSwapChainForComposition failed with error "
+ << std::hex << hr;
+ return {nullptr, nullptr};
+ }
+
+ // Explicitly clear front and back buffers to ensure that there are no
+ // uninitialized pixels.
+ if (!ClearBackBuffer(swap_chain, d3d11_device))
+ return {nullptr, nullptr};
+ DXGI_PRESENT_PARAMETERS params = {};
+ params.DirtyRectsCount = 0;
+ params.pDirtyRects = nullptr;
+ swap_chain->Present1(0 /* interval */, 0 /* flags */, &params);
+ if (!ClearBackBuffer(swap_chain, d3d11_device))
+ return {nullptr, nullptr};
+
+ std::unique_ptr<SharedImageBacking> front_buffer_backing =
+ MakeBacking(front_buffer_mailbox, format, size, color_space, usage,
+ swap_chain, 1 /* buffer index */);
+
+ std::unique_ptr<SharedImageBacking> back_buffer_backing =
+ MakeBacking(back_buffer_mailbox, format, size, color_space, usage,
+ swap_chain, 0 /* buffer index */);
+
+ // To avoid registering one backing when the other backing does not exist.
+ if (!(front_buffer_backing && back_buffer_backing))
+ return {nullptr, nullptr};
+
+ return {std::move(front_buffer_backing), std::move(back_buffer_backing)};
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.h b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.h
new file mode 100644
index 00000000000..ba21b19f62e
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/swap_chain_factory_dxgi.h
@@ -0,0 +1,74 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SWAP_CHAIN_FACTORY_DXGI_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SWAP_CHAIN_FACTORY_DXGI_H_
+
+#include <windows.h>
+#include <dxgi1_2.h>
+#include <wrl/client.h>
+#include <memory>
+
+#include "components/viz/common/resources/resource_format.h"
+#include "gpu/gpu_gles2_export.h"
+#include "ui/gl/gl_bindings.h"
+
+namespace gfx {
+class Size;
+class ColorSpace;
+} // namespace gfx
+
+namespace gpu {
+class SharedImageBacking;
+struct Mailbox;
+
+class GPU_GLES2_EXPORT SwapChainFactoryDXGI {
+ public:
+ explicit SwapChainFactoryDXGI(bool use_passthrough);
+ ~SwapChainFactoryDXGI();
+
+ struct SwapChainBackings {
+ SwapChainBackings(std::unique_ptr<SharedImageBacking> front_buffer,
+ std::unique_ptr<SharedImageBacking> back_buffer);
+ ~SwapChainBackings();
+ SwapChainBackings(SwapChainBackings&&);
+ SwapChainBackings& operator=(SwapChainBackings&&);
+
+ std::unique_ptr<SharedImageBacking> front_buffer;
+ std::unique_ptr<SharedImageBacking> back_buffer;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SwapChainBackings);
+ };
+
+ // Creates IDXGI Swap Chain and exposes front and back buffers as Shared Image
+ // mailboxes.
+ SwapChainBackings CreateSwapChain(const Mailbox& front_buffer_mailbox,
+ const Mailbox& back_buffer_mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage);
+
+ private:
+ // Wraps the swap chain buffer (front buffer/back buffer) into GLimage and
+ // creates a GL texture and stores it as gles2::Texture or as
+ // gles2::TexturePassthrough in the backing that is created.
+ std::unique_ptr<SharedImageBacking> MakeBacking(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const Microsoft::WRL::ComPtr<IDXGISwapChain1>& swap_chain,
+ int buffer_index);
+ // Whether we're using the passthrough command decoder and should generate
+ // passthrough textures.
+ bool use_passthrough_ = false;
+ DISALLOW_COPY_AND_ASSIGN(SwapChainFactoryDXGI);
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SWAP_CHAIN_FACTORY_DXGI_H_ \ No newline at end of file
diff --git a/chromium/gpu/command_buffer/service/texture_manager.cc b/chromium/gpu/command_buffer/service/texture_manager.cc
index 1d216dac67d..7e545b9888b 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager.cc
@@ -502,8 +502,15 @@ void TextureManager::Destroy() {
DCHECK_EQ(0u, memory_type_tracker_->GetMemRepresented());
}
+TexturePassthrough::LevelInfo::LevelInfo() = default;
+
+TexturePassthrough::LevelInfo::LevelInfo(const LevelInfo& rhs) = default;
+
+TexturePassthrough::LevelInfo::~LevelInfo() = default;
+
TexturePassthrough::TexturePassthrough(GLuint service_id, GLenum target)
: TextureBase(service_id),
+ owned_service_id_(service_id),
have_context_(true),
level_images_(target == GL_TEXTURE_CUBE_MAP ? 6 : 1) {
TextureBase::SetTarget(target);
@@ -512,7 +519,7 @@ TexturePassthrough::TexturePassthrough(GLuint service_id, GLenum target)
TexturePassthrough::~TexturePassthrough() {
DeleteFromMailboxManager();
if (have_context_) {
- glDeleteTextures(1, &service_id_);
+ glDeleteTextures(1, &owned_service_id_);
}
}
@@ -537,37 +544,85 @@ void TexturePassthrough::MarkContextLost() {
void TexturePassthrough::SetLevelImage(GLenum target,
GLint level,
gl::GLImage* image) {
- size_t face_idx = GLES2Util::GLTargetToFaceIndex(target);
- DCHECK(face_idx < level_images_.size());
- DCHECK(level >= 0);
+ SetLevelImageInternal(target, level, image, nullptr, owned_service_id_);
+}
- // Don't allocate space for the images until needed
- if (static_cast<GLint>(level_images_[face_idx].size()) <= level) {
- level_images_[face_idx].resize(level + 1);
+gl::GLImage* TexturePassthrough::GetLevelImage(GLenum target,
+ GLint level) const {
+ size_t face_idx = 0;
+ if (!LevelInfoExists(target, level, &face_idx)) {
+ return nullptr;
}
- level_images_[face_idx][level] = image;
+ return level_images_[face_idx][level].image.get();
}
-gl::GLImage* TexturePassthrough::GetLevelImage(GLenum target,
- GLint level) const {
- if (GLES2Util::GLFaceTargetToTextureTarget(target) != target_) {
+void TexturePassthrough::SetStreamLevelImage(
+ GLenum target,
+ GLint level,
+ GLStreamTextureImage* stream_texture_image,
+ GLuint service_id) {
+ SetLevelImageInternal(target, level, stream_texture_image,
+ stream_texture_image, service_id);
+}
+
+GLStreamTextureImage* TexturePassthrough::GetStreamLevelImage(
+ GLenum target,
+ GLint level) const {
+ size_t face_idx = 0;
+ if (!LevelInfoExists(target, level, &face_idx)) {
return nullptr;
}
+ return level_images_[face_idx][level].stream_texture_image.get();
+}
+
+void TexturePassthrough::SetEstimatedSize(size_t size) {
+ estimated_size_ = size;
+}
+
+bool TexturePassthrough::LevelInfoExists(GLenum target,
+ GLint level,
+ size_t* out_face_idx) const {
+ DCHECK(out_face_idx);
+
+ if (GLES2Util::GLFaceTargetToTextureTarget(target) != target_) {
+ return false;
+ }
+
size_t face_idx = GLES2Util::GLTargetToFaceIndex(target);
DCHECK(face_idx < level_images_.size());
DCHECK(level >= 0);
if (static_cast<GLint>(level_images_[face_idx].size()) <= level) {
- return nullptr;
+ return false;
}
- return level_images_[face_idx][level].get();
+ *out_face_idx = face_idx;
+ return true;
}
-void TexturePassthrough::SetEstimatedSize(size_t size) {
- estimated_size_ = size;
+void TexturePassthrough::SetLevelImageInternal(
+ GLenum target,
+ GLint level,
+ gl::GLImage* image,
+ GLStreamTextureImage* stream_texture_image,
+ GLuint service_id) {
+ size_t face_idx = GLES2Util::GLTargetToFaceIndex(target);
+ DCHECK(face_idx < level_images_.size());
+ DCHECK(level >= 0);
+
+ // Don't allocate space for the images until needed
+ if (static_cast<GLint>(level_images_[face_idx].size()) <= level) {
+ level_images_[face_idx].resize(level + 1);
+ }
+
+ level_images_[face_idx][level].image = image;
+ level_images_[face_idx][level].stream_texture_image = stream_texture_image;
+
+ if (service_id != 0 && service_id != service_id_) {
+ service_id_ = service_id;
+ }
}
Texture::Texture(GLuint service_id)
@@ -690,18 +745,15 @@ Texture::CanRenderCondition Texture::GetCanRenderCondition() const {
if (target_ == 0)
return CAN_RENDER_ALWAYS;
- if (target_ != GL_TEXTURE_EXTERNAL_OES) {
- if (face_infos_.empty() ||
- static_cast<size_t>(base_level_) >= face_infos_[0].level_infos.size()) {
- return CAN_RENDER_NEVER;
- }
- const Texture::LevelInfo& first_face =
- face_infos_[0].level_infos[base_level_];
- if (first_face.width == 0 ||
- first_face.height == 0 ||
- first_face.depth == 0) {
- return CAN_RENDER_NEVER;
- }
+ if (face_infos_.empty() ||
+ static_cast<size_t>(base_level_) >= face_infos_[0].level_infos.size()) {
+ return CAN_RENDER_NEVER;
+ }
+ const Texture::LevelInfo& first_face =
+ face_infos_[0].level_infos[base_level_];
+ if (first_face.width == 0 || first_face.height == 0 ||
+ first_face.depth == 0) {
+ return CAN_RENDER_NEVER;
}
if (target_ == GL_TEXTURE_CUBE_MAP && !cube_complete())
@@ -1144,6 +1196,40 @@ void Texture::UpdateMaxLevel(GLint max_level) {
UpdateNumMipLevels();
}
+void Texture::UpdateFaceNumMipLevels(size_t face_index,
+ GLint width,
+ GLint height,
+ GLint depth) {
+ DCHECK_LT(face_index, face_infos_.size());
+ DCHECK_LE(0, base_level_);
+ Texture::FaceInfo& face_info = face_infos_[face_index];
+ if (static_cast<size_t>(base_level_) >= face_info.level_infos.size()) {
+ face_info.num_mip_levels = 0;
+ } else {
+ DCHECK_LE(1u, face_info.level_infos.size());
+ GLint safe_max_level = std::min(
+ max_level_, static_cast<GLint>(face_info.level_infos.size() - 1));
+ GLint max_num_mip_levels = std::max(0, safe_max_level - base_level_ + 1);
+ face_info.num_mip_levels = std::min(
+ max_num_mip_levels,
+ TextureManager::ComputeMipMapCount(target_, width, height, depth));
+ }
+}
+
+void Texture::UpdateFaceNumMipLevels(size_t face_index) {
+ DCHECK_LT(face_index, face_infos_.size());
+ DCHECK_LE(0, base_level_);
+ Texture::FaceInfo& face_info = face_infos_[face_index];
+ GLint width = 0, height = 0, depth = 0;
+ if (static_cast<size_t>(base_level_) < face_info.level_infos.size()) {
+ const Texture::LevelInfo& info = face_info.level_infos[base_level_];
+ width = info.width;
+ height = info.height;
+ depth = info.depth;
+ }
+ UpdateFaceNumMipLevels(face_index, width, height, depth);
+}
+
void Texture::UpdateNumMipLevels() {
if (face_infos_.empty())
return;
@@ -1160,16 +1246,8 @@ void Texture::UpdateNumMipLevels() {
base_level_ = unclamped_base_level_;
max_level_ = unclamped_max_level_;
}
- GLint max_num_mip_levels = std::max(0, max_level_ - base_level_ + 1);
- for (size_t ii = 0; ii < face_infos_.size(); ++ii) {
- Texture::FaceInfo& face_info = face_infos_[ii];
- if (static_cast<size_t>(base_level_) >= face_info.level_infos.size())
- continue;
- const Texture::LevelInfo& info = face_info.level_infos[base_level_];
- face_info.num_mip_levels = std::min(
- max_num_mip_levels, TextureManager::ComputeMipMapCount(
- target_, info.width, info.height, info.depth));
- }
+ for (size_t ii = 0; ii < face_infos_.size(); ++ii)
+ UpdateFaceNumMipLevels(ii);
// mipmap-completeness needs to be re-evaluated.
completeness_dirty_ = true;
@@ -1214,10 +1292,7 @@ void Texture::SetLevelInfo(GLenum target,
info.width != width || info.height != height || info.depth != depth ||
info.format != format || info.type != type || info.internal_workaround) {
if (level == base_level_) {
- // Calculate the mip level count.
- face_infos_[face_index].num_mip_levels = std::min(
- std::max(0, max_level_ - base_level_ + 1),
- TextureManager::ComputeMipMapCount(target_, width, height, depth));
+ UpdateFaceNumMipLevels(face_index, width, height, depth);
// Update NPOT face count for the first level.
bool prev_npot = TextureIsNPOT(info.width, info.height, info.depth);
@@ -4024,7 +4099,8 @@ bool Texture::CompatibleWithSamplerUniformType(
level_info->format == GL_DEPTH_STENCIL ||
level_info->format == GL_LUMINANCE_ALPHA ||
level_info->format == GL_LUMINANCE || level_info->format == GL_ALPHA ||
- level_info->format == GL_BGRA_EXT || level_info->format == GL_SRGB_EXT;
+ level_info->format == GL_BGRA_EXT || level_info->format == GL_SRGB_EXT ||
+ level_info->format == GL_SRGB_ALPHA_EXT;
if (normalized) {
// All normalized texture formats are sampled as float.
return category == SAMPLER_FLOAT;
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index 5cd4704c51d..b1e0cce58b2 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -50,6 +50,9 @@ class SharedImageRepresentationSkiaGLAHB;
class SharedImageBackingIOSurface;
class SharedImageRepresentationGLTextureIOSurface;
class SharedImageRepresentationSkiaIOSurface;
+class SharedImageBackingDXGISwapChain;
+class StreamTexture;
+class SwapChainFactoryDXGI;
namespace gles2 {
class GLStreamTextureImage;
@@ -83,6 +86,12 @@ class GPU_GLES2_EXPORT TexturePassthrough final
void SetLevelImage(GLenum target, GLint level, gl::GLImage* image);
gl::GLImage* GetLevelImage(GLenum target, GLint level) const;
+ void SetStreamLevelImage(GLenum target,
+ GLint level,
+ GLStreamTextureImage* stream_texture_image,
+ GLuint service_id);
+ GLStreamTextureImage* GetStreamLevelImage(GLenum target, GLint level) const;
+
// Return true if and only if the decoder should BindTexImage / CopyTexImage
// us before sampling.
bool is_bind_pending() const { return is_bind_pending_; }
@@ -97,15 +106,34 @@ class GPU_GLES2_EXPORT TexturePassthrough final
~TexturePassthrough() override;
private:
+ bool LevelInfoExists(GLenum target, GLint level, size_t* out_face_idx) const;
+
+ void SetLevelImageInternal(GLenum target,
+ GLint level,
+ gl::GLImage* image,
+ GLStreamTextureImage* stream_texture_image,
+ GLuint service_id);
+
friend class base::RefCounted<TexturePassthrough>;
+ GLuint owned_service_id_ = 0;
+
bool have_context_;
bool is_bind_pending_ = false;
size_t estimated_size_ = 0;
// Bound images divided into faces and then levels
- std::vector<std::vector<scoped_refptr<gl::GLImage>>> level_images_;
+ struct LevelInfo {
+ LevelInfo();
+ LevelInfo(const LevelInfo& rhs);
+ ~LevelInfo();
+
+ scoped_refptr<gl::GLImage> image;
+ scoped_refptr<GLStreamTextureImage> stream_texture_image;
+ };
+
+ std::vector<std::vector<LevelInfo>> level_images_;
DISALLOW_COPY_AND_ASSIGN(TexturePassthrough);
};
@@ -386,8 +414,11 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
friend class gpu::SharedImageRepresentationGLTextureAHB;
friend class gpu::SharedImageRepresentationSkiaGLAHB;
friend class gpu::SharedImageBackingIOSurface;
+ friend class gpu::SharedImageBackingDXGISwapChain;
+ friend class gpu::SwapChainFactoryDXGI;
friend class gpu::SharedImageRepresentationGLTextureIOSurface;
friend class gpu::SharedImageRepresentationSkiaIOSurface;
+ friend class gpu::StreamTexture;
friend class AbstractTextureImplOnSharedContext;
friend class TextureDefinition;
friend class TextureManager;
@@ -607,6 +638,11 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
void UpdateBaseLevel(GLint base_level, const FeatureInfo* feature_info);
void UpdateMaxLevel(GLint max_level);
+ void UpdateFaceNumMipLevels(size_t face_index,
+ GLint width,
+ GLint height,
+ GLint depth);
+ void UpdateFaceNumMipLevels(size_t face_index);
void UpdateNumMipLevels();
// Increment the generation counter for all managers that have a reference to
diff --git a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
index 70084fa6750..4a3de4b410c 100644
--- a/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
+++ b/chromium/gpu/command_buffer/service/texture_manager_unittest.cc
@@ -756,12 +756,12 @@ TEST_F(TextureTest, SetTargetTextureExternalOES) {
EXPECT_FALSE(TextureTestHelper::IsCubeComplete(texture));
EXPECT_FALSE(manager_->CanGenerateMipmaps(texture_ref_.get()));
EXPECT_TRUE(TextureTestHelper::IsNPOT(texture));
- EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
EXPECT_TRUE(texture->SafeToRenderFrom());
EXPECT_TRUE(texture->IsImmutable());
}
-TEST_F(TextureTest, ZeroSizeCanNotRender) {
+TEST_F(TextureTest, ZeroSizeCanNotRender2D) {
manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_2D);
EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
manager_->SetLevelInfo(texture_ref_.get(), GL_TEXTURE_2D, 0, GL_RGBA, 1, 1, 1,
@@ -772,6 +772,19 @@ TEST_F(TextureTest, ZeroSizeCanNotRender) {
EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
}
+TEST_F(TextureTest, ZeroSizeCanNotRenderExternalOES) {
+ manager_->SetTarget(texture_ref_.get(), GL_TEXTURE_EXTERNAL_OES);
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+ manager_->SetLevelInfo(texture_ref_.get(), GL_TEXTURE_EXTERNAL_OES, 0,
+ GL_RGBA, 1, 1, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ gfx::Rect(1, 1));
+ EXPECT_TRUE(manager_->CanRender(texture_ref_.get()));
+ manager_->SetLevelInfo(texture_ref_.get(), GL_TEXTURE_EXTERNAL_OES, 0,
+ GL_RGBA, 0, 0, 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ gfx::Rect());
+ EXPECT_FALSE(manager_->CanRender(texture_ref_.get()));
+}
+
TEST_F(TextureTest, CanRenderTo) {
TestHelper::SetupFeatureInfoInitExpectations(gl_.get(), "");
scoped_refptr<FeatureInfo> feature_info(new FeatureInfo());
diff --git a/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc b/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc
index c8435dcbf07..262cf87cc82 100644
--- a/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc
+++ b/chromium/gpu/command_buffer/service/transfer_buffer_manager.cc
@@ -20,8 +20,6 @@
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/service/memory_tracking.h"
-using ::base::SharedMemory;
-
namespace gpu {
TransferBufferManager::TransferBufferManager(MemoryTracker* memory_tracker)
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder.cc b/chromium/gpu/command_buffer/service/webgpu_decoder.cc
index 6291b7f034d..6ea1c803434 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder.cc
@@ -17,9 +17,13 @@ namespace webgpu {
WebGPUDecoder* WebGPUDecoder::Create(
DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
+ SharedImageManager* shared_image_manager,
+ MemoryTracker* memory_tracker,
gles2::Outputter* outputter) {
#if BUILDFLAG(USE_DAWN)
- return CreateWebGPUDecoderImpl(client, command_buffer_service, outputter);
+ return CreateWebGPUDecoderImpl(client, command_buffer_service,
+ shared_image_manager, memory_tracker,
+ outputter);
#else
NOTREACHED();
return nullptr;
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder.h b/chromium/gpu/command_buffer/service/webgpu_decoder.h
index 03b313718e4..5d63844e338 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder.h
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder.h
@@ -13,6 +13,8 @@
namespace gpu {
class DecoderClient;
+class MemoryTracker;
+class SharedImageManager;
namespace gles2 {
class Outputter;
@@ -25,6 +27,8 @@ class GPU_GLES2_EXPORT WebGPUDecoder : public DecoderContext,
public:
static WebGPUDecoder* Create(DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
+ SharedImageManager* shared_image_manager,
+ MemoryTracker* memory_tracker,
gles2::Outputter* outputter);
~WebGPUDecoder() override;
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
index 9b2552e1bbf..d5a19011e7b 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
@@ -13,10 +13,15 @@
#include "base/logging.h"
#include "base/macros.h"
+#include "base/trace_event/trace_event.h"
+#include "gpu/command_buffer/common/mailbox.h"
#include "gpu/command_buffer/common/webgpu_cmd_format.h"
#include "gpu/command_buffer/common/webgpu_cmd_ids.h"
#include "gpu/command_buffer/service/command_buffer_service.h"
#include "gpu/command_buffer/service/decoder_client.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/webgpu_decoder.h"
#include "ipc/ipc_channel.h"
@@ -77,6 +82,13 @@ void* WireServerCommandSerializer::GetCmdSpace(size_t size) {
bool WireServerCommandSerializer::Flush() {
if (put_offset_ > 0) {
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "WireServerCommandSerializer::Flush", "bytes", put_offset_);
+
+ static uint32_t return_trace_id = 0;
+ TRACE_EVENT_FLOW_BEGIN0(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "DawnReturnCommands", return_trace_id++);
+
client_->HandleReturnData(base::make_span(buffer_.data(), put_offset_));
put_offset_ = 0;
}
@@ -89,6 +101,8 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
public:
WebGPUDecoderImpl(DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
+ SharedImageManager* shared_image_manager,
+ MemoryTracker* memory_tracker,
gles2::Outputter* outputter);
~WebGPUDecoderImpl() override;
@@ -165,6 +179,8 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
void PerformPollingWork() override {
DCHECK(dawn_device_);
DCHECK(wire_serializer_);
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "WebGPUDecoderImpl::PerformPollingWork");
dawn_procs_.deviceTick(dawn_device_);
wire_serializer_->Flush();
}
@@ -310,6 +326,14 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
DawnDevice CreateDefaultDevice();
+ std::unique_ptr<SharedImageRepresentationFactory>
+ shared_image_representation_factory_;
+ // Map from the <ID, generation> pair for a wire texture to the shared image
+ // representation for it.
+ base::flat_map<std::tuple<uint32_t, uint32_t>,
+ std::unique_ptr<SharedImageRepresentationDawn>>
+ associated_shared_image_map_;
+
std::unique_ptr<WireServerCommandSerializer> wire_serializer_;
std::unique_ptr<dawn_native::Instance> dawn_instance_;
DawnProcTable dawn_procs_;
@@ -334,20 +358,31 @@ constexpr WebGPUDecoderImpl::CommandInfo WebGPUDecoderImpl::command_info[] = {
WebGPUDecoder* CreateWebGPUDecoderImpl(
DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
+ SharedImageManager* shared_image_manager,
+ MemoryTracker* memory_tracker,
gles2::Outputter* outputter) {
- return new WebGPUDecoderImpl(client, command_buffer_service, outputter);
+ return new WebGPUDecoderImpl(client, command_buffer_service,
+ shared_image_manager, memory_tracker, outputter);
}
WebGPUDecoderImpl::WebGPUDecoderImpl(
DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
+ SharedImageManager* shared_image_manager,
+ MemoryTracker* memory_tracker,
gles2::Outputter* outputter)
: WebGPUDecoder(client, command_buffer_service, outputter),
+ shared_image_representation_factory_(
+ std::make_unique<SharedImageRepresentationFactory>(
+ shared_image_manager,
+ memory_tracker)),
wire_serializer_(new WireServerCommandSerializer(client)),
dawn_instance_(new dawn_native::Instance()),
dawn_procs_(dawn_native::GetProcs()) {}
WebGPUDecoderImpl::~WebGPUDecoderImpl() {
+ associated_shared_image_map_.clear();
+
// Reset the wire server first so all objects are destroyed before the device.
// TODO(enga): Handle Device/Context lost.
wire_server_ = nullptr;
@@ -499,6 +534,12 @@ error::Error WebGPUDecoderImpl::HandleDawnCommands(
return error::kOutOfBounds;
}
+ TRACE_EVENT_FLOW_END0(
+ TRACE_DISABLED_BY_DEFAULT("gpu.dawn"), "DawnCommands",
+ (static_cast<uint64_t>(commands_shm_id) << 32) + commands_shm_offset);
+
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("gpu.dawn"),
+ "WebGPUDecoderImpl::HandleDawnCommands", "bytes", size);
std::vector<char> commands(shm_commands, shm_commands + size);
if (!wire_server_->HandleCommands(commands.data(), size)) {
NOTREACHED();
@@ -508,5 +549,104 @@ error::Error WebGPUDecoderImpl::HandleDawnCommands(
return error::kNoError;
}
+error::Error WebGPUDecoderImpl::HandleAssociateMailboxImmediate(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile webgpu::cmds::AssociateMailboxImmediate& c =
+ *static_cast<const volatile webgpu::cmds::AssociateMailboxImmediate*>(
+ cmd_data);
+
+ uint32_t device_id = static_cast<uint32_t>(c.device_id);
+ uint32_t device_generation = static_cast<uint32_t>(c.device_generation);
+ uint32_t id = static_cast<uint32_t>(c.id);
+ uint32_t generation = static_cast<uint32_t>(c.generation);
+ uint32_t usage = static_cast<DawnTextureUsageBit>(c.usage);
+
+ // Unpack the mailbox
+ if (sizeof(Mailbox) > immediate_data_size) {
+ return error::kOutOfBounds;
+ }
+ volatile const GLbyte* mailbox_bytes =
+ gles2::GetImmediateDataAs<volatile const GLbyte*>(c, sizeof(Mailbox),
+ immediate_data_size);
+ if (mailbox_bytes == nullptr) {
+ return error::kOutOfBounds;
+ }
+ Mailbox mailbox = Mailbox::FromVolatile(
+ *reinterpret_cast<const volatile Mailbox*>(mailbox_bytes));
+ DLOG_IF(ERROR, !mailbox.Verify())
+ << "AssociateMailbox was passed an invalid mailbox";
+
+ // TODO(cwallez@chromium.org): Use device_id/generation when the decoder
+ // supports multiple devices.
+ if (device_id != 0 || device_generation != 0) {
+ DLOG(ERROR) << "AssociateMailbox: Invalid device ID";
+ return error::kInvalidArguments;
+ }
+
+ static constexpr uint32_t kAllowedTextureUsages = static_cast<uint32_t>(
+ DAWN_TEXTURE_USAGE_BIT_TRANSFER_SRC |
+ DAWN_TEXTURE_USAGE_BIT_TRANSFER_DST | DAWN_TEXTURE_USAGE_BIT_SAMPLED |
+ DAWN_TEXTURE_USAGE_BIT_OUTPUT_ATTACHMENT);
+ if (usage & ~kAllowedTextureUsages) {
+ DLOG(ERROR) << "AssociateMailbox: Invalid usage";
+ return error::kInvalidArguments;
+ }
+ DawnTextureUsageBit dawn_usage = static_cast<DawnTextureUsageBit>(usage);
+
+ // Create a DawnTexture from the mailbox.
+ std::unique_ptr<SharedImageRepresentationDawn> shared_image =
+ shared_image_representation_factory_->ProduceDawn(mailbox, dawn_device_);
+ if (!shared_image) {
+ DLOG(ERROR) << "AssociateMailbox: Couldn't produce shared image";
+ return error::kInvalidArguments;
+ }
+
+ DawnTexture texture = shared_image->BeginAccess(dawn_usage);
+ if (!texture) {
+ DLOG(ERROR) << "AssociateMailbox: Couldn't begin shared image access";
+ return error::kInvalidArguments;
+ }
+
+ // Inject the texture in the dawn_wire::Server and remember which shared image
+ // it is associated with.
+ if (!wire_server_->InjectTexture(texture, id, generation)) {
+ DLOG(ERROR) << "AssociateMailbox: Invalid texture ID";
+ return error::kInvalidArguments;
+ }
+
+ std::tuple<uint32_t, uint32_t> id_and_generation{id, generation};
+ auto insertion = associated_shared_image_map_.emplace(
+ id_and_generation, std::move(shared_image));
+
+ // InjectTexture already validated that the (ID, generation) can't have been
+ // registered before.
+ DCHECK(insertion.second);
+
+ return error::kNoError;
+}
+
+error::Error WebGPUDecoderImpl::HandleDissociateMailbox(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile webgpu::cmds::DissociateMailbox& c =
+ *static_cast<const volatile webgpu::cmds::DissociateMailbox*>(cmd_data);
+
+ uint32_t texture_id = static_cast<uint32_t>(c.texture_id);
+ uint32_t texture_generation = static_cast<uint32_t>(c.texture_generation);
+
+ std::tuple<uint32_t, uint32_t> id_and_generation{texture_id,
+ texture_generation};
+ auto it = associated_shared_image_map_.find(id_and_generation);
+ if (it == associated_shared_image_map_.end()) {
+ DLOG(ERROR) << "DissociateMailbox: Invalid texture ID";
+ return error::kInvalidArguments;
+ }
+
+ it->second->EndAccess();
+ associated_shared_image_map_.erase(it);
+ return error::kNoError;
+}
+
} // namespace webgpu
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.h b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.h
index d91f20c4558..25b424bdede 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.h
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.h
@@ -11,6 +11,8 @@ namespace gpu {
class CommandBufferServiceBase;
class DecoderClient;
+class MemoryTracker;
+class SharedImageManager;
namespace gles2 {
class Outputter;
@@ -23,6 +25,8 @@ class WebGPUDecoder;
GPU_GLES2_EXPORT WebGPUDecoder* CreateWebGPUDecoderImpl(
DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
+ SharedImageManager* shared_image_manager,
+ MemoryTracker* memory_tracker,
gles2::Outputter* outputter);
} // namespace webgpu
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc
index 97877930340..f71be008421 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_unittest.cc
@@ -5,10 +5,13 @@
#include "gpu/command_buffer/service/webgpu_decoder.h"
#include "gpu/command_buffer/client/client_test_helper.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/common/webgpu_cmd_format.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/decoder_client.h"
#include "gpu/command_buffer/service/gpu_tracer.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/command_buffer/service/test_helper.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -26,10 +29,22 @@ class WebGPUDecoderTest : public ::testing::Test {
void SetUp() override {
command_buffer_service_.reset(new FakeCommandBufferServiceBase());
decoder_.reset(WebGPUDecoder::Create(nullptr, command_buffer_service_.get(),
+ &shared_image_manager_, nullptr,
&outputter_));
if (decoder_->Initialize() != ContextResult::kSuccess) {
decoder_ = nullptr;
}
+
+ factory_ = std::make_unique<SharedImageFactory>(
+ GpuPreferences(), GpuDriverBugWorkarounds(), GpuFeatureInfo(),
+ /*context_state=*/nullptr, /*mailbox_manager=*/nullptr,
+ &shared_image_manager_, /*image_factory=*/nullptr, /*tracker=*/nullptr,
+ /*enable_wrapped_sk_image=*/false);
+ }
+
+ void TearDown() override {
+ factory_->DestroyAllSharedImages(true);
+ factory_.reset();
}
bool WebGPUSupported() const { return decoder_ != nullptr; }
@@ -44,10 +59,22 @@ class WebGPUDecoderTest : public ::testing::Test {
&entries_processed);
}
+ template <typename T>
+ error::Error ExecuteImmediateCmd(const T& cmd, size_t data_size) {
+ static_assert(T::kArgFlags == cmd::kAtLeastN,
+ "T::kArgFlags should equal cmd::kAtLeastN");
+ int entries_processed = 0;
+ return decoder_->DoCommands(1, (const void*)&cmd,
+ ComputeNumEntries(sizeof(cmd) + data_size),
+ &entries_processed);
+ }
+
protected:
std::unique_ptr<FakeCommandBufferServiceBase> command_buffer_service_;
std::unique_ptr<WebGPUDecoder> decoder_;
gles2::TraceOutputter outputter_;
+ SharedImageManager shared_image_manager_;
+ std::unique_ptr<SharedImageFactory> factory_;
scoped_refptr<gles2::ContextGroup> group_;
};
@@ -61,5 +88,125 @@ TEST_F(WebGPUDecoderTest, DawnCommands) {
cmd.Init(0, 0, 0);
EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
}
+
+struct AssociateMailboxCmdStorage {
+ cmds::AssociateMailboxImmediate cmd;
+ GLbyte data[GL_MAILBOX_SIZE_CHROMIUM];
+};
+
+TEST_F(WebGPUDecoderTest, AssociateMailbox) {
+ if (!WebGPUSupported()) {
+ LOG(ERROR) << "Test skipped because WebGPU isn't supported";
+ return;
+ }
+
+ gpu::Mailbox mailbox = Mailbox::GenerateForSharedImage();
+ EXPECT_TRUE(factory_->CreateSharedImage(
+ mailbox, viz::ResourceFormat::RGBA_8888, {1, 1},
+ gfx::ColorSpace::CreateSRGB(), SHARED_IMAGE_USAGE_WEBGPU));
+
+ // Error case: invalid mailbox
+ {
+ gpu::Mailbox bad_mailbox;
+ AssociateMailboxCmdStorage cmd;
+ cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_BIT_SAMPLED, bad_mailbox.name);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(cmd.cmd, sizeof(bad_mailbox.name)));
+ }
+
+ // Error case: device doesn't exist.
+ {
+ AssociateMailboxCmdStorage cmd;
+ cmd.cmd.Init(42, 42, 1, 0, DAWN_TEXTURE_USAGE_BIT_SAMPLED, mailbox.name);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
+ }
+
+ // Error case: texture ID invalid for the wire server.
+ {
+ AssociateMailboxCmdStorage cmd;
+ cmd.cmd.Init(0, 0, 42, 42, DAWN_TEXTURE_USAGE_BIT_SAMPLED, mailbox.name);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
+ }
+
+ // Error case: invalid usage.
+ {
+ AssociateMailboxCmdStorage cmd;
+ cmd.cmd.Init(0, 0, 42, 42, DAWN_TEXTURE_USAGE_BIT_SAMPLED, mailbox.name);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
+ }
+
+ // Error case: invalid texture usage.
+ {
+ AssociateMailboxCmdStorage cmd;
+ cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_BIT_FORCE32, mailbox.name);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
+ }
+
+ // Control case: test a successful call to AssociateMailbox
+ // (1, 0) is a valid texture ID on dawn_wire server start.
+ // The control case is not put first because it modifies the internal state
+ // of the Dawn wire server and would make calls with the same texture ID
+ // and generation invalid.
+ {
+ AssociateMailboxCmdStorage cmd;
+ cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_BIT_SAMPLED, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
+ }
+
+ // Error case: associated to an already associated texture.
+ {
+ AssociateMailboxCmdStorage cmd;
+ cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_BIT_SAMPLED, mailbox.name);
+ EXPECT_EQ(error::kInvalidArguments,
+ ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
+ }
+
+ // Dissociate the image from the control case to remove its reference.
+ {
+ cmds::DissociateMailbox cmd;
+ cmd.Init(1, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
+TEST_F(WebGPUDecoderTest, DissociateMailbox) {
+ if (!WebGPUSupported()) {
+ LOG(ERROR) << "Test skipped because WebGPU isn't supported";
+ return;
+ }
+
+ gpu::Mailbox mailbox = Mailbox::GenerateForSharedImage();
+ EXPECT_TRUE(factory_->CreateSharedImage(
+ mailbox, viz::ResourceFormat::RGBA_8888, {1, 1},
+ gfx::ColorSpace::CreateSRGB(), SHARED_IMAGE_USAGE_WEBGPU));
+
+ // Associate a mailbox so we can later dissociate it.
+ {
+ AssociateMailboxCmdStorage cmd;
+ cmd.cmd.Init(0, 0, 1, 0, DAWN_TEXTURE_USAGE_BIT_SAMPLED, mailbox.name);
+ EXPECT_EQ(error::kNoError,
+ ExecuteImmediateCmd(cmd.cmd, sizeof(mailbox.name)));
+ }
+
+ // Error case: wrong texture ID
+ {
+ cmds::DissociateMailbox cmd;
+ cmd.Init(42, 42);
+ EXPECT_EQ(error::kInvalidArguments, ExecuteCmd(cmd));
+ }
+
+ // Success case
+ {
+ cmds::DissociateMailbox cmd;
+ cmd.Init(1, 0);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ }
+}
+
} // namespace webgpu
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
index 2529841b745..448104240c3 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
@@ -12,6 +12,7 @@
#include "base/trace_event/trace_event.h"
#include "components/viz/common/resources/resource_format_utils.h"
#include "gpu/command_buffer/common/shared_image_trace_utils.h"
+#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
@@ -22,6 +23,7 @@
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/GrTypes.h"
#include "ui/gl/gl_context.h"
+#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/trace_util.h"
namespace gpu {
@@ -44,8 +46,8 @@ class WrappedSkImage : public SharedImageBacking {
}
void Destroy() override {
- DCHECK(!!image_);
- image_.reset();
+ DCHECK(backend_texture_.isValid());
+ context_state_->gr_context()->deleteBackendTexture(backend_texture_);
}
bool IsCleared() const override { return cleared_; }
@@ -69,19 +71,21 @@ class WrappedSkImage : public SharedImageBacking {
pmd->AddOwnershipEdge(client_guid, service_guid, importance);
}
+ SkColorType GetSkColorType() {
+ return viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format());
+ }
+
sk_sp<SkSurface> GetSkSurface(int final_msaa_count,
- SkColorType color_type,
- sk_sp<SkColorSpace> color_space,
const SkSurfaceProps& surface_props) {
if (context_state_->context_lost())
return nullptr;
DCHECK(context_state_->IsCurrent(nullptr));
- GrBackendTexture gr_texture =
- image_->getBackendTexture(/*flushPendingGrContextIO=*/true);
- DCHECK(gr_texture.isValid());
- return SkSurface::MakeFromBackendTextureAsRenderTarget(
- context_state_->gr_context(), gr_texture, kTopLeft_GrSurfaceOrigin,
- final_msaa_count, color_type, color_space, &surface_props);
+
+ return SkSurface::MakeFromBackendTexture(
+ context_state_->gr_context(), backend_texture_,
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, GetSkColorType(),
+ color_space().ToSkColorSpace(), &surface_props);
}
sk_sp<SkPromiseImageTexture> promise_texture() { return promise_texture_; }
@@ -120,45 +124,55 @@ class WrappedSkImage : public SharedImageBacking {
context_state_->set_need_context_state_reset(true);
- if (data.empty()) {
- auto surface = SkSurface::MakeRenderTarget(context_state_->gr_context(),
- SkBudgeted::kNo, info);
- if (!surface)
- return false;
+ backend_texture_ = context_state_->gr_context()->createBackendTexture(
+ size().width(), size().height(), GetSkColorType(), GrMipMapped::kNo,
+ GrRenderable::kYes);
+
+ if (!backend_texture_.isValid())
+ return false;
+
+#if DCHECK_IS_ON()
+ bool need_temporary_surface = true;
+#else
+ bool need_temporary_surface = !data.empty();
+#endif
+
+ sk_sp<SkSurface> surface =
+ need_temporary_surface
+ ? SkSurface::MakeFromBackendTexture(
+ context_state_->gr_context(), backend_texture_,
+ kTopLeft_GrSurfaceOrigin, /*sampleCnt=*/0, GetSkColorType(),
+ color_space().ToSkColorSpace(), /*surfaceProps=*/nullptr)
+ : nullptr;
+
+#if DCHECK_IS_ON()
+ {
+ auto* canvas = surface->getCanvas();
+ canvas->clear(SK_ColorGREEN);
+ }
+#endif
- image_ = surface->makeImageSnapshot();
- } else {
+ if (!data.empty()) {
SkBitmap bitmap;
if (!bitmap.installPixels(info, const_cast<uint8_t*>(data.data()),
info.minRowBytes())) {
return false;
}
- sk_sp<SkImage> image = SkImage::MakeFromBitmap(bitmap);
- if (!image)
- return false;
- image_ = image->makeTextureImage(context_state_->gr_context(),
- image->colorSpace());
+ surface->writePixels(bitmap, /*dstX=*/0, /*dstY=*/0);
}
- if (!image_ || !image_->isTextureBacked())
- return false;
-
- auto gr_texture =
- image_->getBackendTexture(/*flushPendingGrContextIO=*/false);
- if (!gr_texture.isValid())
- return false;
- promise_texture_ = SkPromiseImageTexture::Make(gr_texture);
+ promise_texture_ = SkPromiseImageTexture::Make(backend_texture_);
- switch (gr_texture.backend()) {
+ switch (backend_texture_.backend()) {
case GrBackendApi::kOpenGL: {
GrGLTextureInfo tex_info;
- if (gr_texture.getGLTextureInfo(&tex_info))
+ if (backend_texture_.getGLTextureInfo(&tex_info))
tracing_id_ = tex_info.fID;
break;
}
case GrBackendApi::kVulkan: {
GrVkImageInfo image_info;
- if (gr_texture.getVkImageInfo(&image_info))
+ if (backend_texture_.getVkImageInfo(&image_info))
tracing_id_ = reinterpret_cast<uint64_t>(image_info.fImage);
break;
}
@@ -171,7 +185,7 @@ class WrappedSkImage : public SharedImageBacking {
SharedContextState* const context_state_;
- sk_sp<SkImage> image_;
+ GrBackendTexture backend_texture_;
sk_sp<SkPromiseImageTexture> promise_texture_;
bool cleared_ = false;
@@ -192,13 +206,11 @@ class WrappedSkImageRepresentation : public SharedImageRepresentationSkia {
sk_sp<SkSurface> BeginWriteAccess(
int final_msaa_count,
- const SkSurfaceProps& surface_props) override {
- SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
- /*gpu_compositing=*/true, format());
-
- auto surface = wrapped_sk_image()->GetSkSurface(
- final_msaa_count, sk_color_type,
- backing()->color_space().ToSkColorSpace(), surface_props);
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
+ auto surface =
+ wrapped_sk_image()->GetSkSurface(final_msaa_count, surface_props);
write_surface_ = surface.get();
return surface;
}
@@ -209,11 +221,15 @@ class WrappedSkImageRepresentation : public SharedImageRepresentationSkia {
write_surface_ = nullptr;
}
- sk_sp<SkPromiseImageTexture> BeginReadAccess() override {
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override {
+ DCHECK(!write_surface_);
return wrapped_sk_image()->promise_texture();
}
void EndReadAccess() override {
+ DCHECK(!write_surface_);
// TODO(ericrk): Handle begin/end correctness checks.
}
@@ -277,6 +293,11 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
return nullptr;
}
+bool WrappedSkImageFactory::CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) {
+ return false;
+}
+
std::unique_ptr<SharedImageRepresentationSkia> WrappedSkImage::ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.h b/chromium/gpu/command_buffer/service/wrapped_sk_image.h
index 8529d9b269a..b549965f3af 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.h
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.h
@@ -51,6 +51,8 @@ class GPU_GLES2_EXPORT WrappedSkImageFactory
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) override;
+ bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) override;
private:
SharedContextState* const context_state_;
diff --git a/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt
index bcb656291ed..53dbe0052a3 100644
--- a/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/webgpu_cmd_buffer_functions.txt
@@ -4,6 +4,8 @@
// This file is read by build_webgpu_cmd_buffer.py to generate commands.
-// WebGPU commands. Note the first 2 characters (usually 'gl') are
+// WebGPU commands. Note the first 2 characters (usually 'wg') are
// completely ignored.
GL_APICALL void GL_APIENTRY wgDawnCommands (const char* commands, size_t size);
+GL_APICALL void GL_APIENTRY wgAssociateMailbox (GLuint device_id, GLuint device_generation, GLuint id, GLuint generation, GLuint usage, const GLbyte* mailbox);
+GL_APICALL void GL_APIENTRY wgDissociateMailbox (GLuint texture_id, GLuint texture_generation);
diff --git a/chromium/gpu/config/gpu_blacklist.cc b/chromium/gpu/config/gpu_blacklist.cc
index e7e24c8dfc2..66eebdd1a16 100644
--- a/chromium/gpu/config/gpu_blacklist.cc
+++ b/chromium/gpu/config/gpu_blacklist.cc
@@ -47,7 +47,7 @@ std::unique_ptr<GpuBlacklist> GpuBlacklist::Create(
GPU_FEATURE_TYPE_OOP_RASTERIZATION);
list->AddSupportedFeature("android_surface_control",
GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL);
-
+ list->AddSupportedFeature("metal", GPU_FEATURE_TYPE_METAL);
return list;
}
diff --git a/chromium/gpu/config/gpu_crash_keys.cc b/chromium/gpu/config/gpu_crash_keys.cc
index a51c27ad6e8..6f512b0d8f6 100644
--- a/chromium/gpu/config/gpu_crash_keys.cc
+++ b/chromium/gpu/config/gpu_crash_keys.cc
@@ -22,14 +22,6 @@ crash_reporter::CrashKeyString<128> gpu_renderer("gpu-gl-renderer");
#endif
crash_reporter::CrashKeyString<4> gpu_gl_context_is_virtual(
"gpu-gl-context-is-virtual");
-crash_reporter::CrashKeyString<20> seconds_since_last_progress_report(
- "seconds-since-last-progress-report");
-crash_reporter::CrashKeyString<20> seconds_since_last_suspend(
- "seconds-since-last-suspend");
-crash_reporter::CrashKeyString<20> seconds_since_last_resume(
- "seconds-since-last-resume");
-crash_reporter::CrashKeyString<20> seconds_since_last_logging(
- "seconds-since-last-logging");
crash_reporter::CrashKeyString<20> available_physical_memory_in_mb(
"available-physical-memory-in-mb");
diff --git a/chromium/gpu/config/gpu_crash_keys.h b/chromium/gpu/config/gpu_crash_keys.h
index e6bee5b8b83..df1ccf1591f 100644
--- a/chromium/gpu/config/gpu_crash_keys.h
+++ b/chromium/gpu/config/gpu_crash_keys.h
@@ -28,11 +28,6 @@ extern GPU_EXPORT crash_reporter::CrashKeyString<128> gpu_renderer;
#endif
extern GPU_EXPORT crash_reporter::CrashKeyString<4> gpu_gl_context_is_virtual;
extern GPU_EXPORT crash_reporter::CrashKeyString<20>
- seconds_since_last_progress_report;
-extern GPU_EXPORT crash_reporter::CrashKeyString<20> seconds_since_last_suspend;
-extern GPU_EXPORT crash_reporter::CrashKeyString<20> seconds_since_last_resume;
-extern GPU_EXPORT crash_reporter::CrashKeyString<20> seconds_since_last_logging;
-extern GPU_EXPORT crash_reporter::CrashKeyString<20>
available_physical_memory_in_mb;
} // namespace crash_keys
diff --git a/chromium/gpu/config/gpu_driver_bug_list.cc b/chromium/gpu/config/gpu_driver_bug_list.cc
index af5331d274c..df20136e155 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.cc
+++ b/chromium/gpu/config/gpu_driver_bug_list.cc
@@ -77,13 +77,13 @@ void GpuDriverBugList::AppendWorkaroundsFromCommandLine(
// Removing conflicting workarounds.
switch (kFeatureList[i].type) {
- case FORCE_DISCRETE_GPU:
- workarounds->erase(FORCE_INTEGRATED_GPU);
- workarounds->insert(FORCE_DISCRETE_GPU);
+ case FORCE_HIGH_PERFORMANCE_GPU:
+ workarounds->erase(FORCE_LOW_POWER_GPU);
+ workarounds->insert(FORCE_HIGH_PERFORMANCE_GPU);
break;
- case FORCE_INTEGRATED_GPU:
- workarounds->erase(FORCE_DISCRETE_GPU);
- workarounds->insert(FORCE_INTEGRATED_GPU);
+ case FORCE_LOW_POWER_GPU:
+ workarounds->erase(FORCE_HIGH_PERFORMANCE_GPU);
+ workarounds->insert(FORCE_LOW_POWER_GPU);
break;
default:
workarounds->insert(kFeatureList[i].type);
diff --git a/chromium/gpu/config/gpu_driver_bug_list.json b/chromium/gpu/config/gpu_driver_bug_list.json
index c54a3811cce..506120d8443 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.json
+++ b/chromium/gpu/config/gpu_driver_bug_list.json
@@ -348,7 +348,7 @@
"value": "2"
},
"features": [
- "force_discrete_gpu"
+ "force_high_performance_gpu"
]
},
{
@@ -1865,10 +1865,6 @@
},
"gl_type": "gl",
"gl_version_string": ".*Mesa.*",
- "direct_rendering_version": {
- "op" : "<",
- "value": "2.3"
- },
"features": [
"disable_post_sub_buffers_for_onscreen_surfaces"
]
@@ -2363,7 +2359,7 @@
"device_id": ["0x0116", "0x0126"],
"multi_gpu_style": "amd_switchable",
"features": [
- "force_discrete_gpu"
+ "force_high_performance_gpu"
]
},
{
@@ -3067,41 +3063,6 @@
]
},
{
- "id": 287,
- "description": "glCopyTexImage2D on Adreno fails if source is GL_RGB10_A2 and destination is not.",
- "cr_bugs": [925986],
- "os": {
- "type": "android",
- "version": {
- "op": ">=",
- "value": "5.0.0"
- }
- },
- "gl_vendor": "Qualcomm.*",
- "gl_renderer": ".*4\\d\\d",
- "gl_renderer": "Adreno \\(TM\\) [345].*",
- "features": [
- "disable_copy_tex_image_2d_rgb10_a2_adreno"
- ]
- },
- {
- "id": 288,
- "description": "glCopyTexImage2D on NVIDIA Tegra fails in certain cases if source is GL_RGB10_A2.",
- "cr_bugs": [925986],
- "os": {
- "type": "android"
- },
- "gl_vendor": "NVIDIA.*",
- "gl_type": "gles",
- "gl_version": {
- "op": ">=",
- "value": "3.0"
- },
- "features": [
- "disable_copy_tex_image_2d_rgb10_a2_tegra"
- ]
- },
- {
"id": 289,
"description": "Fake entry for testing command buffer init failures on ES 2.0",
"cr_bugs": [923134],
@@ -3266,32 +3227,19 @@
"description": "Mesa hangs the system when allocating large textures",
"cr_bugs": [927470],
"os": {
- "type" : "linux"
- },
- "driver_vendor": "Mesa",
- "driver_version": {
- "op": ">=",
- "value": "18"
+ "type" : "linux",
+ "version": {
+ "op": "<",
+ "value": "5.0"
+ }
},
+ "vendor_id": "0x8086",
"features": [
"max_texture_size_limit_4096",
"max_3d_array_texture_size_1024"
]
},
{
- "id": 302,
- "description": "glCopyTexImage2D on Mali-T820 fails in certain cases if source is GL_RGB10_A2.",
- "cr_bugs": [953771],
- "os": {
- "type": "android"
- },
- "gl_vendor": "ARM.*",
- "gl_renderer": "Mali-T820",
- "features": [
- "disable_copy_tex_image_2d_rgb10_a2_mali"
- ]
- },
- {
"id": 303,
"cr_bugs": [890227],
"description": "Dynamic texture map crashes on Intel drivers less than version 24",
@@ -3304,7 +3252,36 @@
"value": "24"
},
"features": [
- "disable_nv12_dynamic_textures"
+ "disable_nv12_dynamic_textures"
+ ]
+ },
+ {
+ "id": 304,
+ "cr_bugs": [911349],
+ "description": "Video processor blit to swap chain results in column of black pixels on older NVIDIA drivers at certain scaling factors",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x10de",
+ "driver_version": {
+ "op": "<",
+ "value": "419"
+ },
+ "features": [
+ "disable_direct_composition_layers"
+ ]
+ },
+ {
+ "id": 306,
+ "description": "Program binaries don't contain transform feedback varyings on Mali GPUs",
+ "cr_bugs": [961950],
+ "os": {
+ "type": "android"
+ },
+ "gl_vendor": "ARM.*",
+ "gl_renderer": "Mali.*",
+ "features": [
+ "disable_program_caching_for_transform_feedback"
]
}
]
diff --git a/chromium/gpu/config/gpu_driver_bug_list_unittest.cc b/chromium/gpu/config/gpu_driver_bug_list_unittest.cc
index 58739a41a17..5da59857337 100644
--- a/chromium/gpu/config/gpu_driver_bug_list_unittest.cc
+++ b/chromium/gpu/config/gpu_driver_bug_list_unittest.cc
@@ -59,17 +59,17 @@ TEST_F(GpuDriverBugListTest, AppendSingleWorkaround) {
TEST_F(GpuDriverBugListTest, AppendForceGPUWorkaround) {
base::CommandLine command_line(0, nullptr);
command_line.AppendSwitch(
- GpuDriverBugWorkaroundTypeToString(FORCE_DISCRETE_GPU));
+ GpuDriverBugWorkaroundTypeToString(FORCE_HIGH_PERFORMANCE_GPU));
std::set<int> workarounds;
workarounds.insert(EXIT_ON_CONTEXT_LOST);
- workarounds.insert(FORCE_INTEGRATED_GPU);
+ workarounds.insert(FORCE_LOW_POWER_GPU);
EXPECT_EQ(2u, workarounds.size());
- EXPECT_EQ(1u, workarounds.count(FORCE_INTEGRATED_GPU));
+ EXPECT_EQ(1u, workarounds.count(FORCE_LOW_POWER_GPU));
GpuDriverBugList::AppendWorkaroundsFromCommandLine(
&workarounds, command_line);
EXPECT_EQ(2u, workarounds.size());
- EXPECT_EQ(0u, workarounds.count(FORCE_INTEGRATED_GPU));
- EXPECT_EQ(1u, workarounds.count(FORCE_DISCRETE_GPU));
+ EXPECT_EQ(0u, workarounds.count(FORCE_LOW_POWER_GPU));
+ EXPECT_EQ(1u, workarounds.count(FORCE_HIGH_PERFORMANCE_GPU));
}
// Test for invariant "Assume the newly last added entry has the largest ID".
diff --git a/chromium/gpu/config/gpu_feature_info.h b/chromium/gpu/config/gpu_feature_info.h
index e78b2f8ec6d..9748fa5dccc 100644
--- a/chromium/gpu/config/gpu_feature_info.h
+++ b/chromium/gpu/config/gpu_feature_info.h
@@ -11,6 +11,10 @@
#include "gpu/config/gpu_feature_type.h"
#include "gpu/gpu_export.h"
+namespace gfx {
+enum class BufferFormat;
+}
+
namespace gl {
class GLContext;
} // namespace gl
@@ -27,23 +31,6 @@ enum GpuFeatureStatus {
kGpuFeatureStatusMax
};
-enum AntialiasingMode {
- kAntialiasingModeUnspecified,
- kAntialiasingModeNone,
- kAntialiasingModeMSAAImplicitResolve,
- kAntialiasingModeMSAAExplicitResolve,
- kAntialiasingModeScreenSpaceAntialiasing,
-};
-
-struct GPU_EXPORT WebglPreferences {
- AntialiasingMode anti_aliasing_mode = kAntialiasingModeUnspecified;
- uint32_t msaa_sample_count = 8;
- uint32_t eqaa_storage_sample_count = 4;
- // WebGL-specific numeric limits.
- uint32_t max_active_webgl_contexts = 0;
- uint32_t max_active_webgl_contexts_on_worker = 0;
-};
-
struct GPU_EXPORT GpuFeatureInfo {
GpuFeatureInfo();
GpuFeatureInfo(const GpuFeatureInfo&);
@@ -71,12 +58,15 @@ struct GPU_EXPORT GpuFeatureInfo {
std::string disabled_extensions;
// Disabled WebGL extensions separated by whitespaces.
std::string disabled_webgl_extensions;
- // Preferences for webgl.
- WebglPreferences webgl_preferences;
// Applied gpu blacklist entry indices.
std::vector<uint32_t> applied_gpu_blacklist_entries;
// Applied gpu driver bug list entry indices.
std::vector<uint32_t> applied_gpu_driver_bug_list_entries;
+
+ // BufferFormats that can be allocated and then bound, if known and provided
+ // by the platform.
+ std::vector<gfx::BufferFormat>
+ supported_buffer_formats_for_allocation_and_texturing;
};
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_feature_type.h b/chromium/gpu/config/gpu_feature_type.h
index a9684b9943f..5ac3ab5c4f8 100644
--- a/chromium/gpu/config/gpu_feature_type.h
+++ b/chromium/gpu/config/gpu_feature_type.h
@@ -23,6 +23,7 @@ enum GpuFeatureType {
GPU_FEATURE_TYPE_PROTECTED_VIDEO_DECODE,
GPU_FEATURE_TYPE_OOP_RASTERIZATION,
GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL,
+ GPU_FEATURE_TYPE_METAL,
NUMBER_OF_GPU_FEATURE_TYPES
};
diff --git a/chromium/gpu/config/gpu_finch_features.cc b/chromium/gpu/config/gpu_finch_features.cc
index 5b8e38fffe8..e64de67ee2d 100644
--- a/chromium/gpu/config/gpu_finch_features.cc
+++ b/chromium/gpu/config/gpu_finch_features.cc
@@ -43,7 +43,7 @@ const base::Feature kAndroidSurfaceControl{"AndroidSurfaceControl",
// Enable GPU Rasterization by default. This can still be overridden by
// --force-gpu-rasterization or --disable-gpu-rasterization.
#if defined(OS_MACOSX) || defined(OS_WIN) || defined(OS_CHROMEOS) || \
- defined(OS_ANDROID)
+ defined(OS_ANDROID) || defined(OS_FUCHSIA)
// DefaultEnableGpuRasterization has launched on Mac, Windows, ChromeOS, and
// Android.
const base::Feature kDefaultEnableGpuRasterization{
@@ -63,17 +63,23 @@ const base::Feature kDefaultEnableOopRasterization{
const base::Feature kDefaultPassthroughCommandDecoder{
"DefaultPassthroughCommandDecoder", base::FEATURE_DISABLED_BY_DEFAULT};
-
-// Overrides preferred overlay format to NV12 instead of YUY2.
-const base::Feature kDirectCompositionPreferNV12Overlays{
- "DirectCompositionPreferNV12Overlays", base::FEATURE_ENABLED_BY_DEFAULT};
-
// Allow putting a video swapchain underneath the main swapchain, so overlays
// can be used even if there are controls on top of the video. It can be
// enabled only when overlay is supported.
const base::Feature kDirectCompositionUnderlays{
"DirectCompositionUnderlays", base::FEATURE_ENABLED_BY_DEFAULT};
+// Allow GPU watchdog to keep waiting for ackowledgement if one is already
+// issued from the monitored thread.
+const base::Feature kGpuWatchdogNoTerminationAwaitingAcknowledge{
+ "GpuWatchdogNoTerminationAwaitingAcknowledge",
+ base::FEATURE_DISABLED_BY_DEFAULT};
+
+#if defined(OS_MACOSX)
+// Enable use of Metal for OOP rasterization.
+const base::Feature kMetal{"Metal", base::FEATURE_DISABLED_BY_DEFAULT};
+#endif
+
// Causes us to use the SharedImageManager, removing support for the old
// mailbox system. Any consumers of the GPU process using the old mailbox
// system will experience undefined results.
@@ -86,11 +92,6 @@ const base::Feature kUseDCOverlaysForSoftwareProtectedVideo{
"UseDCOverlaysForSoftwareProtectedVideo",
base::FEATURE_DISABLED_BY_DEFAULT};
-// Use decode swap chain created from compatible video decoder buffers.
-const base::Feature kDirectCompositionUseNV12DecodeSwapChain{
- "DirectCompositionUseNV12DecodeSwapChain",
- base::FEATURE_ENABLED_BY_DEFAULT};
-
// Controls the decode acceleration of JPEG images (as opposed to camera
// captures) in Chrome OS using the VA-API.
// TODO(andrescj): remove or enable by default in Chrome OS once
diff --git a/chromium/gpu/config/gpu_finch_features.h b/chromium/gpu/config/gpu_finch_features.h
index 1e668915577..3d22294a7de 100644
--- a/chromium/gpu/config/gpu_finch_features.h
+++ b/chromium/gpu/config/gpu_finch_features.h
@@ -27,16 +27,19 @@ GPU_EXPORT extern const base::Feature kDefaultEnableOopRasterization;
GPU_EXPORT extern const base::Feature kDefaultPassthroughCommandDecoder;
-GPU_EXPORT extern const base::Feature kDirectCompositionPreferNV12Overlays;
-
GPU_EXPORT extern const base::Feature kDirectCompositionUnderlays;
+GPU_EXPORT extern const base::Feature
+ kGpuWatchdogNoTerminationAwaitingAcknowledge;
+
+#if defined(OS_MACOSX)
+GPU_EXPORT extern const base::Feature kMetal;
+#endif
+
GPU_EXPORT extern const base::Feature kSharedImageManager;
GPU_EXPORT extern const base::Feature kUseDCOverlaysForSoftwareProtectedVideo;
-GPU_EXPORT extern const base::Feature kDirectCompositionUseNV12DecodeSwapChain;
-
GPU_EXPORT extern const base::Feature kVaapiJpegImageDecodeAcceleration;
#if defined(OS_ANDROID)
diff --git a/chromium/gpu/config/gpu_info.cc b/chromium/gpu/config/gpu_info.cc
index 15e6b3014ec..7b9fddb29c1 100644
--- a/chromium/gpu/config/gpu_info.cc
+++ b/chromium/gpu/config/gpu_info.cc
@@ -94,16 +94,6 @@ void EnumerateImageDecodeAcceleratorSupportedProfile(
}
#if defined(OS_WIN)
-void EnumerateOverlayCapability(const gpu::OverlayCapability& cap,
- gpu::GPUInfo::Enumerator* enumerator) {
- std::string key_string = "overlayCap";
- key_string += OverlayFormatToString(cap.format);
- enumerator->BeginOverlayCapability();
- enumerator->AddString(key_string.c_str(),
- cap.is_scaling_supported ? "SCALING" : "DIRECT");
- enumerator->EndOverlayCapability();
-}
-
void EnumerateDx12VulkanVersionInfo(const gpu::Dx12VulkanVersionInfo& info,
gpu::GPUInfo::Enumerator* enumerator) {
enumerator->BeginDx12VulkanVersionInfo();
@@ -121,22 +111,17 @@ void EnumerateDx12VulkanVersionInfo(const gpu::Dx12VulkanVersionInfo& info,
namespace gpu {
#if defined(OS_WIN)
-const char* OverlayFormatToString(OverlayFormat format) {
- switch (format) {
- case OverlayFormat::kBGRA:
- return "BGRA";
- case OverlayFormat::kYUY2:
- return "YUY2";
- case OverlayFormat::kNV12:
- return "NV12";
+const char* OverlaySupportToString(gpu::OverlaySupport support) {
+ switch (support) {
+ case gpu::OverlaySupport::kNone:
+ return "NONE";
+ case gpu::OverlaySupport::kDirect:
+ return "DIRECT";
+ case gpu::OverlaySupport::kScaling:
+ return "SCALING";
}
}
-
-bool OverlayCapability::operator==(const OverlayCapability& other) const {
- return format == other.format &&
- is_scaling_supported == other.is_scaling_supported;
-}
-#endif
+#endif // OS_WIN
VideoDecodeAcceleratorCapabilities::VideoDecodeAcceleratorCapabilities()
: flags(0) {}
@@ -252,7 +237,8 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
#if defined(OS_WIN)
bool direct_composition;
bool supports_overlays;
- OverlayCapabilities overlay_capabilities;
+ OverlaySupport yuy2_overlay_support;
+ OverlaySupport nv12_overlay_support;
DxDiagNode dx_diagnostics;
Dx12VulkanVersionInfo dx12_vulkan_version_info;
#endif
@@ -288,8 +274,7 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
EnumerateGPUDevice(secondary_gpu, enumerator);
enumerator->BeginAuxAttributes();
- enumerator->AddTimeDeltaInSecondsF("initializationTime",
- initialization_time);
+ enumerator->AddTimeDeltaInSecondsF("initializationTime", initialization_time);
enumerator->AddBool("optimus", optimus);
enumerator->AddBool("amdSwitchable", amd_switchable);
enumerator->AddString("pixelShaderVersion", pixel_shader_version);
@@ -317,12 +302,16 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
#if defined(OS_WIN)
enumerator->AddBool("directComposition", direct_composition);
enumerator->AddBool("supportsOverlays", supports_overlays);
- for (const auto& cap : overlay_capabilities)
- EnumerateOverlayCapability(cap, enumerator);
+ enumerator->AddString("yuy2OverlaySupport",
+ OverlaySupportToString(yuy2_overlay_support));
+ enumerator->AddString("nv12OverlaySupport",
+ OverlaySupportToString(nv12_overlay_support));
EnumerateDx12VulkanVersionInfo(dx12_vulkan_version_info, enumerator);
#endif
enumerator->AddInt("videoDecodeAcceleratorFlags",
video_decode_accelerator_capabilities.flags);
+
+ // TODO(crbug.com/966839): Fix the two supported profile dumping below.
for (const auto& profile :
video_decode_accelerator_capabilities.supported_profiles)
EnumerateVideoDecodeAcceleratorSupportedProfile(profile, enumerator);
diff --git a/chromium/gpu/config/gpu_info.h b/chromium/gpu/config/gpu_info.h
index 15a2b8d9e07..8aef7721d75 100644
--- a/chromium/gpu/config/gpu_info.h
+++ b/chromium/gpu/config/gpu_info.h
@@ -154,19 +154,9 @@ using ImageDecodeAcceleratorSupportedProfiles =
std::vector<ImageDecodeAcceleratorSupportedProfile>;
#if defined(OS_WIN)
-// Common overlay formats that we're interested in. Must match the OverlayFormat
-// enum in //tools/metrics/histograms/enums.xml. Mapped to corresponding DXGI
-// formats in DirectCompositionSurfaceWin.
-enum class OverlayFormat { kBGRA = 0, kYUY2 = 1, kNV12 = 2, kMaxValue = kNV12 };
+enum class OverlaySupport { kNone = 0, kDirect = 1, kScaling = 2 };
-GPU_EXPORT const char* OverlayFormatToString(OverlayFormat format);
-
-struct GPU_EXPORT OverlayCapability {
- OverlayFormat format;
- bool is_scaling_supported;
- bool operator==(const OverlayCapability& other) const;
-};
-using OverlayCapabilities = std::vector<OverlayCapability>;
+GPU_EXPORT const char* OverlaySupportToString(OverlaySupport support);
struct GPU_EXPORT Dx12VulkanVersionInfo {
bool IsEmpty() const { return !d3d12_feature_level && !vulkan_version; }
@@ -323,8 +313,8 @@ struct GPU_EXPORT GPUInfo {
// True if we use direct composition surface overlays on Windows.
bool supports_overlays = false;
-
- OverlayCapabilities overlay_capabilities;
+ OverlaySupport yuy2_overlay_support = OverlaySupport::kNone;
+ OverlaySupport nv12_overlay_support = OverlaySupport::kNone;
// The information returned by the DirectX Diagnostics Tool.
DxDiagNode dx_diagnostics;
@@ -391,9 +381,6 @@ struct GPU_EXPORT GPUInfo {
virtual void BeginAuxAttributes() = 0;
virtual void EndAuxAttributes() = 0;
- virtual void BeginOverlayCapability() = 0;
- virtual void EndOverlayCapability() = 0;
-
virtual void BeginDx12VulkanVersionInfo() = 0;
virtual void EndDx12VulkanVersionInfo() = 0;
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index 79d1a7f4cba..ad0e520f37f 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -20,7 +20,6 @@
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
#include "base/trace_event/trace_event.h"
-#include "gpu/config/gpu_preferences.h"
#include "gpu/config/gpu_switches.h"
#include "third_party/angle/src/gpu_info_util/SystemInfo.h" // nogncheck
#include "third_party/skia/include/core/SkGraphics.h"
@@ -171,8 +170,7 @@ bool CollectBasicGraphicsInfo(const base::CommandLine* command_line,
return CollectBasicGraphicsInfo(gpu_info);
}
-bool CollectGraphicsInfoGL(GPUInfo* gpu_info,
- const GpuPreferences& gpu_preferences) {
+bool CollectGraphicsInfoGL(GPUInfo* gpu_info) {
TRACE_EVENT0("startup", "gpu_info_collector::CollectGraphicsInfoGL");
DCHECK_NE(gl::GetGLImplementation(), gl::kGLImplementationNone);
@@ -341,24 +339,22 @@ void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
if (system_info->gpus.empty()) {
return;
}
- if (system_info->primaryGPUIndex < 0) {
- system_info->primaryGPUIndex = 0;
+ if (system_info->activeGPUIndex < 0) {
+ system_info->activeGPUIndex = 0;
}
- angle::GPUDeviceInfo* primary =
- &system_info->gpus[system_info->primaryGPUIndex];
+ angle::GPUDeviceInfo* active =
+ &system_info->gpus[system_info->activeGPUIndex];
- gpu_info->gpu.vendor_id = primary->vendorId;
- gpu_info->gpu.device_id = primary->deviceId;
- gpu_info->gpu.driver_vendor = std::move(primary->driverVendor);
- gpu_info->gpu.driver_version = std::move(primary->driverVersion);
- gpu_info->gpu.driver_date = std::move(primary->driverDate);
- if (system_info->primaryGPUIndex == system_info->activeGPUIndex) {
- gpu_info->gpu.active = true;
- }
+ gpu_info->gpu.vendor_id = active->vendorId;
+ gpu_info->gpu.device_id = active->deviceId;
+ gpu_info->gpu.driver_vendor = std::move(active->driverVendor);
+ gpu_info->gpu.driver_version = std::move(active->driverVersion);
+ gpu_info->gpu.driver_date = std::move(active->driverDate);
+ gpu_info->gpu.active = true;
for (size_t i = 0; i < system_info->gpus.size(); i++) {
- if (static_cast<int>(i) == system_info->primaryGPUIndex) {
+ if (static_cast<int>(i) == system_info->activeGPUIndex) {
continue;
}
@@ -368,9 +364,6 @@ void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
device.driver_vendor = std::move(system_info->gpus[i].driverVendor);
device.driver_version = std::move(system_info->gpus[i].driverVersion);
device.driver_date = std::move(system_info->gpus[i].driverDate);
- if (static_cast<int>(i) == system_info->activeGPUIndex) {
- device.active = true;
- }
gpu_info->secondary_gpus.push_back(device);
}
@@ -385,7 +378,7 @@ void FillGPUInfoFromSystemInfo(GPUInfo* gpu_info,
void CollectGraphicsInfoForTesting(GPUInfo* gpu_info) {
DCHECK(gpu_info);
#if defined(OS_ANDROID)
- CollectContextGraphicsInfo(gpu_info, GpuPreferences());
+ CollectContextGraphicsInfo(gpu_info);
#else
CollectBasicGraphicsInfo(gpu_info);
#endif // OS_ANDROID
diff --git a/chromium/gpu/config/gpu_info_collector.h b/chromium/gpu/config/gpu_info_collector.h
index 9421bab49d9..10d3fef7b65 100644
--- a/chromium/gpu/config/gpu_info_collector.h
+++ b/chromium/gpu/config/gpu_info_collector.h
@@ -20,7 +20,6 @@ class CommandLine;
}
namespace gpu {
-struct GpuPreferences;
// Collects basic GPU info without creating a GL/DirectX context (and without
// the danger of crashing), including vendor_id and device_id.
@@ -35,9 +34,7 @@ GPU_EXPORT bool CollectBasicGraphicsInfo(const base::CommandLine* command_line,
// Create a GL/DirectX context and collect related info.
// This is called at GPU process startup time.
-GPU_EXPORT bool CollectContextGraphicsInfo(
- GPUInfo* gpu_info,
- const GpuPreferences& gpu_preferences);
+GPU_EXPORT bool CollectContextGraphicsInfo(GPUInfo* gpu_info);
#if defined(OS_WIN)
// Collect the DirectX Disagnostics information about the attached displays.
@@ -47,8 +44,7 @@ GPU_EXPORT void RecordGpuSupportedRuntimeVersionHistograms(
#endif // OS_WIN
// Create a GL context and collect GL strings and versions.
-GPU_EXPORT bool CollectGraphicsInfoGL(GPUInfo* gpu_info,
- const GpuPreferences& gpu_preferences);
+GPU_EXPORT bool CollectGraphicsInfoGL(GPUInfo* gpu_info);
// If more than one GPUs are identified, and GL strings are available,
// identify the active GPU based on GL strings.
diff --git a/chromium/gpu/config/gpu_info_collector_android.cc b/chromium/gpu/config/gpu_info_collector_android.cc
index 1e7b09442b9..f8ba4bdf49b 100644
--- a/chromium/gpu/config/gpu_info_collector_android.cc
+++ b/chromium/gpu/config/gpu_info_collector_android.cc
@@ -12,8 +12,7 @@
namespace gpu {
-bool CollectContextGraphicsInfo(GPUInfo* gpu_info,
- const GpuPreferences& gpu_preferences) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
// When command buffer is compiled as a standalone library, the process might
// not have a Java environment.
if (base::android::IsVMInitialized()) {
@@ -22,7 +21,7 @@ bool CollectContextGraphicsInfo(GPUInfo* gpu_info,
}
// At this point GL bindings have been initialized already.
- return CollectGraphicsInfoGL(gpu_info, gpu_preferences);
+ return CollectGraphicsInfoGL(gpu_info);
}
bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
diff --git a/chromium/gpu/config/gpu_info_collector_fuchsia.cc b/chromium/gpu/config/gpu_info_collector_fuchsia.cc
index d285e4bb563..0288191a68c 100644
--- a/chromium/gpu/config/gpu_info_collector_fuchsia.cc
+++ b/chromium/gpu/config/gpu_info_collector_fuchsia.cc
@@ -8,13 +8,12 @@
namespace gpu {
-bool CollectContextGraphicsInfo(GPUInfo* gpu_info,
- const GpuPreferences& gpu_preferences) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
TRACE_EVENT0("gpu", "gpu_info_collector::CollectGraphicsInfo");
- return CollectGraphicsInfoGL(gpu_info, gpu_preferences);
+ return CollectGraphicsInfoGL(gpu_info);
}
bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
diff --git a/chromium/gpu/config/gpu_info_collector_linux.cc b/chromium/gpu/config/gpu_info_collector_linux.cc
index 87834a1f66d..2ecc1922ce5 100644
--- a/chromium/gpu/config/gpu_info_collector_linux.cc
+++ b/chromium/gpu/config/gpu_info_collector_linux.cc
@@ -4,18 +4,16 @@
#include "base/trace_event/trace_event.h"
#include "gpu/config/gpu_info_collector.h"
-#include "gpu/config/gpu_preferences.h"
#include "third_party/angle/src/gpu_info_util/SystemInfo.h"
namespace gpu {
-bool CollectContextGraphicsInfo(GPUInfo* gpu_info,
- const GpuPreferences& gpu_preferences) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
TRACE_EVENT0("gpu", "gpu_info_collector::CollectGraphicsInfo");
- return CollectGraphicsInfoGL(gpu_info, gpu_preferences);
+ return CollectGraphicsInfoGL(gpu_info);
}
bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
diff --git a/chromium/gpu/config/gpu_info_collector_mac.mm b/chromium/gpu/config/gpu_info_collector_mac.mm
index 3550b55ecf6..62b5c074de5 100644
--- a/chromium/gpu/config/gpu_info_collector_mac.mm
+++ b/chromium/gpu/config/gpu_info_collector_mac.mm
@@ -9,12 +9,11 @@
namespace gpu {
-bool CollectContextGraphicsInfo(GPUInfo* gpu_info,
- const GpuPreferences& gpu_preferences) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
DCHECK(gpu_info);
TRACE_EVENT0("gpu", "gpu_info_collector::CollectGraphicsInfo");
- return CollectGraphicsInfoGL(gpu_info, gpu_preferences);
+ return CollectGraphicsInfoGL(gpu_info);
}
bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
diff --git a/chromium/gpu/config/gpu_info_collector_unittest.cc b/chromium/gpu/config/gpu_info_collector_unittest.cc
index 77aa910c116..6fbdc287992 100644
--- a/chromium/gpu/config/gpu_info_collector_unittest.cc
+++ b/chromium/gpu/config/gpu_info_collector_unittest.cc
@@ -12,7 +12,6 @@
#include "base/strings/string_split.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_info_collector.h"
-#include "gpu/config/gpu_preferences.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "ui/gl/gl_context_stub.h"
@@ -208,7 +207,7 @@ INSTANTIATE_TEST_SUITE_P(GPUConfig,
// be fixed.
TEST_P(GPUInfoCollectorTest, CollectGraphicsInfoGL) {
GPUInfo gpu_info;
- CollectGraphicsInfoGL(&gpu_info, GpuPreferences());
+ CollectGraphicsInfoGL(&gpu_info);
#if defined(OS_WIN)
if (GetParam() == kMockedWindows) {
EXPECT_EQ(test_values_.gpu.driver_vendor, gpu_info.gpu.driver_vendor);
diff --git a/chromium/gpu/config/gpu_info_collector_win.cc b/chromium/gpu/config/gpu_info_collector_win.cc
index 4b0453b89f0..4220cd9090d 100644
--- a/chromium/gpu/config/gpu_info_collector_win.cc
+++ b/chromium/gpu/config/gpu_info_collector_win.cc
@@ -557,13 +557,12 @@ void RecordGpuSupportedRuntimeVersionHistograms(Dx12VulkanVersionInfo* info) {
}
}
-bool CollectContextGraphicsInfo(GPUInfo* gpu_info,
- const GpuPreferences& gpu_preferences) {
+bool CollectContextGraphicsInfo(GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "CollectGraphicsInfo");
DCHECK(gpu_info);
- if (!CollectGraphicsInfoGL(gpu_info, gpu_preferences))
+ if (!CollectGraphicsInfoGL(gpu_info))
return false;
// ANGLE's renderer strings are of the form:
diff --git a/chromium/gpu/config/gpu_lists_version.h b/chromium/gpu/config/gpu_lists_version.h
index ee09ad0f264..69156200ab0 100644
--- a/chromium/gpu/config/gpu_lists_version.h
+++ b/chromium/gpu/config/gpu_lists_version.h
@@ -3,6 +3,6 @@
#ifndef GPU_CONFIG_GPU_LISTS_VERSION_H_
#define GPU_CONFIG_GPU_LISTS_VERSION_H_
-#define GPU_LISTS_VERSION "eca637952e5dc4b02f1f7a70e68a7ab0094ccd1b"
+#define GPU_LISTS_VERSION "9c159000ae39faf1c6f82b40307e199d48cd3667"
#endif // GPU_CONFIG_GPU_LISTS_VERSION_H_
diff --git a/chromium/gpu/config/gpu_preferences.h b/chromium/gpu/config/gpu_preferences.h
index 838aab5ce60..aa5a09aa462 100644
--- a/chromium/gpu/config/gpu_preferences.h
+++ b/chromium/gpu/config/gpu_preferences.h
@@ -44,15 +44,6 @@ struct GPU_EXPORT GpuPreferences {
// If return false, |this| won't be touched.
bool FromSwitchValue(const std::string& data);
- // Support for accelerated vpx decoding for various vendors,
- // intended to be used as a bitfield.
- // VPX_VENDOR_ALL should be updated whenever a new entry is added.
- enum VpxDecodeVendors {
- VPX_VENDOR_NONE = 0x00,
- VPX_VENDOR_MICROSOFT = 0x01,
- VPX_VENDOR_AMD = 0x02,
- VPX_VENDOR_ALL = 0x03,
- };
// ===================================
// Settings from //content/public/common/content_switches.h
@@ -72,10 +63,6 @@ struct GPU_EXPORT GpuPreferences {
// Starts the GPU sandbox before creating a GL context.
bool gpu_sandbox_start_early = false;
- // Enables experimental hardware acceleration for VP8/VP9 video decoding.
- // Bitmask - 0x1=Microsoft, 0x2=AMD, 0x03=Try all. Windows only.
- VpxDecodeVendors enable_accelerated_vpx_decode = VPX_VENDOR_MICROSOFT;
-
// Enables using CODECAPI_AVLowLatencyMode. Windows only.
bool enable_low_latency_dxva = true;
@@ -152,10 +139,6 @@ struct GPU_EXPORT GpuPreferences {
// ===================================
// Settings from //gpu/config/gpu_switches.h
- // Allows user to override the maximum number of WebGL contexts. A value of 0
- // uses the defaults, which are encoded in the GPU process's code.
- uint32_t max_active_webgl_contexts = 0;
-
// Enables the use of SurfaceControl for overlays on Android.
bool enable_android_surface_control = false;
@@ -212,6 +195,10 @@ struct GPU_EXPORT GpuPreferences {
// testing in order to detect regressions which crash Vulkan.
bool disable_vulkan_fallback_to_gl_for_testing = false;
+ // Use Metal for rasterization and Skia-based display compositing. Note that
+ // this is compatible with GL-based display compositing.
+ bool enable_metal = false;
+
// ===================================
// Settings from //cc/base/switches.h
// Enable the GPU benchmarking extension; used by tests only.
diff --git a/chromium/gpu/config/gpu_preferences_unittest.cc b/chromium/gpu/config/gpu_preferences_unittest.cc
index 14708613b86..ca358f260a5 100644
--- a/chromium/gpu/config/gpu_preferences_unittest.cc
+++ b/chromium/gpu/config/gpu_preferences_unittest.cc
@@ -21,8 +21,6 @@ void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
EXPECT_EQ(left.gpu_startup_dialog, right.gpu_startup_dialog);
EXPECT_EQ(left.disable_gpu_watchdog, right.disable_gpu_watchdog);
EXPECT_EQ(left.gpu_sandbox_start_early, right.gpu_sandbox_start_early);
- EXPECT_EQ(left.enable_accelerated_vpx_decode,
- right.enable_accelerated_vpx_decode);
EXPECT_EQ(left.enable_low_latency_dxva, right.enable_low_latency_dxva);
EXPECT_EQ(left.enable_zero_copy_dxgi_video,
right.enable_zero_copy_dxgi_video);
@@ -55,7 +53,6 @@ void CheckGpuPreferencesEqual(GpuPreferences left, GpuPreferences right) {
right.enable_threaded_texture_mailboxes);
EXPECT_EQ(left.gl_shader_interm_output, right.gl_shader_interm_output);
EXPECT_EQ(left.emulate_shader_precision, right.emulate_shader_precision);
- EXPECT_EQ(left.max_active_webgl_contexts, right.max_active_webgl_contexts);
EXPECT_EQ(left.enable_gpu_service_logging, right.enable_gpu_service_logging);
EXPECT_EQ(left.enable_gpu_service_tracing, right.enable_gpu_service_tracing);
EXPECT_EQ(left.use_passthrough_cmd_decoder,
@@ -113,8 +110,6 @@ TEST(GpuPreferencesTest, EncodeDecode) {
GPU_PREFERENCES_FIELD(gpu_startup_dialog, true)
GPU_PREFERENCES_FIELD(disable_gpu_watchdog, true)
GPU_PREFERENCES_FIELD(gpu_sandbox_start_early, true)
- GPU_PREFERENCES_FIELD(enable_accelerated_vpx_decode,
- GpuPreferences::VPX_VENDOR_AMD)
GPU_PREFERENCES_FIELD(enable_low_latency_dxva, false)
GPU_PREFERENCES_FIELD(enable_zero_copy_dxgi_video, true)
GPU_PREFERENCES_FIELD(enable_nv12_dxgi_video, true)
@@ -138,7 +133,6 @@ TEST(GpuPreferencesTest, EncodeDecode) {
GPU_PREFERENCES_FIELD(enable_threaded_texture_mailboxes, true)
GPU_PREFERENCES_FIELD(gl_shader_interm_output, true)
GPU_PREFERENCES_FIELD(emulate_shader_precision, true)
- GPU_PREFERENCES_FIELD(max_active_webgl_contexts, 1)
GPU_PREFERENCES_FIELD(enable_gpu_service_logging, true)
GPU_PREFERENCES_FIELD(enable_gpu_service_tracing, true)
GPU_PREFERENCES_FIELD(use_passthrough_cmd_decoder, true)
diff --git a/chromium/gpu/config/gpu_switches.cc b/chromium/gpu/config/gpu_switches.cc
index cd1f363e896..de51ed27d64 100644
--- a/chromium/gpu/config/gpu_switches.cc
+++ b/chromium/gpu/config/gpu_switches.cc
@@ -33,26 +33,18 @@ const char kGpuPreferences[] = "gpu-preferences";
// Ignores GPU blacklist.
const char kIgnoreGpuBlacklist[] = "ignore-gpu-blacklist";
-// Allows user to override maximum number of active WebGL contexts per
-// renderer process.
-const char kMaxActiveWebGLContexts[] = "max-active-webgl-contexts";
-
// Allows explicitly specifying the shader disk cache size for embedded devices.
// Default value is 6MB. On Android, 2MB is default and 128KB for low-end
// devices.
const char kShaderDiskCacheSizeKB[] = "shader-disk-cache-size-kb";
-// Set the antialiasing method used for webgl. (none, explicit, implicit, or
-// screenspace)
-const char kWebglAntialiasingMode[] = "webgl-antialiasing-mode";
-
-// Set a default sample count for webgl if msaa is enabled.
-const char kWebglMSAASampleCount[] = "webgl-msaa-sample-count";
-
// Disables the non-sandboxed GPU process for DX12 and Vulkan info collection
const char kDisableGpuProcessForDX12VulkanInfoCollection[] =
"disable-gpu-process-for-dx12-vulkan-info-collection";
const char kEnableUnsafeWebGPU[] = "enable-unsafe-webgpu";
+// Enables WebGL overlays for Windows.
+const char kEnableWebGLSwapChain[] = "enable-webgl-swap-chain";
+
} // namespace switches
diff --git a/chromium/gpu/config/gpu_switches.h b/chromium/gpu/config/gpu_switches.h
index fa533cf4bbd..61349b81987 100644
--- a/chromium/gpu/config/gpu_switches.h
+++ b/chromium/gpu/config/gpu_switches.h
@@ -16,12 +16,10 @@ GPU_EXPORT extern const char kGpuBlacklistTestGroup[];
GPU_EXPORT extern const char kGpuDriverBugListTestGroup[];
GPU_EXPORT extern const char kGpuPreferences[];
GPU_EXPORT extern const char kIgnoreGpuBlacklist[];
-GPU_EXPORT extern const char kMaxActiveWebGLContexts[];
GPU_EXPORT extern const char kShaderDiskCacheSizeKB[];
-GPU_EXPORT extern const char kWebglAntialiasingMode[];
-GPU_EXPORT extern const char kWebglMSAASampleCount[];
GPU_EXPORT extern const char kDisableGpuProcessForDX12VulkanInfoCollection[];
GPU_EXPORT extern const char kEnableUnsafeWebGPU[];
+GPU_EXPORT extern const char kEnableWebGLSwapChain[];
} // namespace switches
diff --git a/chromium/gpu/config/gpu_switching.cc b/chromium/gpu/config/gpu_switching.cc
index 10b3955e42d..b585f0ad7cf 100644
--- a/chromium/gpu/config/gpu_switching.cc
+++ b/chromium/gpu/config/gpu_switching.cc
@@ -80,11 +80,11 @@ bool SwitchableGPUsSupported(const GPUInfo& gpu_info,
void InitializeSwitchableGPUs(
const std::vector<int32_t>& driver_bug_workarounds) {
gl::GLContext::SetSwitchableGPUsSupported();
- if (ContainsWorkaround(driver_bug_workarounds, FORCE_DISCRETE_GPU)) {
- gl::GLContext::SetForcedGpuPreference(gl::PreferDiscreteGpu);
+ if (ContainsWorkaround(driver_bug_workarounds, FORCE_HIGH_PERFORMANCE_GPU)) {
+ gl::GLContext::SetForcedGpuPreference(gl::GpuPreference::kHighPerformance);
ForceDiscreteGPU();
- } else if (ContainsWorkaround(driver_bug_workarounds, FORCE_INTEGRATED_GPU)) {
- gl::GLContext::SetForcedGpuPreference(gl::PreferIntegratedGpu);
+ } else if (ContainsWorkaround(driver_bug_workarounds, FORCE_LOW_POWER_GPU)) {
+ gl::GLContext::SetForcedGpuPreference(gl::GpuPreference::kLowPower);
}
}
diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc
index 2fd0939ad6c..f819a2bfe7e 100644
--- a/chromium/gpu/config/gpu_util.cc
+++ b/chromium/gpu/config/gpu_util.cc
@@ -56,6 +56,22 @@ GpuFeatureStatus GetAndroidSurfaceControlFeatureStatus(
#endif
}
+GpuFeatureStatus GetMetalFeatureStatus(
+ const std::set<int>& blacklisted_features,
+ const GpuPreferences& gpu_preferences) {
+#if defined(OS_MACOSX)
+ if (blacklisted_features.count(GPU_FEATURE_TYPE_METAL))
+ return kGpuFeatureStatusBlacklisted;
+
+ if (!gpu_preferences.enable_metal)
+ return kGpuFeatureStatusDisabled;
+
+ return kGpuFeatureStatusEnabled;
+#else
+ return kGpuFeatureStatusDisabled;
+#endif
+}
+
GpuFeatureStatus GetGpuRasterizationFeatureStatus(
const std::set<int>& blacklisted_features,
const base::CommandLine& command_line) {
@@ -292,6 +308,8 @@ GpuFeatureInfo ComputeGpuFeatureInfoWithHardwareAccelerationDisabled() {
kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] =
kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_METAL] =
+ kGpuFeatureStatusDisabled;
#if DCHECK_IS_ON()
for (int ii = 0; ii < NUMBER_OF_GPU_FEATURE_TYPES; ++ii) {
DCHECK_NE(kGpuFeatureStatusUndefined, gpu_feature_info.status_values[ii]);
@@ -326,6 +344,8 @@ GpuFeatureInfo ComputeGpuFeatureInfoWithNoGpu() {
kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] =
kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_METAL] =
+ kGpuFeatureStatusDisabled;
#if DCHECK_IS_ON()
for (int ii = 0; ii < NUMBER_OF_GPU_FEATURE_TYPES; ++ii) {
DCHECK_NE(kGpuFeatureStatusUndefined, gpu_feature_info.status_values[ii]);
@@ -360,6 +380,8 @@ GpuFeatureInfo ComputeGpuFeatureInfoForSwiftShader() {
kGpuFeatureStatusDisabled;
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] =
kGpuFeatureStatusDisabled;
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_METAL] =
+ kGpuFeatureStatusDisabled;
#if DCHECK_IS_ON()
for (int ii = 0; ii < NUMBER_OF_GPU_FEATURE_TYPES; ++ii) {
DCHECK_NE(kGpuFeatureStatusUndefined, gpu_feature_info.status_values[ii]);
@@ -435,6 +457,8 @@ GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info,
gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] =
GetAndroidSurfaceControlFeatureStatus(blacklisted_features,
gpu_preferences);
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_METAL] =
+ GetMetalFeatureStatus(blacklisted_features, gpu_preferences);
#if DCHECK_IS_ON()
for (int ii = 0; ii < NUMBER_OF_GPU_FEATURE_TYPES; ++ii) {
DCHECK_NE(kGpuFeatureStatusUndefined, gpu_feature_info.status_values[ii]);
@@ -502,57 +526,6 @@ GpuFeatureInfo ComputeGpuFeatureInfo(const GPUInfo& gpu_info,
// initialization than commandline switches.
AppendWorkaroundsToCommandLine(gpu_feature_info, command_line);
- if (gpu_feature_info.IsWorkaroundEnabled(MAX_MSAA_SAMPLE_COUNT_4)) {
- gpu_feature_info.webgl_preferences.msaa_sample_count = 4;
- }
-
- if (command_line->HasSwitch(switches::kWebglMSAASampleCount)) {
- std::string sample_count =
- command_line->GetSwitchValueASCII(switches::kWebglMSAASampleCount);
- uint32_t count;
- if (base::StringToUint(sample_count, &count)) {
- gpu_feature_info.webgl_preferences.msaa_sample_count = count;
- }
- }
-
- if (command_line->HasSwitch(switches::kWebglAntialiasingMode)) {
- std::string mode =
- command_line->GetSwitchValueASCII(switches::kWebglAntialiasingMode);
- if (mode == "none") {
- gpu_feature_info.webgl_preferences.anti_aliasing_mode =
- kAntialiasingModeNone;
- } else if (mode == "explicit") {
- gpu_feature_info.webgl_preferences.anti_aliasing_mode =
- kAntialiasingModeMSAAExplicitResolve;
- } else if (mode == "implicit") {
- gpu_feature_info.webgl_preferences.anti_aliasing_mode =
- kAntialiasingModeMSAAImplicitResolve;
- } else if (mode == "screenspace") {
- gpu_feature_info.webgl_preferences.anti_aliasing_mode =
- kAntialiasingModeScreenSpaceAntialiasing;
- } else {
- gpu_feature_info.webgl_preferences.anti_aliasing_mode =
- kAntialiasingModeUnspecified;
- }
- }
-
-// Set default context limits for WebGL.
-#if defined(OS_ANDROID)
- gpu_feature_info.webgl_preferences.max_active_webgl_contexts = 8u;
-#else
- gpu_feature_info.webgl_preferences.max_active_webgl_contexts = 16u;
-#endif
- gpu_feature_info.webgl_preferences.max_active_webgl_contexts_on_worker = 4u;
-
- uint32_t override_val = gpu_preferences.max_active_webgl_contexts;
- if (override_val) {
- // It shouldn't be common for users to override this. If they do,
- // just override both values.
- gpu_feature_info.webgl_preferences.max_active_webgl_contexts = override_val;
- gpu_feature_info.webgl_preferences.max_active_webgl_contexts_on_worker =
- override_val;
- }
-
return gpu_feature_info;
}
@@ -629,7 +602,7 @@ bool InitializeGLThreadSafe(base::CommandLine* command_line,
return false;
}
}
- CollectContextGraphicsInfo(out_gpu_info, gpu_preferences);
+ CollectContextGraphicsInfo(out_gpu_info);
*out_gpu_feature_info = ComputeGpuFeatureInfo(*out_gpu_info, gpu_preferences,
command_line, nullptr);
if (!out_gpu_feature_info->disabled_extensions.empty()) {
diff --git a/chromium/gpu/config/gpu_workaround_list.txt b/chromium/gpu/config/gpu_workaround_list.txt
index 5532d22be01..8e346571bee 100644
--- a/chromium/gpu/config/gpu_workaround_list.txt
+++ b/chromium/gpu/config/gpu_workaround_list.txt
@@ -57,9 +57,9 @@ exit_on_context_lost
flush_on_framebuffer_change
force_cube_complete
force_cube_map_positive_x_allocation
-force_discrete_gpu
-force_integrated_gpu
+force_high_performance_gpu
force_int_or_srgb_cube_texture_complete
+force_low_power_gpu
force_update_scissor_state_when_binding_fbo0
get_frag_data_info_bug
gl_clear_broken
@@ -110,8 +110,5 @@ use_virtualized_gl_contexts
validate_multisample_buffer_allocation
wake_up_gpu_before_drawing
use_copyteximage2d_instead_of_readpixels_on_multisampled_textures
-disable_copy_tex_image_2d_rgb10_a2_adreno
-disable_copy_tex_image_2d_rgb10_a2_tegra
use_eqaa_storage_samples_2
max_3d_array_texture_size_1024
-disable_copy_tex_image_2d_rgb10_a2_mali
diff --git a/chromium/gpu/config/software_rendering_list.json b/chromium/gpu/config/software_rendering_list.json
index d4c3b436434..03e251547f2 100644
--- a/chromium/gpu/config/software_rendering_list.json
+++ b/chromium/gpu/config/software_rendering_list.json
@@ -287,7 +287,6 @@
},
"vendor_id": "0x10de",
"gl_vendor": "(?i)nouveau.*",
- "driver_vendor": "Mesa",
"features": [
"all"
]
@@ -1734,6 +1733,21 @@
"features": [
"all"
]
+ },
+ {
+ "id": 162,
+ "cr_bugs": [963000],
+ "description": "Metal is very crashy on macOS 10.12",
+ "os": {
+ "type": "macosx",
+ "version": {
+ "op": "=",
+ "value": "10.12"
+ }
+ },
+ "features": [
+ "metal"
+ ]
}
]
}
diff --git a/chromium/gpu/gles2_conform_support/egl/context.cc b/chromium/gpu/gles2_conform_support/egl/context.cc
index 0101e171ec4..53bb9337698 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.cc
+++ b/chromium/gpu/gles2_conform_support/egl/context.cc
@@ -236,6 +236,10 @@ bool Context::CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) {
return false;
}
+void Context::SetDisplayTransform(gfx::OverlayTransform transform) {
+ NOTREACHED();
+}
+
void Context::ApplyCurrentContext(gl::GLSurface* current_surface) {
DCHECK(HasService());
// The current_surface will be the same as
@@ -276,7 +280,7 @@ bool Context::CreateService(gl::GLSurface* gl_surface) {
command_buffer->set_handler(decoder.get());
gl::GLContextAttribs context_attribs;
- context_attribs.gpu_preference = gl::PreferDiscreteGpu;
+ context_attribs.gpu_preference = gl::GpuPreference::kHighPerformance;
scoped_refptr<gl::GLContext> gl_context(
gl::init::CreateGLContext(nullptr, gl_surface, context_attribs));
if (!gl_context)
diff --git a/chromium/gpu/gles2_conform_support/egl/context.h b/chromium/gpu/gles2_conform_support/egl/context.h
index 9a99d9fd413..046239a394a 100644
--- a/chromium/gpu/gles2_conform_support/egl/context.h
+++ b/chromium/gpu/gles2_conform_support/egl/context.h
@@ -82,6 +82,7 @@ class Context : public base::RefCountedThreadSafe<Context>,
base::OnceClosure callback) override;
void WaitSyncToken(const gpu::SyncToken& sync_token) override;
bool CanWaitUnverifiedSyncToken(const gpu::SyncToken& sync_token) override;
+ void SetDisplayTransform(gfx::OverlayTransform transform) override;
// Called by ThreadState to set the needed global variables when this context
// is current.
diff --git a/chromium/gpu/ipc/DEPS b/chromium/gpu/ipc/DEPS
index b82b151b451..9e41f7d52fe 100644
--- a/chromium/gpu/ipc/DEPS
+++ b/chromium/gpu/ipc/DEPS
@@ -1,5 +1,8 @@
include_rules = [
+ "+cc/base/completion_event.h",
"+components/viz/common/features.h",
+ "+components/viz/common/display/update_vsync_parameters_callback.h",
+ "+components/viz/common/gpu/gpu_vsync_callback.h",
"+components/viz/common/resources/resource_format.h",
]
diff --git a/chromium/gpu/ipc/client/BUILD.gn b/chromium/gpu/ipc/client/BUILD.gn
index 5a9ee4b7902..ad92416bfd4 100644
--- a/chromium/gpu/ipc/client/BUILD.gn
+++ b/chromium/gpu/ipc/client/BUILD.gn
@@ -37,7 +37,7 @@ source_set("ipc_client_sources") {
"//gpu/command_buffer/common:common_sources",
"//gpu/config:config_sources",
"//gpu/ipc/common:ipc_common_sources",
- "//media/filters:jpeg_parser",
+ "//media/parsers",
"//mojo/public/cpp/system",
"//ui/gfx:color_space",
"//ui/gfx/geometry",
diff --git a/chromium/gpu/ipc/client/DEPS b/chromium/gpu/ipc/client/DEPS
index 7a79c6ac010..88b89edbae4 100644
--- a/chromium/gpu/ipc/client/DEPS
+++ b/chromium/gpu/ipc/client/DEPS
@@ -11,7 +11,7 @@ specific_include_rules = {
"+components/viz/test/test_gpu_memory_buffer_manager.h",
],
"image_decode_accelerator_proxy.cc": [
- "+media/filters/jpeg_parser.h",
+ "+media/parsers/jpeg_parser.h",
],
"raster_in_process_context_tests.cc": [
"+components/viz/common/resources/resource_format.h",
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
index a5a4c19dfea..b753d1bb921 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.cc
@@ -599,6 +599,11 @@ void CommandBufferProxyImpl::CreateGpuFence(uint32_t gpu_fence_id,
handle));
}
+void CommandBufferProxyImpl::SetDisplayTransform(
+ gfx::OverlayTransform transform) {
+ NOTREACHED();
+}
+
void CommandBufferProxyImpl::GetGpuFence(
uint32_t gpu_fence_id,
base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)> callback) {
diff --git a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
index 6464b6bd6b7..48b4586fddf 100644
--- a/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
+++ b/chromium/gpu/ipc/client/command_buffer_proxy_impl.h
@@ -115,6 +115,7 @@ class GPU_EXPORT CommandBufferProxyImpl : public gpu::CommandBuffer,
void GetGpuFence(uint32_t gpu_fence_id,
base::OnceCallback<void(std::unique_ptr<gfx::GpuFence>)>
callback) override;
+ void SetDisplayTransform(gfx::OverlayTransform transform) override;
void SetLock(base::Lock* lock) override;
void EnsureWorkVisible() override;
diff --git a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
index ef7442f0186..098a2cf2a67 100644
--- a/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
+++ b/chromium/gpu/ipc/client/image_decode_accelerator_proxy.cc
@@ -17,7 +17,7 @@
#include "gpu/ipc/client/gpu_channel_host.h"
#include "gpu/ipc/common/command_buffer_id.h"
#include "gpu/ipc/common/gpu_messages.h"
-#include "media/filters/jpeg_parser.h"
+#include "media/parsers/jpeg_parser.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
diff --git a/chromium/gpu/ipc/client/raster_in_process_context_tests.cc b/chromium/gpu/ipc/client/raster_in_process_context_tests.cc
index fd663166ef7..1ac0a026a03 100644
--- a/chromium/gpu/ipc/client/raster_in_process_context_tests.cc
+++ b/chromium/gpu/ipc/client/raster_in_process_context_tests.cc
@@ -59,7 +59,8 @@ class RasterInProcessCommandBufferTest : public ::testing::Test {
void SetUp() override {
if (!RasterInProcessContext::SupportedInTest())
return;
- gpu_memory_buffer_factory_ = GpuMemoryBufferFactory::CreateNativeType();
+ gpu_memory_buffer_factory_ =
+ GpuMemoryBufferFactory::CreateNativeType(nullptr);
gpu_memory_buffer_manager_ =
std::make_unique<viz::TestGpuMemoryBufferManager>();
gpu_thread_holder_.GetGpuPreferences()->texture_target_exception_list =
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
index db7e9432125..f49c4956812 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
@@ -126,8 +126,11 @@ Mailbox SharedImageInterfaceProxy::CreateSharedImage(
DCHECK(gpu_memory_buffer->GetType() == gfx::NATIVE_PIXMAP ||
gpu_memory_buffer->GetType() == gfx::ANDROID_HARDWARE_BUFFER ||
gpu_memory_buffer_manager);
+
+ auto mailbox = Mailbox::GenerateForSharedImage();
+
GpuChannelMsg_CreateGMBSharedImage_Params params;
- params.mailbox = Mailbox::GenerateForSharedImage();
+ params.mailbox = mailbox;
params.handle = gpu_memory_buffer->CloneHandle();
params.size = gpu_memory_buffer->GetSize();
params.format = gpu_memory_buffer->GetFormat();
@@ -158,7 +161,7 @@ Mailbox SharedImageInterfaceProxy::CreateSharedImage(
gpu_memory_buffer_manager->SetDestructionSyncToken(gpu_memory_buffer,
sync_token);
}
- return params.mailbox;
+ return mailbox;
}
void SharedImageInterfaceProxy::UpdateSharedImage(const SyncToken& sync_token,
@@ -286,4 +289,51 @@ bool SharedImageInterfaceProxy::GetSHMForPixelData(
return true;
}
+#if defined(OS_WIN)
+SharedImageInterface::SwapChainMailboxes
+SharedImageInterfaceProxy::CreateSwapChain(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) {
+ GpuChannelMsg_CreateSwapChain_Params params;
+ params.front_buffer_mailbox = Mailbox::GenerateForSharedImage();
+ params.back_buffer_mailbox = Mailbox::GenerateForSharedImage();
+ params.format = format;
+ params.size = size;
+ params.color_space = color_space;
+ params.usage = usage;
+ {
+ base::AutoLock lock(lock_);
+ params.release_id = ++next_release_id_;
+ last_flush_id_ = host_->EnqueueDeferredMessage(
+ GpuChannelMsg_CreateSwapChain(route_id_, params));
+ }
+ return {params.front_buffer_mailbox, params.back_buffer_mailbox};
+}
+
+void SharedImageInterfaceProxy::PresentSwapChain(const SyncToken& sync_token,
+ const Mailbox& mailbox) {
+ std::vector<SyncToken> dependencies;
+ if (sync_token.HasData()) {
+ dependencies.push_back(sync_token);
+ SyncToken& new_token = dependencies.back();
+ if (!new_token.verified_flush()) {
+ // Only allow unverified sync tokens for the same channel.
+ DCHECK_EQ(sync_token.namespace_id(), gpu::CommandBufferNamespace::GPU_IO);
+ int sync_token_channel_id =
+ ChannelIdFromCommandBufferId(sync_token.command_buffer_id());
+ DCHECK_EQ(sync_token_channel_id, host_->channel_id());
+ new_token.SetVerifyFlush();
+ }
+ }
+ {
+ base::AutoLock lock(lock_);
+ uint32_t release_id = ++next_release_id_;
+ last_flush_id_ = host_->EnqueueDeferredMessage(
+ GpuChannelMsg_PresentSwapChain(route_id_, mailbox, release_id),
+ std::move(dependencies));
+ }
+}
+#endif // OS_WIN
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.h b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
index d39497b70e6..e013292cb6e 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.h
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
@@ -8,6 +8,7 @@
#include "base/memory/read_only_shared_memory_region.h"
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/client/shared_image_interface.h"
#include "gpu/command_buffer/common/buffer.h"
@@ -41,6 +42,15 @@ class SharedImageInterfaceProxy : public SharedImageInterface {
SyncToken GenVerifiedSyncToken() override;
SyncToken GenUnverifiedSyncToken() override;
+#if defined(OS_WIN)
+ SwapChainMailboxes CreateSwapChain(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override;
+ void PresentSwapChain(const SyncToken& sync_token,
+ const Mailbox& mailbox) override;
+#endif // OS_WIN
+
private:
bool GetSHMForPixelData(base::span<const uint8_t> pixel_data,
size_t* shm_offset,
diff --git a/chromium/gpu/ipc/command_buffer_task_executor.cc b/chromium/gpu/ipc/command_buffer_task_executor.cc
index fb1b239ebe7..9bdab6666ad 100644
--- a/chromium/gpu/ipc/command_buffer_task_executor.cc
+++ b/chromium/gpu/ipc/command_buffer_task_executor.cc
@@ -21,7 +21,8 @@ CommandBufferTaskExecutor::CommandBufferTaskExecutor(
scoped_refptr<gl::GLShareGroup> share_group,
gl::GLSurfaceFormat share_group_surface_format,
SharedImageManager* shared_image_manager,
- gles2::ProgramCache* program_cache)
+ gles2::ProgramCache* program_cache,
+ scoped_refptr<SharedContextState> shared_context_state)
: gpu_preferences_(gpu_preferences),
gpu_feature_info_(gpu_feature_info),
sync_point_manager_(sync_point_manager),
@@ -30,7 +31,8 @@ CommandBufferTaskExecutor::CommandBufferTaskExecutor(
share_group_surface_format_(share_group_surface_format),
program_cache_(program_cache),
shader_translator_cache_(gpu_preferences_),
- shared_image_manager_(shared_image_manager) {
+ shared_image_manager_(shared_image_manager),
+ shared_context_state_(std::move(shared_context_state)) {
DCHECK(mailbox_manager_);
DCHECK(shared_image_manager_);
}
diff --git a/chromium/gpu/ipc/command_buffer_task_executor.h b/chromium/gpu/ipc/command_buffer_task_executor.h
index f39f02dfd85..8b2dac28c5d 100644
--- a/chromium/gpu/ipc/command_buffer_task_executor.h
+++ b/chromium/gpu/ipc/command_buffer_task_executor.h
@@ -18,6 +18,7 @@
#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/command_buffer/service/service_discardable_manager.h"
#include "gpu/command_buffer/service/shader_translator_cache.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/config/gpu_preferences.h"
@@ -66,14 +67,16 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor {
virtual void ContinueTask(base::OnceClosure task) = 0;
};
- CommandBufferTaskExecutor(const GpuPreferences& gpu_preferences,
- const GpuFeatureInfo& gpu_feature_info,
- SyncPointManager* sync_point_manager,
- MailboxManager* mailbox_manager,
- scoped_refptr<gl::GLShareGroup> share_group,
- gl::GLSurfaceFormat share_group_surface_format,
- SharedImageManager* shared_image_manager,
- gles2::ProgramCache* program_cache);
+ CommandBufferTaskExecutor(
+ const GpuPreferences& gpu_preferences,
+ const GpuFeatureInfo& gpu_feature_info,
+ SyncPointManager* sync_point_manager,
+ MailboxManager* mailbox_manager,
+ scoped_refptr<gl::GLShareGroup> share_group,
+ gl::GLSurfaceFormat share_group_surface_format,
+ SharedImageManager* shared_image_manager,
+ gles2::ProgramCache* program_cache,
+ scoped_refptr<SharedContextState> shared_context_state);
virtual ~CommandBufferTaskExecutor();
// Always use virtualized GL contexts if this returns true.
@@ -119,6 +122,10 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor {
}
SharedImageManager* shared_image_manager() { return shared_image_manager_; }
+ scoped_refptr<SharedContextState> shared_context_state() {
+ return shared_context_state_;
+ }
+
// These methods construct accessed fields if not already initialized.
scoped_refptr<gl::GLShareGroup> share_group();
gles2::Outputter* outputter();
@@ -140,6 +147,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT CommandBufferTaskExecutor {
gles2::ShaderTranslatorCache shader_translator_cache_;
gles2::FramebufferCompletenessCache framebuffer_completeness_cache_;
SharedImageManager* shared_image_manager_;
+ const scoped_refptr<SharedContextState> shared_context_state_;
// No-op default initialization is used in in-process mode.
GpuProcessActivityFlags activity_flags_;
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index f7a43139a6b..0e726471d77 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -81,6 +81,8 @@ source_set("ipc_common_sources") {
"gpu_param_traits_macros.h",
"memory_stats.cc",
"memory_stats.h",
+ "vulkan_ycbcr_info.cc",
+ "vulkan_ycbcr_info.h",
]
if (is_mac) {
@@ -198,12 +200,14 @@ mojom("interfaces") {
"memory_stats.mojom",
"surface_handle.mojom",
"sync_token.mojom",
+ "vulkan_ycbcr_info.mojom",
]
public_deps = [
":gpu_preferences_interface",
"//mojo/public/mojom/base",
"//ui/gfx/geometry/mojo",
+ "//ui/gfx/mojo",
]
}
@@ -246,4 +250,7 @@ source_set("struct_traits") {
"//gpu/ipc/common",
"//mojo/public/cpp/bindings:bindings",
]
+ if (is_android) {
+ sources += [ "vulkan_ycbcr_info_mojom_traits.h" ]
+ }
}
diff --git a/chromium/gpu/ipc/common/OWNERS b/chromium/gpu/ipc/common/OWNERS
index 62994ee8575..6aa938a9ae5 100644
--- a/chromium/gpu/ipc/common/OWNERS
+++ b/chromium/gpu/ipc/common/OWNERS
@@ -14,3 +14,5 @@ per-file *_struct_traits*.*=set noparent
per-file *_struct_traits*.*=file://ipc/SECURITY_OWNERS
per-file *.typemap=set noparent
per-file *.typemap=file://ipc/SECURITY_OWNERS
+per-file *_mojom_traits*.*=set noparent
+per-file *_mojom_traits*.*=file://ipc/SECURITY_OWNERS
diff --git a/chromium/gpu/ipc/common/gpu_command_buffer_traits.h b/chromium/gpu/ipc/common/gpu_command_buffer_traits.h
index 1dc4008acc1..5b1ff5c20ab 100644
--- a/chromium/gpu/ipc/common/gpu_command_buffer_traits.h
+++ b/chromium/gpu/ipc/common/gpu_command_buffer_traits.h
@@ -5,7 +5,6 @@
#ifndef GPU_IPC_COMMON_GPU_COMMAND_BUFFER_TRAITS_H_
#define GPU_IPC_COMMON_GPU_COMMAND_BUFFER_TRAITS_H_
-#include "gpu/command_buffer/common/id_type.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/common/gpu_command_buffer_traits_multi.h"
#include "ipc/ipc_message_utils.h"
@@ -60,26 +59,6 @@ struct GPU_EXPORT ParamTraits<gpu::MailboxHolder> {
static void Log(const param_type& p, std::string* l);
};
-template <typename TypeMarker, typename WrappedType, WrappedType kInvalidValue>
-struct ParamTraits<gpu::IdType<TypeMarker, WrappedType, kInvalidValue>> {
- using param_type = gpu::IdType<TypeMarker, WrappedType, kInvalidValue>;
- static void Write(base::Pickle* m, const param_type& p) {
- WriteParam(m, p.GetUnsafeValue());
- }
- static bool Read(const base::Pickle* m,
- base::PickleIterator* iter,
- param_type* r) {
- WrappedType value;
- if (!ReadParam(m, iter, &value))
- return false;
- *r = param_type::FromUnsafeValue(value);
- return true;
- }
- static void Log(const param_type& p, std::string* l) {
- LogParam(p.GetUnsafeValue(), l);
- }
-};
-
} // namespace IPC
#endif // GPU_IPC_COMMON_GPU_COMMAND_BUFFER_TRAITS_H_
diff --git a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
index be7eae4fc34..af5d257e9ba 100644
--- a/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
+++ b/chromium/gpu/ipc/common/gpu_command_buffer_traits_multi.h
@@ -26,7 +26,7 @@ IPC_ENUM_TRAITS_MIN_MAX_VALUE(
gpu::CommandBufferNamespace,
gpu::CommandBufferNamespace::INVALID,
gpu::CommandBufferNamespace::NUM_COMMAND_BUFFER_NAMESPACES - 1)
-IPC_ENUM_TRAITS_MAX_VALUE(gl::GpuPreference, gl::GpuPreferenceLast)
+IPC_ENUM_TRAITS_MAX_VALUE(gl::GpuPreference, gl::GpuPreference::kMaxValue)
IPC_ENUM_TRAITS_MAX_VALUE(gpu::ContextType, gpu::CONTEXT_TYPE_LAST)
IPC_ENUM_TRAITS_MAX_VALUE(gpu::ColorSpace, gpu::COLOR_SPACE_LAST)
@@ -139,6 +139,7 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::Capabilities)
IPC_STRUCT_TRAITS_MEMBER(dc_layers)
IPC_STRUCT_TRAITS_MEMBER(use_dc_overlays_for_video)
IPC_STRUCT_TRAITS_MEMBER(protected_video_swap_chain)
+ IPC_STRUCT_TRAITS_MEMBER(gpu_vsync)
IPC_STRUCT_TRAITS_MEMBER(disable_non_empty_post_sub_buffers)
IPC_STRUCT_TRAITS_MEMBER(avoid_stencil_buffers)
IPC_STRUCT_TRAITS_MEMBER(disable_2d_canvas_copy_on_write)
@@ -195,7 +196,6 @@ IPC_STRUCT_TRAITS_BEGIN(gpu::ContextCreationAttribs)
IPC_STRUCT_TRAITS_MEMBER(enable_raster_interface)
IPC_STRUCT_TRAITS_MEMBER(enable_oop_rasterization)
IPC_STRUCT_TRAITS_MEMBER(enable_swap_timestamps_if_supported)
- IPC_STRUCT_TRAITS_MEMBER(backed_by_surface_texture)
IPC_STRUCT_TRAITS_END()
IPC_STRUCT_TRAITS_BEGIN(gpu::GpuMemoryBufferFormatSet)
diff --git a/chromium/gpu/ipc/common/gpu_feature_info.mojom b/chromium/gpu/ipc/common/gpu_feature_info.mojom
index 4743daefecd..b679921af9a 100644
--- a/chromium/gpu/ipc/common/gpu_feature_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_feature_info.mojom
@@ -5,6 +5,8 @@
// gpu/config/gpu_feature_info.h
module gpu.mojom;
+import "ui/gfx/mojo/buffer_types.mojom";
+
// gpu::GpuFeatureStatus
enum GpuFeatureStatus {
Enabled,
@@ -15,23 +17,6 @@ enum GpuFeatureStatus {
Max
};
-// gpu::AntialiasingMode
-enum AntialiasingMode {
- kUnspecified,
- kNone,
- kMSAAImplicitResolve,
- kMSAAExplicitResolve,
- kScreenSpaceAntialiasing
-};
-
-// gpu::WebglPreferences
-struct WebglPreferences {
- AntialiasingMode anti_aliasing_mode;
- uint32 msaa_sample_count;
- uint32 max_active_webgl_contexts;
- uint32 max_active_webgl_contexts_on_worker;
-};
-
// gpu:GpuFeatureInfo
struct GpuFeatureInfo {
// The array should have one entry for each GpuFeatureType. The size of the
@@ -50,9 +35,6 @@ struct GpuFeatureInfo {
// WebGL extensions disabled by GpuDriverBugWorkarounds, separated by ' '.
string disabled_webgl_extensions;
- // Preferences for WebGL.
- WebglPreferences webgl_preferences;
-
// The array contains a list of gpu blacklist entry indices that apply in the
// current platform. The entries are defined in
// gpu/config/software_rendering_list.json.
@@ -62,4 +44,9 @@ struct GpuFeatureInfo {
// in the current platform. The entries are defined in
// gpu/config/gpu_driver_bug_list.json.
array<uint32> applied_gpu_driver_bug_list_entries;
+
+ // BufferFormats that can be allocated and then bound, if known and provided
+ // by the platform.
+ array<gfx.mojom.BufferFormat>
+ supported_buffer_formats_for_allocation_and_texturing;
};
diff --git a/chromium/gpu/ipc/common/gpu_feature_info.typemap b/chromium/gpu/ipc/common/gpu_feature_info.typemap
index e66c0f1410c..4531be67deb 100644
--- a/chromium/gpu/ipc/common/gpu_feature_info.typemap
+++ b/chromium/gpu/ipc/common/gpu_feature_info.typemap
@@ -14,7 +14,5 @@ public_deps = [
]
type_mappings = [
"gpu.mojom.GpuFeatureStatus=gpu::GpuFeatureStatus",
- "gpu.mojom.AntialiasingMode=gpu::AntialiasingMode",
- "gpu.mojom.WebglPreferences=gpu::WebglPreferences",
"gpu.mojom.GpuFeatureInfo=gpu::GpuFeatureInfo",
]
diff --git a/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.cc b/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.cc
index b11f7714d07..83d9c7844c2 100644
--- a/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.cc
@@ -20,7 +20,6 @@ bool StructTraits<gpu::mojom::GpuFeatureInfoDataView, gpu::GpuFeatureInfo>::
&out->enabled_gpu_driver_bug_workarounds) &&
data.ReadDisabledExtensions(&out->disabled_extensions) &&
data.ReadDisabledWebglExtensions(&out->disabled_webgl_extensions) &&
- data.ReadWebglPreferences(&out->webgl_preferences) &&
data.ReadAppliedGpuBlacklistEntries(
&out->applied_gpu_blacklist_entries) &&
gpu::GpuBlacklist::AreEntryIndicesValid(
@@ -28,7 +27,9 @@ bool StructTraits<gpu::mojom::GpuFeatureInfoDataView, gpu::GpuFeatureInfo>::
data.ReadAppliedGpuDriverBugListEntries(
&out->applied_gpu_driver_bug_list_entries) &&
gpu::GpuDriverBugList::AreEntryIndicesValid(
- out->applied_gpu_driver_bug_list_entries);
+ out->applied_gpu_driver_bug_list_entries) &&
+ data.ReadSupportedBufferFormatsForAllocationAndTexturing(
+ &out->supported_buffer_formats_for_allocation_and_texturing);
}
} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h
index a31b0c5f426..3c3713f2c81 100644
--- a/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_feature_info_struct_traits.h
@@ -9,6 +9,7 @@
#include "gpu/config/gpu_driver_bug_list.h"
#include "gpu/config/gpu_feature_info.h"
#include "gpu/ipc/common/gpu_feature_info.mojom.h"
+#include "ui/gfx/mojo/buffer_types_struct_traits.h"
namespace mojo {
@@ -60,80 +61,6 @@ struct EnumTraits<gpu::mojom::GpuFeatureStatus, gpu::GpuFeatureStatus> {
};
template <>
-struct EnumTraits<gpu::mojom::AntialiasingMode, gpu::AntialiasingMode> {
- static gpu::mojom::AntialiasingMode ToMojom(gpu::AntialiasingMode mode) {
- switch (mode) {
- case gpu::kAntialiasingModeUnspecified:
- return gpu::mojom::AntialiasingMode::kUnspecified;
- case gpu::kAntialiasingModeNone:
- return gpu::mojom::AntialiasingMode::kNone;
- case gpu::kAntialiasingModeMSAAImplicitResolve:
- return gpu::mojom::AntialiasingMode::kMSAAImplicitResolve;
- case gpu::kAntialiasingModeMSAAExplicitResolve:
- return gpu::mojom::AntialiasingMode::kMSAAExplicitResolve;
- case gpu::kAntialiasingModeScreenSpaceAntialiasing:
- return gpu::mojom::AntialiasingMode::kScreenSpaceAntialiasing;
- }
- NOTREACHED();
- return gpu::mojom::AntialiasingMode::kUnspecified;
- }
-
- static bool FromMojom(gpu::mojom::AntialiasingMode input,
- gpu::AntialiasingMode* out) {
- switch (input) {
- case gpu::mojom::AntialiasingMode::kUnspecified:
- *out = gpu::kAntialiasingModeUnspecified;
- return true;
- case gpu::mojom::AntialiasingMode::kNone:
- *out = gpu::kAntialiasingModeNone;
- return true;
- case gpu::mojom::AntialiasingMode::kMSAAImplicitResolve:
- *out = gpu::kAntialiasingModeMSAAImplicitResolve;
- return true;
- case gpu::mojom::AntialiasingMode::kMSAAExplicitResolve:
- *out = gpu::kAntialiasingModeMSAAExplicitResolve;
- return true;
- case gpu::mojom::AntialiasingMode::kScreenSpaceAntialiasing:
- *out = gpu::kAntialiasingModeScreenSpaceAntialiasing;
- return true;
- }
- return false;
- }
-};
-
-template <>
-struct StructTraits<gpu::mojom::WebglPreferencesDataView,
- gpu::WebglPreferences> {
- static bool Read(gpu::mojom::WebglPreferencesDataView data,
- gpu::WebglPreferences* out) {
- out->msaa_sample_count = data.msaa_sample_count();
- out->max_active_webgl_contexts = data.max_active_webgl_contexts();
- out->max_active_webgl_contexts_on_worker =
- data.max_active_webgl_contexts_on_worker();
- return data.ReadAntiAliasingMode(&out->anti_aliasing_mode);
- }
-
- static gpu::AntialiasingMode anti_aliasing_mode(
- const gpu::WebglPreferences& prefs) {
- return prefs.anti_aliasing_mode;
- }
-
- static uint32_t msaa_sample_count(const gpu::WebglPreferences& prefs) {
- return prefs.msaa_sample_count;
- }
-
- static uint32_t max_active_webgl_contexts(
- const gpu::WebglPreferences& prefs) {
- return prefs.max_active_webgl_contexts;
- }
-
- static uint32_t max_active_webgl_contexts_on_worker(
- const gpu::WebglPreferences& prefs) {
- return prefs.max_active_webgl_contexts_on_worker;
- }
-};
-
-template <>
struct StructTraits<gpu::mojom::GpuFeatureInfoDataView, gpu::GpuFeatureInfo> {
static bool Read(gpu::mojom::GpuFeatureInfoDataView data,
gpu::GpuFeatureInfo* out);
@@ -159,11 +86,6 @@ struct StructTraits<gpu::mojom::GpuFeatureInfoDataView, gpu::GpuFeatureInfo> {
return info.disabled_webgl_extensions;
}
- static const gpu::WebglPreferences& webgl_preferences(
- const gpu::GpuFeatureInfo& info) {
- return info.webgl_preferences;
- }
-
static const std::vector<uint32_t>& applied_gpu_blacklist_entries(
const gpu::GpuFeatureInfo& info) {
return info.applied_gpu_blacklist_entries;
@@ -173,6 +95,12 @@ struct StructTraits<gpu::mojom::GpuFeatureInfoDataView, gpu::GpuFeatureInfo> {
const gpu::GpuFeatureInfo& info) {
return info.applied_gpu_driver_bug_list_entries;
}
+
+ static std::vector<gfx::BufferFormat>
+ supported_buffer_formats_for_allocation_and_texturing(
+ const gpu::GpuFeatureInfo& input) {
+ return input.supported_buffer_formats_for_allocation_and_texturing;
+ }
};
} // namespace mojo
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index 252eb5fc97b..954c46eb99c 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -97,19 +97,12 @@ struct ImageDecodeAcceleratorSupportedProfile {
array<ImageDecodeAcceleratorSubsampling> subsamplings;
};
-// gpu::OverlayFormat
+// gpu::OverlaySupport
[EnableIf=is_win]
-enum OverlayFormat {
- BGRA,
- YUY2,
- NV12,
-};
-
-// gpu::OverlayCapability
-[EnableIf=is_win]
-struct OverlayCapability {
- OverlayFormat format;
- bool is_scaling_supported;
+enum OverlaySupport {
+ NONE,
+ DIRECT,
+ SCALING,
};
// gpu::Dx12VulkanVersionInfo
@@ -153,7 +146,9 @@ struct GpuInfo {
[EnableIf=is_win]
bool supports_overlays;
[EnableIf=is_win]
- array<OverlayCapability> overlay_capabilities;
+ OverlaySupport yuy2_overlay_support;
+ [EnableIf=is_win]
+ OverlaySupport nv12_overlay_support;
[EnableIf=is_win]
DxDiagNode dx_diagnostics;
[EnableIf=is_win]
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
index f86fbd346d5..64ade578476 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.cc
@@ -297,46 +297,37 @@ bool StructTraits<gpu::mojom::ImageDecodeAcceleratorSupportedProfileDataView,
#if defined(OS_WIN)
// static
-gpu::mojom::OverlayFormat
-EnumTraits<gpu::mojom::OverlayFormat, gpu::OverlayFormat>::ToMojom(
- gpu::OverlayFormat format) {
- switch (format) {
- case gpu::OverlayFormat::kBGRA:
- return gpu::mojom::OverlayFormat::BGRA;
- case gpu::OverlayFormat::kYUY2:
- return gpu::mojom::OverlayFormat::YUY2;
- case gpu::OverlayFormat::kNV12:
- return gpu::mojom::OverlayFormat::NV12;
+gpu::mojom::OverlaySupport
+EnumTraits<gpu::mojom::OverlaySupport, gpu::OverlaySupport>::ToMojom(
+ gpu::OverlaySupport support) {
+ switch (support) {
+ case gpu::OverlaySupport::kNone:
+ return gpu::mojom::OverlaySupport::NONE;
+ case gpu::OverlaySupport::kDirect:
+ return gpu::mojom::OverlaySupport::DIRECT;
+ case gpu::OverlaySupport::kScaling:
+ return gpu::mojom::OverlaySupport::SCALING;
}
}
-bool EnumTraits<gpu::mojom::OverlayFormat, gpu::OverlayFormat>::FromMojom(
- gpu::mojom::OverlayFormat input,
- gpu::OverlayFormat* out) {
+bool EnumTraits<gpu::mojom::OverlaySupport, gpu::OverlaySupport>::FromMojom(
+ gpu::mojom::OverlaySupport input,
+ gpu::OverlaySupport* out) {
switch (input) {
- case gpu::mojom::OverlayFormat::BGRA:
- *out = gpu::OverlayFormat::kBGRA;
+ case gpu::mojom::OverlaySupport::NONE:
+ *out = gpu::OverlaySupport::kNone;
break;
- case gpu::mojom::OverlayFormat::YUY2:
- *out = gpu::OverlayFormat::kYUY2;
+ case gpu::mojom::OverlaySupport::DIRECT:
+ *out = gpu::OverlaySupport::kDirect;
break;
- case gpu::mojom::OverlayFormat::NV12:
- *out = gpu::OverlayFormat::kNV12;
+ case gpu::mojom::OverlaySupport::SCALING:
+ *out = gpu::OverlaySupport::kScaling;
break;
}
return true;
}
// static
-bool StructTraits<
- gpu::mojom::OverlayCapabilityDataView,
- gpu::OverlayCapability>::Read(gpu::mojom::OverlayCapabilityDataView data,
- gpu::OverlayCapability* out) {
- out->is_scaling_supported = data.is_scaling_supported();
- return data.ReadFormat(&out->format);
-}
-
-// static
bool StructTraits<gpu::mojom::Dx12VulkanVersionInfoDataView,
gpu::Dx12VulkanVersionInfo>::
Read(gpu::mojom::Dx12VulkanVersionInfoDataView data,
@@ -392,7 +383,8 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
data.ReadGlWsExtensions(&out->gl_ws_extensions) &&
data.ReadDirectRenderingVersion(&out->direct_rendering_version) &&
#if defined(OS_WIN)
- data.ReadOverlayCapabilities(&out->overlay_capabilities) &&
+ data.ReadYuy2OverlaySupport(&out->yuy2_overlay_support) &&
+ data.ReadNv12OverlaySupport(&out->nv12_overlay_support) &&
data.ReadDxDiagnostics(&out->dx_diagnostics) &&
data.ReadDx12VulkanVersionInfo(&out->dx12_vulkan_version_info) &&
#endif
diff --git a/chromium/gpu/ipc/common/gpu_info_struct_traits.h b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
index dca1fcf5680..9877fa47242 100644
--- a/chromium/gpu/ipc/common/gpu_info_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_struct_traits.h
@@ -189,25 +189,10 @@ struct StructTraits<gpu::mojom::ImageDecodeAcceleratorSupportedProfileDataView,
#if defined(OS_WIN)
template <>
-struct EnumTraits<gpu::mojom::OverlayFormat, gpu::OverlayFormat> {
- static gpu::mojom::OverlayFormat ToMojom(gpu::OverlayFormat format);
- static bool FromMojom(gpu::mojom::OverlayFormat input,
- gpu::OverlayFormat* out);
-};
-
-template <>
-struct StructTraits<gpu::mojom::OverlayCapabilityDataView,
- gpu::OverlayCapability> {
- static bool Read(gpu::mojom::OverlayCapabilityDataView data,
- gpu::OverlayCapability* out);
-
- static gpu::OverlayFormat format(const gpu::OverlayCapability& input) {
- return input.format;
- }
-
- static bool is_scaling_supported(const gpu::OverlayCapability& input) {
- return input.is_scaling_supported;
- }
+struct EnumTraits<gpu::mojom::OverlaySupport, gpu::OverlaySupport> {
+ static gpu::mojom::OverlaySupport ToMojom(gpu::OverlaySupport support);
+ static bool FromMojom(gpu::mojom::OverlaySupport input,
+ gpu::OverlaySupport* out);
};
template <>
@@ -341,9 +326,12 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
return input.supports_overlays;
}
- static const gpu::OverlayCapabilities& overlay_capabilities(
- const gpu::GPUInfo& input) {
- return input.overlay_capabilities;
+ static gpu::OverlaySupport yuy2_overlay_support(const gpu::GPUInfo& input) {
+ return input.yuy2_overlay_support;
+ }
+
+ static gpu::OverlaySupport nv12_overlay_support(const gpu::GPUInfo& input) {
+ return input.nv12_overlay_support;
}
static const gpu::DxDiagNode& dx_diagnostics(const gpu::GPUInfo& input) {
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc
index 20ce691e79f..e8ce975c949 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.cc
@@ -4,6 +4,8 @@
#include "gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h"
+#include <vulkan/vulkan.h>
+
#include <utility>
#include "base/bind.h"
@@ -71,8 +73,8 @@ base::OnceClosure GpuMemoryBufferImplNativePixmap::AllocateForTesting(
scoped_refptr<gfx::NativePixmap> pixmap =
ui::OzonePlatform::GetInstance()
->GetSurfaceFactoryOzone()
- ->CreateNativePixmap(gfx::kNullAcceleratedWidget, size, format,
- usage);
+ ->CreateNativePixmap(gfx::kNullAcceleratedWidget, VK_NULL_HANDLE,
+ size, format, usage);
handle->native_pixmap_handle = pixmap->ExportHandle();
#else
// TODO(j.isorce): use gbm_bo_create / gbm_bo_get_fd from system libgbm.
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc
index bf77a6d5494..f6da0a870e7 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap_unittest.cc
@@ -3,14 +3,20 @@
// found in the LICENSE file.
#include "gpu/ipc/common/gpu_memory_buffer_impl_native_pixmap.h"
+
+#include "build/build_config.h"
#include "gpu/ipc/common/gpu_memory_buffer_impl_test_template.h"
namespace gpu {
namespace {
+// On Fuchsia NativePixmap depends on Vulkan, which is not initialized in tests.
+// See crbug.com/957700
+#if !defined(OS_FUCHSIA)
INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferImplNativePixmap,
GpuMemoryBufferImplTest,
GpuMemoryBufferImplNativePixmap);
+#endif
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/common/gpu_messages.h b/chromium/gpu/ipc/common/gpu_messages.h
index 463001b29e4..90a78c6b038 100644
--- a/chromium/gpu/ipc/common/gpu_messages.h
+++ b/chromium/gpu/ipc/common/gpu_messages.h
@@ -105,6 +105,18 @@ IPC_STRUCT_BEGIN(GpuChannelMsg_CreateGMBSharedImage_Params)
IPC_STRUCT_MEMBER(uint32_t, release_id)
IPC_STRUCT_END()
+#if defined(OS_WIN)
+IPC_STRUCT_BEGIN(GpuChannelMsg_CreateSwapChain_Params)
+ IPC_STRUCT_MEMBER(gpu::Mailbox, front_buffer_mailbox)
+ IPC_STRUCT_MEMBER(gpu::Mailbox, back_buffer_mailbox)
+ IPC_STRUCT_MEMBER(viz::ResourceFormat, format)
+ IPC_STRUCT_MEMBER(gfx::Size, size)
+ IPC_STRUCT_MEMBER(gfx::ColorSpace, color_space)
+ IPC_STRUCT_MEMBER(uint32_t, usage)
+ IPC_STRUCT_MEMBER(uint32_t, release_id)
+IPC_STRUCT_END()
+#endif // OS_WIN
+
IPC_STRUCT_BEGIN(GpuChannelMsg_ScheduleImageDecode_Params)
IPC_STRUCT_MEMBER(std::vector<uint8_t>, encoded_data)
IPC_STRUCT_MEMBER(gfx::Size, output_size)
@@ -157,6 +169,13 @@ IPC_MESSAGE_ROUTED2(GpuChannelMsg_UpdateSharedImage,
gpu::Mailbox /* id */,
uint32_t /* release_id */)
IPC_MESSAGE_ROUTED1(GpuChannelMsg_DestroySharedImage, gpu::Mailbox /* id */)
+#if defined(OS_WIN)
+IPC_MESSAGE_ROUTED1(GpuChannelMsg_CreateSwapChain,
+ GpuChannelMsg_CreateSwapChain_Params /* params */)
+IPC_MESSAGE_ROUTED2(GpuChannelMsg_PresentSwapChain,
+ gpu::Mailbox /* mailbox */,
+ uint32_t /* release_id */)
+#endif // OS_WIN
IPC_MESSAGE_ROUTED1(GpuChannelMsg_RegisterSharedImageUploadBuffer,
base::ReadOnlySharedMemoryRegion /* shm */)
diff --git a/chromium/gpu/ipc/common/gpu_preferences.mojom b/chromium/gpu/ipc/common/gpu_preferences.mojom
index b6bfac1ac11..fabd910f3d7 100644
--- a/chromium/gpu/ipc/common/gpu_preferences.mojom
+++ b/chromium/gpu/ipc/common/gpu_preferences.mojom
@@ -7,14 +7,6 @@ module gpu.mojom;
import "ui/gfx/mojo/buffer_types.mojom";
-// gpu::GpuPreferences::VpxDecodeVendors
-enum VpxDecodeVendors {
- VPX_VENDOR_NONE = 0,
- VPX_VENDOR_MICROSOFT = 1,
- VPX_VENDOR_AMD = 2,
- VPX_VENDOR_ALL = 3,
-};
-
// gpu::GpuPreferences
struct GpuPreferences {
bool disable_accelerated_video_decode;
@@ -25,7 +17,6 @@ struct GpuPreferences {
// TODO(http://crbug.com/676224) Support preprocessing of mojoms. Following
// variables should be used on Windows only.
- VpxDecodeVendors enable_accelerated_vpx_decode;
bool enable_low_latency_dxva;
bool enable_zero_copy_dxgi_video;
bool enable_nv12_dxgi_video;
@@ -49,7 +40,6 @@ struct GpuPreferences {
bool enable_threaded_texture_mailboxes;
bool gl_shader_interm_output;
bool emulate_shader_precision;
- uint32 max_active_webgl_contexts;
bool enable_android_surface_control;
bool enable_gpu_service_logging;
bool enable_gpu_service_tracing;
@@ -66,6 +56,7 @@ struct GpuPreferences {
bool enable_vulkan;
bool disable_vulkan_surface;
bool disable_vulkan_fallback_to_gl_for_testing;
+ bool enable_metal;
bool enable_gpu_benchmarking_extension;
bool enable_webgpu;
};
diff --git a/chromium/gpu/ipc/common/gpu_preferences.typemap b/chromium/gpu/ipc/common/gpu_preferences.typemap
index 19089fef0ef..fcb280da741 100644
--- a/chromium/gpu/ipc/common/gpu_preferences.typemap
+++ b/chromium/gpu/ipc/common/gpu_preferences.typemap
@@ -17,7 +17,4 @@ deps = [
sources = [
"//gpu/ipc/common/gpu_preferences_struct_traits.h",
]
-type_mappings = [
- "gpu.mojom.GpuPreferences=gpu::GpuPreferences",
- "gpu.mojom.VpxDecodeVendors=gpu::GpuPreferences::VpxDecodeVendors",
-]
+type_mappings = [ "gpu.mojom.GpuPreferences=gpu::GpuPreferences" ]
diff --git a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
index d30fc0594c4..cf1347d56c0 100644
--- a/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
+++ b/chromium/gpu/ipc/common/gpu_preferences_struct_traits.h
@@ -14,45 +14,6 @@
namespace mojo {
template <>
-struct EnumTraits<gpu::mojom::VpxDecodeVendors,
- gpu::GpuPreferences::VpxDecodeVendors> {
- static gpu::mojom::VpxDecodeVendors ToMojom(
- gpu::GpuPreferences::VpxDecodeVendors vpx) {
- switch (vpx) {
- case gpu::GpuPreferences::VPX_VENDOR_NONE:
- return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_NONE;
- case gpu::GpuPreferences::VPX_VENDOR_MICROSOFT:
- return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_MICROSOFT;
- case gpu::GpuPreferences::VPX_VENDOR_AMD:
- return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_AMD;
- case gpu::GpuPreferences::VPX_VENDOR_ALL:
- return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_ALL;
- }
- NOTREACHED();
- return gpu::mojom::VpxDecodeVendors::VPX_VENDOR_NONE;
- }
-
- static bool FromMojom(gpu::mojom::VpxDecodeVendors input,
- gpu::GpuPreferences::VpxDecodeVendors* out) {
- switch (input) {
- case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_NONE:
- *out = gpu::GpuPreferences::VPX_VENDOR_NONE;
- return true;
- case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_MICROSOFT:
- *out = gpu::GpuPreferences::VPX_VENDOR_MICROSOFT;
- return true;
- case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_AMD:
- *out = gpu::GpuPreferences::VPX_VENDOR_AMD;
- return true;
- case gpu::mojom::VpxDecodeVendors::VPX_VENDOR_ALL:
- *out = gpu::GpuPreferences::VPX_VENDOR_ALL;
- return true;
- }
- return false;
- }
-};
-
-template <>
struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool Read(gpu::mojom::GpuPreferencesDataView prefs,
gpu::GpuPreferences* out) {
@@ -63,9 +24,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->gpu_startup_dialog = prefs.gpu_startup_dialog();
out->disable_gpu_watchdog = prefs.disable_gpu_watchdog();
out->gpu_sandbox_start_early = prefs.gpu_sandbox_start_early();
- if (!prefs.ReadEnableAcceleratedVpxDecode(
- &out->enable_accelerated_vpx_decode))
- return false;
out->enable_low_latency_dxva = prefs.enable_low_latency_dxva();
out->enable_zero_copy_dxgi_video = prefs.enable_zero_copy_dxgi_video();
out->enable_nv12_dxgi_video = prefs.enable_nv12_dxgi_video();
@@ -94,7 +52,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
prefs.enable_threaded_texture_mailboxes();
out->gl_shader_interm_output = prefs.gl_shader_interm_output();
out->emulate_shader_precision = prefs.emulate_shader_precision();
- out->max_active_webgl_contexts = prefs.max_active_webgl_contexts();
out->enable_android_surface_control =
prefs.enable_android_surface_control();
out->enable_gpu_service_logging = prefs.enable_gpu_service_logging();
@@ -124,6 +81,7 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
out->disable_vulkan_surface = prefs.disable_vulkan_surface();
out->disable_vulkan_fallback_to_gl_for_testing =
prefs.disable_vulkan_fallback_to_gl_for_testing();
+ out->enable_metal = prefs.enable_metal();
out->enable_gpu_benchmarking_extension =
prefs.enable_gpu_benchmarking_extension();
out->enable_webgpu = prefs.enable_webgpu();
@@ -147,11 +105,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool gpu_sandbox_start_early(const gpu::GpuPreferences& prefs) {
return prefs.gpu_sandbox_start_early;
}
-
- static gpu::GpuPreferences::VpxDecodeVendors enable_accelerated_vpx_decode(
- const gpu::GpuPreferences& prefs) {
- return prefs.enable_accelerated_vpx_decode;
- }
static bool enable_low_latency_dxva(const gpu::GpuPreferences& prefs) {
return prefs.enable_low_latency_dxva;
}
@@ -221,9 +174,6 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
static bool emulate_shader_precision(const gpu::GpuPreferences& prefs) {
return prefs.emulate_shader_precision;
}
- static uint32_t max_active_webgl_contexts(const gpu::GpuPreferences& prefs) {
- return prefs.max_active_webgl_contexts;
- }
static bool enable_android_surface_control(const gpu::GpuPreferences& prefs) {
return prefs.enable_android_surface_control;
}
@@ -273,6 +223,9 @@ struct StructTraits<gpu::mojom::GpuPreferencesDataView, gpu::GpuPreferences> {
const gpu::GpuPreferences& prefs) {
return prefs.disable_vulkan_fallback_to_gl_for_testing;
}
+ static bool enable_metal(const gpu::GpuPreferences& prefs) {
+ return prefs.enable_metal;
+ }
static bool enable_gpu_benchmarking_extension(
const gpu::GpuPreferences& prefs) {
return prefs.enable_gpu_benchmarking_extension;
diff --git a/chromium/gpu/ipc/common/gpu_surface_lookup.h b/chromium/gpu/ipc/common/gpu_surface_lookup.h
index 5c29f31e908..5ae2c152bc5 100644
--- a/chromium/gpu/ipc/common/gpu_surface_lookup.h
+++ b/chromium/gpu/ipc/common/gpu_surface_lookup.h
@@ -27,10 +27,13 @@ class GPU_EXPORT GpuSurfaceLookup {
static void InitInstance(GpuSurfaceLookup* lookup);
virtual gfx::AcceleratedWidget AcquireNativeWidget(
- gpu::SurfaceHandle surface_handle) = 0;
+ gpu::SurfaceHandle surface_handle,
+ bool* can_be_used_with_surface_control) = 0;
#if defined(OS_ANDROID)
- virtual gl::ScopedJavaSurface AcquireJavaSurface(int surface_id) = 0;
+ virtual gl::ScopedJavaSurface AcquireJavaSurface(
+ int surface_id,
+ bool* can_be_used_with_surface_control) = 0;
#endif
private:
diff --git a/chromium/gpu/ipc/common/gpu_surface_tracker.cc b/chromium/gpu/ipc/common/gpu_surface_tracker.cc
index 4547d4a0b77..c8f57e56685 100644
--- a/chromium/gpu/ipc/common/gpu_surface_tracker.cc
+++ b/chromium/gpu/ipc/common/gpu_surface_tracker.cc
@@ -15,9 +15,12 @@
namespace gpu {
#if defined(OS_ANDROID)
-GpuSurfaceTracker::SurfaceRecord::SurfaceRecord(gfx::AcceleratedWidget widget,
- jobject j_surface)
- : widget(widget) {
+GpuSurfaceTracker::SurfaceRecord::SurfaceRecord(
+ gfx::AcceleratedWidget widget,
+ jobject j_surface,
+ bool can_be_used_with_surface_control)
+ : widget(widget),
+ can_be_used_with_surface_control(can_be_used_with_surface_control) {
// TODO(liberato): It would be nice to assert |surface != nullptr|, but we
// can't. in_process_context_factory.cc (for tests) actually calls us without
// a Surface from java. Presumably, nobody uses it. crbug.com/712717 .
@@ -64,7 +67,8 @@ void GpuSurfaceTracker::RemoveSurface(gpu::SurfaceHandle surface_handle) {
}
gfx::AcceleratedWidget GpuSurfaceTracker::AcquireNativeWidget(
- gpu::SurfaceHandle surface_handle) {
+ gpu::SurfaceHandle surface_handle,
+ bool* can_be_used_with_surface_control) {
base::AutoLock lock(surface_map_lock_);
SurfaceMap::iterator it = surface_map_.find(surface_handle);
if (it == surface_map_.end())
@@ -73,6 +77,8 @@ gfx::AcceleratedWidget GpuSurfaceTracker::AcquireNativeWidget(
#if defined(OS_ANDROID)
if (it->second.widget != gfx::kNullAcceleratedWidget)
ANativeWindow_acquire(it->second.widget);
+ *can_be_used_with_surface_control =
+ it->second.can_be_used_with_surface_control;
#endif // defined(OS_ANDROID)
return it->second.widget;
@@ -80,7 +86,8 @@ gfx::AcceleratedWidget GpuSurfaceTracker::AcquireNativeWidget(
#if defined(OS_ANDROID)
gl::ScopedJavaSurface GpuSurfaceTracker::AcquireJavaSurface(
- gpu::SurfaceHandle surface_handle) {
+ gpu::SurfaceHandle surface_handle,
+ bool* can_be_used_with_surface_control) {
base::AutoLock lock(surface_map_lock_);
SurfaceMap::const_iterator it = surface_map_.find(surface_handle);
if (it == surface_map_.end())
@@ -88,6 +95,9 @@ gl::ScopedJavaSurface GpuSurfaceTracker::AcquireJavaSurface(
const gl::ScopedJavaSurface& j_surface = it->second.surface;
DCHECK(j_surface.IsValid());
+
+ *can_be_used_with_surface_control =
+ it->second.can_be_used_with_surface_control;
return gl::ScopedJavaSurface::AcquireExternalSurface(
j_surface.j_surface().obj());
}
diff --git a/chromium/gpu/ipc/common/gpu_surface_tracker.h b/chromium/gpu/ipc/common/gpu_surface_tracker.h
index 52cf377920c..9542b22198d 100644
--- a/chromium/gpu/ipc/common/gpu_surface_tracker.h
+++ b/chromium/gpu/ipc/common/gpu_surface_tracker.h
@@ -36,7 +36,9 @@ class GPU_EXPORT GpuSurfaceTracker : public gpu::GpuSurfaceLookup {
public:
struct SurfaceRecord {
#if defined(OS_ANDROID)
- SurfaceRecord(gfx::AcceleratedWidget widget, jobject j_surface);
+ SurfaceRecord(gfx::AcceleratedWidget widget,
+ jobject j_surface,
+ bool can_be_used_with_surface_control);
#else // defined(OS_ANDROID)
explicit SurfaceRecord(gfx::AcceleratedWidget widget);
#endif // !defined(OS_ANDROID)
@@ -47,6 +49,7 @@ class GPU_EXPORT GpuSurfaceTracker : public gpu::GpuSurfaceLookup {
gfx::AcceleratedWidget widget;
#if defined(OS_ANDROID)
gl::ScopedJavaSurface surface;
+ bool can_be_used_with_surface_control;
#endif
};
@@ -54,11 +57,13 @@ class GPU_EXPORT GpuSurfaceTracker : public gpu::GpuSurfaceLookup {
// Returns the native widget associated with a given surface_handle.
// On Android, this adds a reference on the ANativeWindow.
gfx::AcceleratedWidget AcquireNativeWidget(
- gpu::SurfaceHandle surface_handle) override;
+ gpu::SurfaceHandle surface_handle,
+ bool* can_be_used_with_surface_control) override;
#if defined(OS_ANDROID)
gl::ScopedJavaSurface AcquireJavaSurface(
- gpu::SurfaceHandle surface_handle) override;
+ gpu::SurfaceHandle surface_handle,
+ bool* can_be_used_with_surface_control) override;
#endif
// Gets the global instance of the surface tracker.
diff --git a/chromium/gpu/ipc/common/struct_traits_unittest.cc b/chromium/gpu/ipc/common/struct_traits_unittest.cc
index 83a4aa2f7f9..e67bce0d76f 100644
--- a/chromium/gpu/ipc/common/struct_traits_unittest.cc
+++ b/chromium/gpu/ipc/common/struct_traits_unittest.cc
@@ -156,8 +156,8 @@ TEST_F(StructTraitsTest, GpuInfo) {
#if defined(OS_WIN)
const bool direct_composition = true;
const bool supports_overlays = true;
- const gpu::OverlayCapabilities overlay_capabilities = {
- {OverlayFormat::kBGRA, false}, {OverlayFormat::kNV12, true}};
+ const OverlaySupport yuy2_overlay_support = OverlaySupport::kScaling;
+ const OverlaySupport nv12_overlay_support = OverlaySupport::kNone;
const DxDiagNode dx_diagnostics;
#endif
const gpu::VideoDecodeAcceleratorCapabilities
@@ -200,7 +200,8 @@ TEST_F(StructTraitsTest, GpuInfo) {
#if defined(OS_WIN)
input.direct_composition = direct_composition;
input.supports_overlays = supports_overlays;
- input.overlay_capabilities = overlay_capabilities;
+ input.yuy2_overlay_support = yuy2_overlay_support;
+ input.nv12_overlay_support = nv12_overlay_support;
input.dx_diagnostics = dx_diagnostics;
#endif
input.video_decode_accelerator_capabilities =
@@ -259,7 +260,8 @@ TEST_F(StructTraitsTest, GpuInfo) {
#if defined(OS_WIN)
EXPECT_EQ(direct_composition, output.direct_composition);
EXPECT_EQ(supports_overlays, output.supports_overlays);
- EXPECT_EQ(overlay_capabilities, output.overlay_capabilities);
+ EXPECT_EQ(yuy2_overlay_support, output.yuy2_overlay_support);
+ EXPECT_EQ(nv12_overlay_support, output.nv12_overlay_support);
EXPECT_EQ(dx_diagnostics.values, output.dx_diagnostics.values);
#endif
EXPECT_EQ(output.video_decode_accelerator_capabilities.flags,
@@ -420,11 +422,6 @@ TEST_F(StructTraitsTest, GpuPreferences) {
GpuPreferences prefs;
prefs.gpu_startup_dialog = true;
prefs.disable_gpu_watchdog = true;
-#if defined(OS_WIN)
- const GpuPreferences::VpxDecodeVendors vendor =
- GpuPreferences::VPX_VENDOR_AMD;
- prefs.enable_accelerated_vpx_decode = vendor;
-#endif
prefs.enable_gpu_driver_debug_logging = true;
mojom::TraitsTestServicePtr proxy = GetTraitsTestProxy();
@@ -433,9 +430,6 @@ TEST_F(StructTraitsTest, GpuPreferences) {
EXPECT_TRUE(echo.gpu_startup_dialog);
EXPECT_TRUE(echo.disable_gpu_watchdog);
EXPECT_TRUE(echo.enable_gpu_driver_debug_logging);
-#if defined(OS_WIN)
- EXPECT_EQ(vendor, echo.enable_accelerated_vpx_decode);
-#endif
}
TEST_F(StructTraitsTest, GpuFeatureInfo) {
diff --git a/chromium/gpu/ipc/common/typemaps.gni b/chromium/gpu/ipc/common/typemaps.gni
index 54e4bf2d5c9..cf81e70769f 100644
--- a/chromium/gpu/ipc/common/typemaps.gni
+++ b/chromium/gpu/ipc/common/typemaps.gni
@@ -14,4 +14,5 @@ typemaps = [
"//gpu/ipc/common/memory_stats.typemap",
"//gpu/ipc/common/surface_handle.typemap",
"//gpu/ipc/common/sync_token.typemap",
+ "//gpu/ipc/common/vulkan_ycbcr_info.typemap",
]
diff --git a/chromium/gpu/ipc/common/vulkan_ycbcr_info.cc b/chromium/gpu/ipc/common/vulkan_ycbcr_info.cc
new file mode 100644
index 00000000000..ac0fbbe9f17
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_ycbcr_info.cc
@@ -0,0 +1,24 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/common/vulkan_ycbcr_info.h"
+
+namespace gpu {
+
+VulkanYCbCrInfo::VulkanYCbCrInfo() = default;
+
+VulkanYCbCrInfo::VulkanYCbCrInfo(uint32_t suggested_ycbcr_model,
+ uint32_t suggested_ycbcr_range,
+ uint32_t suggested_xchroma_offset,
+ uint32_t suggested_ychroma_offset,
+ uint64_t external_format,
+ uint32_t format_features)
+ : suggested_ycbcr_model(suggested_ycbcr_model),
+ suggested_ycbcr_range(suggested_ycbcr_range),
+ suggested_xchroma_offset(suggested_xchroma_offset),
+ suggested_ychroma_offset(suggested_ychroma_offset),
+ external_format(external_format),
+ format_features(format_features) {}
+
+} // namespace gpu
diff --git a/chromium/gpu/ipc/common/vulkan_ycbcr_info.h b/chromium/gpu/ipc/common/vulkan_ycbcr_info.h
new file mode 100644
index 00000000000..4ac17285110
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_ycbcr_info.h
@@ -0,0 +1,58 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_VULKAN_YCBCR_INFO_H_
+#define GPU_IPC_COMMON_VULKAN_YCBCR_INFO_H_
+
+#include <stdint.h>
+
+#include "gpu/gpu_export.h"
+
+namespace gpu {
+
+// Sampler Ycbcr conversion information.
+struct GPU_EXPORT VulkanYCbCrInfo {
+ VulkanYCbCrInfo();
+ VulkanYCbCrInfo(uint32_t suggested_ycbcr_model,
+ uint32_t suggested_ycbcr_range,
+ uint32_t suggested_xchroma_offset,
+ uint32_t suggested_ychroma_offset,
+ uint64_t external_format,
+ uint32_t format_features);
+
+ // Describes the color matrix for conversion between color models.
+ // Corresponds to vulkan type: VkSamplerYcbcrModelConversion.
+ uint32_t suggested_ycbcr_model;
+
+ // Describes whether the encoded values have headroom and foot room, or
+ // whether the encoding uses the full numerical range.
+ // Corresponds to vulkan type: VkSamplerYcbcrRange.
+ uint32_t suggested_ycbcr_range;
+
+ // Describes the sample location associated with downsampled chroma channels
+ // in the x dimension. It has no effect for formats in which chroma channels
+ // are the same resolution as the luma channel.
+ // Corresponds to vulkan type: VkChromaLocation.
+ uint32_t suggested_xchroma_offset;
+
+ // Describes the sample location associated with downsampled chroma channels
+ // in the y dimension. It has no effect for formats in which chroma channels
+ // are not downsampled vertically.
+ // Corresponds to vulkan type: VkChromaLocation.
+ uint32_t suggested_ychroma_offset;
+
+ // Implementation-defined external format identifier for use with
+ // VkExternalFormatANDROID.
+ // This property is driver specific.
+ uint64_t external_format;
+
+ // Describes the capabilities of the external format when used with an image
+ // bound to memory imported from buffer.
+ // Corresponds to vulkan type: VkFormatFeatureFlags.
+ uint32_t format_features;
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_COMMON_VULKAN_YCBCR_INFO_H_
diff --git a/chromium/gpu/ipc/common/vulkan_ycbcr_info.mojom b/chromium/gpu/ipc/common/vulkan_ycbcr_info.mojom
new file mode 100644
index 00000000000..4c755cea00b
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_ycbcr_info.mojom
@@ -0,0 +1,19 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module gpu.mojom;
+
+// Sampler Ycbcr conversion information. All of this struct parameters are
+// enums defined in the vulkan api which are passed as uint32/uint64 over ipc.
+// We use all of these values in an "opaque" way and don't consume it directly
+// in chrome.
+// See gpu/ipc/common/vulkan_ycbcr_info.h.
+struct VulkanYCbCrInfo {
+ uint32 suggested_ycbcr_model;
+ uint32 suggested_ycbcr_range;
+ uint32 suggested_xchroma_offset;
+ uint32 suggested_ychroma_offset;
+ uint64 external_format;
+ uint32 format_features;
+};
diff --git a/chromium/gpu/ipc/common/vulkan_ycbcr_info.typemap b/chromium/gpu/ipc/common/vulkan_ycbcr_info.typemap
new file mode 100644
index 00000000000..f7a4e24cf6b
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_ycbcr_info.typemap
@@ -0,0 +1,8 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+mojom = "//gpu/ipc/common/vulkan_ycbcr_info.mojom"
+public_headers = [ "//gpu/ipc/common/vulkan_ycbcr_info.h" ]
+traits_headers = [ "//gpu/ipc/common/vulkan_ycbcr_info_mojom_traits.h" ]
+type_mappings = [ "gpu.mojom.VulkanYCbCrInfo=::gpu::VulkanYCbCrInfo" ]
diff --git a/chromium/gpu/ipc/common/vulkan_ycbcr_info_mojom_traits.h b/chromium/gpu/ipc/common/vulkan_ycbcr_info_mojom_traits.h
new file mode 100644
index 00000000000..88e8aa95700
--- /dev/null
+++ b/chromium/gpu/ipc/common/vulkan_ycbcr_info_mojom_traits.h
@@ -0,0 +1,53 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_VULKAN_YCBCR_INFO_MOJOM_TRAITS_H_
+#define GPU_IPC_COMMON_VULKAN_YCBCR_INFO_MOJOM_TRAITS_H_
+
+#include "gpu/ipc/common/vulkan_ycbcr_info.h"
+#include "gpu/ipc/common/vulkan_ycbcr_info.mojom-shared.h"
+
+namespace mojo {
+
+template <>
+struct StructTraits<gpu::mojom::VulkanYCbCrInfoDataView, gpu::VulkanYCbCrInfo> {
+ static uint32_t suggested_ycbcr_model(const gpu::VulkanYCbCrInfo& info) {
+ return info.suggested_ycbcr_model;
+ }
+
+ static uint32_t suggested_ycbcr_range(const gpu::VulkanYCbCrInfo& info) {
+ return info.suggested_ycbcr_range;
+ }
+
+ static uint32_t suggested_xchroma_offset(const gpu::VulkanYCbCrInfo& info) {
+ return info.suggested_xchroma_offset;
+ }
+
+ static uint32_t suggested_ychroma_offset(const gpu::VulkanYCbCrInfo& info) {
+ return info.suggested_ychroma_offset;
+ }
+
+ static uint64_t external_format(const gpu::VulkanYCbCrInfo& info) {
+ return info.external_format;
+ }
+
+ static uint32_t format_features(const gpu::VulkanYCbCrInfo& info) {
+ return info.format_features;
+ }
+
+ static bool Read(gpu::mojom::VulkanYCbCrInfoDataView data,
+ gpu::VulkanYCbCrInfo* out) {
+ out->suggested_ycbcr_model = data.suggested_ycbcr_model();
+ out->suggested_ycbcr_range = data.suggested_ycbcr_range();
+ out->suggested_xchroma_offset = data.suggested_xchroma_offset();
+ out->suggested_ychroma_offset = data.suggested_ychroma_offset();
+ out->external_format = data.external_format();
+ out->format_features = data.format_features();
+ return true;
+ }
+};
+
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_VULKAN_YCBCR_INFO_MOJOM_TRAITS_H_
diff --git a/chromium/gpu/ipc/gl_in_process_context.cc b/chromium/gpu/ipc/gl_in_process_context.cc
index af3fde4f9ac..cee793b2982 100644
--- a/chromium/gpu/ipc/gl_in_process_context.cc
+++ b/chromium/gpu/ipc/gl_in_process_context.cc
@@ -75,11 +75,11 @@ ContextResult GLInProcessContext::Initialize(
command_buffer_ = std::make_unique<InProcessCommandBuffer>(
task_executor, GURL("chrome://gpu/GLInProcessContext::Initialize"));
- auto result = command_buffer_->Initialize(
- surface, is_offscreen, window, attribs, /*share_command_buffer=*/nullptr,
- gpu_memory_buffer_manager, image_factory,
- /*gpu_channel_manager_delegate=*/nullptr, std::move(task_runner), nullptr,
- nullptr);
+ auto result =
+ command_buffer_->Initialize(surface, is_offscreen, window, attribs,
+ gpu_memory_buffer_manager, image_factory,
+ /*gpu_channel_manager_delegate=*/nullptr,
+ std::move(task_runner), nullptr, nullptr);
if (result != ContextResult::kSuccess) {
DLOG(ERROR) << "Failed to initialize InProcessCommmandBuffer";
return result;
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.cc b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
index 2c20bb029c7..9f85b623f18 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.cc
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.cc
@@ -61,7 +61,8 @@ GpuInProcessThreadService::GpuInProcessThreadService(
const GpuFeatureInfo& gpu_feature_info,
const GpuPreferences& gpu_preferences,
SharedImageManager* shared_image_manager,
- gles2::ProgramCache* program_cache)
+ gles2::ProgramCache* program_cache,
+ scoped_refptr<SharedContextState> shared_context_state)
: CommandBufferTaskExecutor(gpu_preferences,
gpu_feature_info,
sync_point_manager,
@@ -69,7 +70,8 @@ GpuInProcessThreadService::GpuInProcessThreadService(
share_group,
share_group_surface_format,
shared_image_manager,
- program_cache),
+ program_cache,
+ std::move(shared_context_state)),
task_runner_(task_runner),
scheduler_(scheduler) {}
diff --git a/chromium/gpu/ipc/gpu_in_process_thread_service.h b/chromium/gpu/ipc/gpu_in_process_thread_service.h
index 98a0f88f950..39775b735fc 100644
--- a/chromium/gpu/ipc/gpu_in_process_thread_service.h
+++ b/chromium/gpu/ipc/gpu_in_process_thread_service.h
@@ -37,7 +37,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT GpuInProcessThreadService
const GpuFeatureInfo& gpu_feature_info,
const GpuPreferences& gpu_preferences,
SharedImageManager* shared_image_manager,
- gles2::ProgramCache* program_cache);
+ gles2::ProgramCache* program_cache,
+ scoped_refptr<SharedContextState> shared_context_state);
~GpuInProcessThreadService() override;
// CommandBufferTaskExecutor implementation.
diff --git a/chromium/gpu/ipc/host/BUILD.gn b/chromium/gpu/ipc/host/BUILD.gn
index 845ac14fd7b..6c0930ab208 100644
--- a/chromium/gpu/ipc/host/BUILD.gn
+++ b/chromium/gpu/ipc/host/BUILD.gn
@@ -8,8 +8,6 @@ source_set("host") {
sources = [
"gpu_memory_buffer_support.cc",
"gpu_memory_buffer_support.h",
- "gpu_switches.cc",
- "gpu_switches.h",
"shader_disk_cache.cc",
"shader_disk_cache.h",
]
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
index 22185781f5c..beafdd26c94 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.cc
@@ -4,83 +4,47 @@
#include "gpu/ipc/host/gpu_memory_buffer_support.h"
-#include "base/command_line.h"
#include "base/logging.h"
#include "build/build_config.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
-#include "gpu/ipc/host/gpu_switches.h"
#include "ui/gl/gl_bindings.h"
namespace gpu {
-bool AreNativeGpuMemoryBuffersEnabled() {
-#if defined(OS_MACOSX) || defined(OS_FUCHSIA)
- return !base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableNativeGpuMemoryBuffers);
-#else
- return base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kEnableNativeGpuMemoryBuffers);
-#endif
-}
-
GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations(
GpuMemoryBufferSupport* support) {
GpuMemoryBufferConfigurationSet configurations;
#if defined(USE_OZONE) || defined(OS_MACOSX) || defined(OS_WIN) || \
defined(OS_ANDROID)
- if (AreNativeGpuMemoryBuffersEnabled()) {
- const gfx::BufferFormat kNativeFormats[] = {
- gfx::BufferFormat::R_8,
- gfx::BufferFormat::RG_88,
- gfx::BufferFormat::R_16,
- gfx::BufferFormat::BGR_565,
- gfx::BufferFormat::RGBA_4444,
- gfx::BufferFormat::RGBA_8888,
- gfx::BufferFormat::BGRA_8888,
- gfx::BufferFormat::BGRX_1010102,
- gfx::BufferFormat::RGBX_1010102,
- gfx::BufferFormat::RGBA_F16,
- gfx::BufferFormat::UYVY_422,
- gfx::BufferFormat::YVU_420,
- gfx::BufferFormat::YUV_420_BIPLANAR};
- const gfx::BufferUsage kNativeUsages[] = {
- gfx::BufferUsage::GPU_READ,
- gfx::BufferUsage::SCANOUT,
- gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE,
- gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE,
- gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
- gfx::BufferUsage::GPU_READ_CPU_READ_WRITE};
- for (auto format : kNativeFormats) {
- for (auto usage : kNativeUsages) {
- if (support->IsNativeGpuMemoryBufferConfigurationSupported(format,
- usage))
- configurations.insert(std::make_pair(format, usage));
- }
- }
- }
+ const gfx::BufferFormat kBufferFormats[] = {
+ gfx::BufferFormat::R_8, gfx::BufferFormat::R_16,
+ gfx::BufferFormat::RG_88, gfx::BufferFormat::BGR_565,
+ gfx::BufferFormat::RGBA_4444, gfx::BufferFormat::RGBX_8888,
+ gfx::BufferFormat::RGBA_8888, gfx::BufferFormat::BGRX_8888,
+ gfx::BufferFormat::BGRX_1010102, gfx::BufferFormat::RGBX_1010102,
+ gfx::BufferFormat::BGRA_8888, gfx::BufferFormat::RGBA_F16,
+ gfx::BufferFormat::YVU_420, gfx::BufferFormat::YUV_420_BIPLANAR,
+ gfx::BufferFormat::UYVY_422};
- const gfx::BufferFormat kGPUReadWriteFormats[] = {
- gfx::BufferFormat::BGR_565, gfx::BufferFormat::RGBA_8888,
- gfx::BufferFormat::RGBX_8888, gfx::BufferFormat::BGRA_8888,
- gfx::BufferFormat::BGRX_8888, gfx::BufferFormat::UYVY_422,
- gfx::BufferFormat::YVU_420, gfx::BufferFormat::YUV_420_BIPLANAR,
- gfx::BufferFormat::R_8};
- const gfx::BufferUsage kGPUReadWriteUsages[] = {
+ const gfx::BufferUsage kUsages[] = {
gfx::BufferUsage::GPU_READ,
gfx::BufferUsage::SCANOUT,
gfx::BufferUsage::SCANOUT_CAMERA_READ_WRITE,
gfx::BufferUsage::CAMERA_AND_CPU_READ_WRITE,
gfx::BufferUsage::SCANOUT_CPU_READ_WRITE,
- gfx::BufferUsage::SCANOUT_VDA_WRITE};
- for (auto format : kGPUReadWriteFormats) {
- for (auto usage : kGPUReadWriteUsages) {
+ gfx::BufferUsage::SCANOUT_VDA_WRITE,
+ gfx::BufferUsage::GPU_READ_CPU_READ_WRITE};
+
+ for (auto format : kBufferFormats) {
+ for (auto usage : kUsages) {
if (support->IsNativeGpuMemoryBufferConfigurationSupported(format, usage))
configurations.insert(std::make_pair(format, usage));
}
}
-#endif // defined(USE_OZONE) || defined(OS_MACOSX) || defined(OS_WIN)
+#endif // defined(USE_OZONE) || defined(OS_MACOSX) || defined(OS_WIN) ||
+ // defined(OS_ANDROID)
return configurations;
}
diff --git a/chromium/gpu/ipc/host/gpu_memory_buffer_support.h b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
index 3003eec7c99..f3deeb62a01 100644
--- a/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
+++ b/chromium/gpu/ipc/host/gpu_memory_buffer_support.h
@@ -38,8 +38,6 @@ namespace gpu {
class GpuMemoryBufferSupport;
-bool AreNativeGpuMemoryBuffersEnabled();
-
// Returns the set of supported configurations.
GpuMemoryBufferConfigurationSet GetNativeGpuMemoryBufferConfigurations(
GpuMemoryBufferSupport* support);
diff --git a/chromium/gpu/ipc/host/gpu_switches.cc b/chromium/gpu/ipc/host/gpu_switches.cc
deleted file mode 100644
index 1834e3c64b8..00000000000
--- a/chromium/gpu/ipc/host/gpu_switches.cc
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/host/gpu_switches.h"
-
-namespace switches {
-
-// Enable native GPU memory buffer support when available.
-const char kEnableNativeGpuMemoryBuffers[] = "enable-native-gpu-memory-buffers";
-
-// Disables native GPU memory buffer support.
-const char kDisableNativeGpuMemoryBuffers[] =
- "disable-native-gpu-memory-buffers";
-
-} // namespace switches
diff --git a/chromium/gpu/ipc/host/gpu_switches.h b/chromium/gpu/ipc/host/gpu_switches.h
deleted file mode 100644
index 7f205af4f4a..00000000000
--- a/chromium/gpu/ipc/host/gpu_switches.h
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Defines all the command-line switches used by gpu/ipc/host
-
-#ifndef GPU_IPC_HOST_GPU_SWITCHES_H_
-#define GPU_IPC_HOST_GPU_SWITCHES_H_
-
-namespace switches {
-
-extern const char kEnableNativeGpuMemoryBuffers[];
-extern const char kDisableNativeGpuMemoryBuffers[];
-
-} // namespace switches
-
-#endif // GPU_IPC_HOST_GPU_SWITCHES_H_
diff --git a/chromium/gpu/ipc/in_process_command_buffer.cc b/chromium/gpu/ipc/in_process_command_buffer.cc
index db1bd098d06..6556ea74227 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.cc
+++ b/chromium/gpu/ipc/in_process_command_buffer.cc
@@ -27,6 +27,7 @@
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
+#include "cc/base/completion_event.h"
#include "components/viz/common/features.h"
#include "gpu/command_buffer/client/gpu_control_client.h"
#include "gpu/command_buffer/client/gpu_memory_buffer_manager.h"
@@ -115,6 +116,24 @@ class ScopedEvent {
base::WaitableEvent* event_;
};
+void AddGLSurfaceRefOnGpuThread(gl::GLSurface* surface) {
+ surface->AddRef();
+}
+
+void ReleaseGLSurfaceOnGpuThread(gl::GLSurface* surface,
+ cc::CompletionEvent* event) {
+ surface->Release();
+ event->Signal();
+}
+
+void ReleaseGLSurfaceOnClientThread(gl::GLSurface* surface,
+ CommandBufferTaskExecutor* task_executor) {
+ cc::CompletionEvent event;
+ task_executor->ScheduleOutOfOrderTask(base::BindOnce(
+ &ReleaseGLSurfaceOnGpuThread, base::Unretained(surface), &event));
+ event.Wait();
+}
+
} // namespace
class InProcessCommandBuffer::SharedImageInterface
@@ -206,6 +225,21 @@ class InProcessCommandBuffer::SharedImageInterface
return mailbox;
}
+#if defined(OS_WIN)
+ SwapChainMailboxes CreateSwapChain(viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage) override {
+ NOTREACHED();
+ return {};
+ }
+
+ void PresentSwapChain(const SyncToken& sync_token,
+ const Mailbox& mailbox) override {
+ NOTREACHED();
+ }
+#endif // OS_WIN
+
void UpdateSharedImage(const SyncToken& sync_token,
const Mailbox& mailbox) override {
base::AutoLock lock(lock_);
@@ -304,8 +338,24 @@ gpu::SharedImageInterface* InProcessCommandBuffer::GetSharedImageInterface()
return shared_image_interface_.get();
}
+base::ScopedClosureRunner InProcessCommandBuffer::GetCacheBackBufferCb() {
+ // It is safe to use base::Unretained for |surface_| here since the we use a
+ // synchronous task to create and destroy it from the client thread.
+ task_executor_->ScheduleOutOfOrderTask(base::BindOnce(
+ &AddGLSurfaceRefOnGpuThread, base::Unretained(surface_.get())));
+
+ // Also safe to use base::Unretained for |task_executor_| since the caller is
+ // supposed to guarentee that it outlives the callback.
+ return base::ScopedClosureRunner(base::BindOnce(
+ &ReleaseGLSurfaceOnClientThread, base::Unretained(surface_.get()),
+ base::Unretained(task_executor_)));
+}
+
bool InProcessCommandBuffer::MakeCurrent() {
DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ if (!context_) {
+ return true;
+ }
if (error::IsError(command_buffer_->GetState().error)) {
DLOG(ERROR) << "MakeCurrent failed because context lost.";
@@ -335,7 +385,6 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
bool is_offscreen,
SurfaceHandle surface_handle,
const ContextCreationAttribs& attribs,
- InProcessCommandBuffer* share_group,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
ImageFactory* image_factory,
GpuChannelManagerDelegate* gpu_channel_manager_delegate,
@@ -343,7 +392,6 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
gpu::raster::GrShaderCache* gr_shader_cache,
GpuProcessActivityFlags* activity_flags) {
DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
- DCHECK(!share_group || task_executor_ == share_group->task_executor_);
TRACE_EVENT0("gpu", "InProcessCommandBuffer::Initialize")
is_offscreen_ = is_offscreen;
@@ -365,10 +413,12 @@ gpu::ContextResult InProcessCommandBuffer::Initialize(
origin_task_runner_ = std::move(task_runner);
}
+ client_thread_weak_ptr_ = client_thread_weak_ptr_factory_.GetWeakPtr();
+
Capabilities capabilities;
InitializeOnGpuThreadParams params(surface_handle, attribs, &capabilities,
- share_group, image_factory,
- gr_shader_cache, activity_flags);
+ image_factory, gr_shader_cache,
+ activity_flags);
base::OnceCallback<gpu::ContextResult(void)> init_task =
base::BindOnce(&InProcessCommandBuffer::InitializeOnGpuThread,
@@ -404,47 +454,37 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
GpuDriverBugWorkarounds workarounds(
task_executor_->gpu_feature_info().enabled_gpu_driver_bug_workarounds);
- if (params.share_command_buffer) {
- context_group_ = params.share_command_buffer->context_group_;
- } else {
- std::unique_ptr<MemoryTracker> memory_tracker;
- // Android WebView won't have a memory tracker.
- if (task_executor_->ShouldCreateMemoryTracker()) {
- const uint64_t client_tracing_id =
- base::trace_event::MemoryDumpManager::GetInstance()
- ->GetTracingProcessId();
- memory_tracker = std::make_unique<GpuCommandBufferMemoryTracker>(
- kInProcessCommandBufferClientId, client_tracing_id,
- command_buffer_id_.GetUnsafeValue(), params.attribs.context_type,
- base::ThreadTaskRunnerHandle::Get());
- }
-
- gpu::GpuFeatureInfo gpu_feature_info = task_executor_->gpu_feature_info();
- if (params.attribs.backed_by_surface_texture) {
- gpu_feature_info.status_values[GPU_FEATURE_TYPE_ANDROID_SURFACE_CONTROL] =
- kGpuFeatureStatusDisabled;
- }
- auto feature_info =
- base::MakeRefCounted<gles2::FeatureInfo>(workarounds, gpu_feature_info);
- context_group_ = base::MakeRefCounted<gles2::ContextGroup>(
- task_executor_->gpu_preferences(),
- gles2::PassthroughCommandDecoderSupported(),
- task_executor_->mailbox_manager(), std::move(memory_tracker),
- task_executor_->shader_translator_cache(),
- task_executor_->framebuffer_completeness_cache(), feature_info,
- params.attribs.bind_generates_resource, task_executor_->image_manager(),
- params.image_factory, nullptr /* progress_reporter */,
- task_executor_->gpu_feature_info(),
- task_executor_->discardable_manager(),
- task_executor_->passthrough_discardable_manager(),
- task_executor_->shared_image_manager());
+ std::unique_ptr<MemoryTracker> memory_tracker;
+ // Android WebView won't have a memory tracker.
+ if (task_executor_->ShouldCreateMemoryTracker()) {
+ const uint64_t client_tracing_id =
+ base::trace_event::MemoryDumpManager::GetInstance()
+ ->GetTracingProcessId();
+ memory_tracker = std::make_unique<GpuCommandBufferMemoryTracker>(
+ kInProcessCommandBufferClientId, client_tracing_id,
+ command_buffer_id_.GetUnsafeValue(), params.attribs.context_type,
+ base::ThreadTaskRunnerHandle::Get());
}
+ auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
+ workarounds, task_executor_->gpu_feature_info());
+ context_group_ = base::MakeRefCounted<gles2::ContextGroup>(
+ task_executor_->gpu_preferences(),
+ gles2::PassthroughCommandDecoderSupported(),
+ task_executor_->mailbox_manager(), std::move(memory_tracker),
+ task_executor_->shader_translator_cache(),
+ task_executor_->framebuffer_completeness_cache(), feature_info,
+ params.attribs.bind_generates_resource, task_executor_->image_manager(),
+ params.image_factory, nullptr /* progress_reporter */,
+ task_executor_->gpu_feature_info(), task_executor_->discardable_manager(),
+ task_executor_->passthrough_discardable_manager(),
+ task_executor_->shared_image_manager());
+
#if defined(OS_MACOSX)
- // Virtualize PreferIntegratedGpu contexts by default on OS X to prevent
+ // Virtualize GpuPreference:::kLowPower contexts by default on OS X to prevent
// performance regressions when enabling FCM. https://crbug.com/180463
use_virtualized_gl_context_ |=
- (params.attribs.gpu_preference == gl::PreferIntegratedGpu);
+ (params.attribs.gpu_preference == gl::GpuPreference::kLowPower);
#endif
use_virtualized_gl_context_ |= task_executor_->ForceVirtualizedGLContexts();
@@ -468,15 +508,22 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
command_buffer_ = std::make_unique<CommandBufferService>(
this, context_group_->memory_tracker());
+ context_state_ = task_executor_->shared_context_state();
+
if (!surface_) {
if (is_offscreen_) {
- // TODO(crbug.com/832243): GLES2CommandBufferStub has additional logic for
- // offscreen surfaces that might be needed here.
- surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
- if (!surface_.get()) {
- DestroyOnGpuThread();
- LOG(ERROR) << "ContextResult::kFatalFailure: Failed to create surface.";
- return gpu::ContextResult::kFatalFailure;
+ if (context_state_) {
+ surface_ = context_state_->surface();
+ } else {
+ // TODO(crbug.com/832243): GLES2CommandBufferStub has additional logic
+ // for offscreen surfaces that might be needed here.
+ surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
+ if (!surface_.get()) {
+ DestroyOnGpuThread();
+ LOG(ERROR)
+ << "ContextResult::kFatalFailure: Failed to create surface.";
+ return gpu::ContextResult::kFatalFailure;
+ }
}
} else {
gl::GLSurfaceFormat surface_format;
@@ -541,13 +588,9 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
task_sequence_->GetSequenceId());
if (context_group_->use_passthrough_cmd_decoder()) {
- // When using the passthrough command decoder, only share with other
- // contexts in the explicitly requested share group.
- if (params.share_command_buffer) {
- gl_share_group_ = params.share_command_buffer->gl_share_group_;
- } else {
- gl_share_group_ = base::MakeRefCounted<gl::GLShareGroup>();
- }
+ // When using the passthrough command decoder, never share with other
+ // contexts.
+ gl_share_group_ = base::MakeRefCounted<gl::GLShareGroup>();
} else {
// When using the validating command decoder, always use the global share
// group.
@@ -560,12 +603,13 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
return gpu::ContextResult::kFatalFailure;
}
std::unique_ptr<webgpu::WebGPUDecoder> webgpu_decoder(
- webgpu::WebGPUDecoder::Create(this, command_buffer_.get(),
- task_executor_->outputter()));
+ webgpu::WebGPUDecoder::Create(
+ this, command_buffer_.get(), task_executor_->shared_image_manager(),
+ context_group_->memory_tracker(), task_executor_->outputter()));
gpu::ContextResult result = webgpu_decoder->Initialize();
if (result != gpu::ContextResult::kSuccess) {
DestroyOnGpuThread();
- DLOG(ERROR) << "Failed to initializ WebGPUe decoder.";
+ DLOG(ERROR) << "Failed to initialize WebGPU decoder.";
return result;
}
decoder_ = std::move(webgpu_decoder);
@@ -607,20 +651,30 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
if (params.attribs.enable_raster_interface &&
!params.attribs.enable_gles2_interface) {
- context_state_ = base::MakeRefCounted<SharedContextState>(
- gl_share_group_, surface_, real_context, use_virtualized_gl_context_,
- base::DoNothing());
- context_state_->InitializeGL(task_executor_->gpu_preferences(),
- context_group_->feature_info());
gr_shader_cache_ = params.gr_shader_cache;
- context_state_->InitializeGrContext(workarounds, params.gr_shader_cache,
- params.activity_flags);
+ if (!context_state_) {
+ context_state_ = base::MakeRefCounted<SharedContextState>(
+ gl_share_group_, surface_, real_context,
+ use_virtualized_gl_context_, base::DoNothing());
+ context_state_->InitializeGL(task_executor_->gpu_preferences(),
+ context_group_->feature_info());
+ context_state_->InitializeGrContext(workarounds, params.gr_shader_cache,
+ params.activity_flags);
+ }
+
+ if (!context_state_->MakeCurrent(nullptr)) {
+ DestroyOnGpuThread();
+ LOG(ERROR) << "Failed to make context current.";
+ return ContextResult::kTransientFailure;
+ }
if (base::ThreadTaskRunnerHandle::IsSet()) {
gr_cache_controller_.emplace(context_state_.get(),
base::ThreadTaskRunnerHandle::Get());
}
+ context_ = context_state_->context();
+
decoder_.reset(raster::RasterDecoder::Create(
this, command_buffer_.get(), task_executor_->outputter(),
task_executor_->gpu_feature_info(), task_executor_->gpu_preferences(),
@@ -630,12 +684,7 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
decoder_.reset(gles2::GLES2Decoder::Create(this, command_buffer_.get(),
task_executor_->outputter(),
context_group_.get()));
- }
-
- if (use_virtualized_gl_context_) {
- if (context_state_) {
- context_ = context_state_->context();
- } else {
+ if (use_virtualized_gl_context_) {
context_ = base::MakeRefCounted<GLContextVirtual>(
gl_share_group_.get(), real_context.get(), decoder_->AsWeakPtr());
if (!context_->Initialize(surface_.get(),
@@ -649,18 +698,18 @@ gpu::ContextResult InProcessCommandBuffer::InitializeOnGpuThread(
"Failed to initialize virtual GL context.";
return gpu::ContextResult::kFatalFailure;
}
- }
- if (!context_->MakeCurrent(surface_.get())) {
- DestroyOnGpuThread();
- // The caller should retry making a context, but this one won't work.
- LOG(ERROR) << "ContextResult::kTransientFailure: "
- "Could not make context current.";
- return gpu::ContextResult::kTransientFailure;
+ if (!context_->MakeCurrent(surface_.get())) {
+ DestroyOnGpuThread();
+ // The caller should retry making a context, but this one won't work.
+ LOG(ERROR) << "ContextResult::kTransientFailure: "
+ "Could not make context current.";
+ return gpu::ContextResult::kTransientFailure;
+ }
+ } else {
+ context_ = real_context;
+ DCHECK(context_->IsCurrent(surface_.get()));
}
- } else {
- context_ = real_context;
- DCHECK(context_->IsCurrent(surface_.get()));
}
if (!context_group_->has_program_cache() &&
@@ -765,6 +814,8 @@ bool InProcessCommandBuffer::DestroyOnGpuThread() {
gl_share_group_ = nullptr;
context_group_ = nullptr;
task_sequence_ = nullptr;
+ if (context_state_)
+ context_state_->MakeCurrent(nullptr);
context_state_ = nullptr;
return true;
}
@@ -814,9 +865,8 @@ void InProcessCommandBuffer::OnParseError() {
}
}
- PostOrRunClientCallback(
- base::BindOnce(&InProcessCommandBuffer::OnContextLost,
- client_thread_weak_ptr_factory_.GetWeakPtr()));
+ PostOrRunClientCallback(base::BindOnce(&InProcessCommandBuffer::OnContextLost,
+ client_thread_weak_ptr_));
}
void InProcessCommandBuffer::OnContextLost() {
@@ -1242,9 +1292,9 @@ void InProcessCommandBuffer::ScheduleGrContextCleanup() {
void InProcessCommandBuffer::HandleReturnData(base::span<const uint8_t> data) {
DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
std::vector<uint8_t> vec(data.data(), data.data() + data.size());
- PostOrRunClientCallback(base::BindOnce(
- &InProcessCommandBuffer::HandleReturnDataOnOriginThread,
- client_thread_weak_ptr_factory_.GetWeakPtr(), std::move(vec)));
+ PostOrRunClientCallback(
+ base::BindOnce(&InProcessCommandBuffer::HandleReturnDataOnOriginThread,
+ client_thread_weak_ptr_, std::move(vec)));
}
void InProcessCommandBuffer::PostOrRunClientCallback(
@@ -1375,11 +1425,15 @@ void InProcessCommandBuffer::LazyCreateSharedImageFactory() {
if (shared_image_factory_)
return;
+ // We need WrappedSkImage to support creating a SharedImage with pixel data
+ // when GL is unavailable. This is used in various unit tests.
+ const bool enable_wrapped_sk_image =
+ context_state_ && !context_state_->GrContextIsGL();
shared_image_factory_ = std::make_unique<SharedImageFactory>(
GetGpuPreferences(), context_group_->feature_info()->workarounds(),
GetGpuFeatureInfo(), context_state_.get(),
context_group_->mailbox_manager(), task_executor_->shared_image_manager(),
- image_factory_, nullptr, features::IsUsingSkiaRenderer());
+ image_factory_, nullptr, enable_wrapped_sk_image);
}
void InProcessCommandBuffer::CreateSharedImageOnGpuThread(
@@ -1392,7 +1446,7 @@ void InProcessCommandBuffer::CreateSharedImageOnGpuThread(
DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
// |shared_image_factory_| never writes to the surface, so skip unnecessary
// MakeCurrent to improve performance. https://crbug.com/457431
- if (!context_->IsCurrent(nullptr) && !MakeCurrent())
+ if (context_ && !context_->IsCurrent(nullptr) && !MakeCurrent())
return;
LazyCreateSharedImageFactory();
if (!shared_image_factory_->CreateSharedImage(mailbox, format, size,
@@ -1416,7 +1470,7 @@ void InProcessCommandBuffer::CreateSharedImageWithDataOnGpuThread(
DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
// |shared_image_factory_| never writes to the surface, so skip unnecessary
// MakeCurrent to improve performance. https://crbug.com/457431
- if (!context_->IsCurrent(nullptr) && !MakeCurrent())
+ if (context_ && !context_->IsCurrent(nullptr) && !MakeCurrent())
return;
LazyCreateSharedImageFactory();
if (!shared_image_factory_->CreateSharedImage(
@@ -1522,6 +1576,19 @@ bool InProcessCommandBuffer::CanWaitUnverifiedSyncToken(
return sync_token.namespace_id() == GetNamespaceID();
}
+void InProcessCommandBuffer::SetDisplayTransform(
+ gfx::OverlayTransform transform) {
+ ScheduleGpuTask(
+ base::BindOnce(&InProcessCommandBuffer::SetDisplayTransformOnGpuThread,
+ gpu_thread_weak_ptr_factory_.GetWeakPtr(), transform));
+}
+
+void InProcessCommandBuffer::SetDisplayTransformOnGpuThread(
+ gfx::OverlayTransform transform) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ surface_->SetDisplayTransform(transform);
+}
+
#if defined(OS_WIN)
void InProcessCommandBuffer::DidCreateAcceleratedSurfaceChildWindow(
SurfaceHandle parent_window,
@@ -1551,7 +1618,7 @@ void InProcessCommandBuffer::DidSwapBuffersComplete(
PostOrRunClientCallback(base::BindOnce(
&InProcessCommandBuffer::DidSwapBuffersCompleteOnOriginThread,
- client_thread_weak_ptr_factory_.GetWeakPtr(), base::Passed(&params)));
+ client_thread_weak_ptr_, base::Passed(&params)));
}
const gles2::FeatureInfo* InProcessCommandBuffer::GetFeatureInfo() const {
@@ -1571,10 +1638,9 @@ void InProcessCommandBuffer::BufferPresented(
SwapBufferParams params = pending_presented_params_.front();
pending_presented_params_.pop_front();
- PostOrRunClientCallback(
- base::BindOnce(&InProcessCommandBuffer::BufferPresentedOnOriginThread,
- client_thread_weak_ptr_factory_.GetWeakPtr(),
- params.swap_id, params.flags, feedback));
+ PostOrRunClientCallback(base::BindOnce(
+ &InProcessCommandBuffer::BufferPresentedOnOriginThread,
+ client_thread_weak_ptr_, params.swap_id, params.flags, feedback));
}
void InProcessCommandBuffer::AddFilter(IPC::MessageFilter* message_filter) {
@@ -1601,10 +1667,9 @@ void InProcessCommandBuffer::BufferPresentedOnOriginThread(
if (gpu_control_client_)
gpu_control_client_->OnSwapBufferPresented(swap_id, feedback);
- if (update_vsync_parameters_completion_callback_ &&
- ShouldUpdateVsyncParams(feedback)) {
- update_vsync_parameters_completion_callback_.Run(feedback.timestamp,
- feedback.interval);
+ if (update_vsync_parameters_callback_ && ShouldUpdateVsyncParams(feedback)) {
+ update_vsync_parameters_callback_.Run(feedback.timestamp,
+ feedback.interval);
}
}
@@ -1617,9 +1682,9 @@ void InProcessCommandBuffer::HandleReturnDataOnOriginThread(
}
void InProcessCommandBuffer::SetUpdateVSyncParametersCallback(
- const UpdateVSyncParametersCallback& callback) {
+ viz::UpdateVSyncParametersCallback callback) {
DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
- update_vsync_parameters_completion_callback_ = callback;
+ update_vsync_parameters_callback_ = std::move(callback);
}
void InProcessCommandBuffer::UpdateActiveUrl() {
@@ -1627,4 +1692,47 @@ void InProcessCommandBuffer::UpdateActiveUrl() {
ContextUrl::SetActiveUrl(active_url_);
}
+void InProcessCommandBuffer::SetGpuVSyncCallback(
+ viz::GpuVSyncCallback callback) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+ gpu_vsync_callback_ = std::move(callback);
+}
+
+void InProcessCommandBuffer::SetGpuVSyncEnabled(bool enabled) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+ ScheduleGpuTask(
+ base::BindOnce(&InProcessCommandBuffer::SetGpuVSyncEnabledOnThread,
+ gpu_thread_weak_ptr_factory_.GetWeakPtr(), enabled));
+}
+
+void InProcessCommandBuffer::SetGpuVSyncEnabledOnThread(bool enabled) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ if (surface_)
+ surface_->SetGpuVSyncEnabled(enabled);
+}
+
+viz::GpuVSyncCallback InProcessCommandBuffer::GetGpuVSyncCallback() {
+ auto handle_gpu_vsync_callback =
+ base::BindRepeating(&InProcessCommandBuffer::HandleGpuVSyncOnOriginThread,
+ client_thread_weak_ptr_);
+ auto forward_callback =
+ [](scoped_refptr<base::SequencedTaskRunner> task_runner,
+ viz::GpuVSyncCallback callback, base::TimeTicks vsync_time,
+ base::TimeDelta vsync_interval) {
+ task_runner->PostTask(
+ FROM_HERE, base::BindOnce(callback, vsync_time, vsync_interval));
+ };
+ return base::BindRepeating(forward_callback,
+ base::RetainedRef(origin_task_runner_),
+ std::move(handle_gpu_vsync_callback));
+}
+
+void InProcessCommandBuffer::HandleGpuVSyncOnOriginThread(
+ base::TimeTicks vsync_time,
+ base::TimeDelta vsync_interval) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(client_sequence_checker_);
+ if (gpu_vsync_callback_)
+ gpu_vsync_callback_.Run(vsync_time, vsync_interval);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/in_process_command_buffer.h b/chromium/gpu/ipc/in_process_command_buffer.h
index 869d11c43c7..fd2b83706e3 100644
--- a/chromium/gpu/ipc/in_process_command_buffer.h
+++ b/chromium/gpu/ipc/in_process_command_buffer.h
@@ -14,6 +14,7 @@
#include <vector>
#include "base/callback.h"
+#include "base/callback_helpers.h"
#include "base/compiler_specific.h"
#include "base/containers/queue.h"
#include "base/macros.h"
@@ -23,6 +24,8 @@
#include "base/single_thread_task_runner.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
+#include "components/viz/common/display/update_vsync_parameters_callback.h"
+#include "components/viz/common/gpu/gpu_vsync_callback.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/client/gpu_control.h"
#include "gpu/command_buffer/common/command_buffer.h"
@@ -99,7 +102,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
bool is_offscreen,
SurfaceHandle surface_handle,
const ContextCreationAttribs& attribs,
- InProcessCommandBuffer* share_group,
GpuMemoryBufferManager* gpu_memory_buffer_manager,
ImageFactory* image_factory,
GpuChannelManagerDelegate* gpu_channel_manager_delegate,
@@ -144,6 +146,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
base::OnceClosure callback) override;
void WaitSyncToken(const SyncToken& sync_token) override;
bool CanWaitUnverifiedSyncToken(const SyncToken& sync_token) override;
+ void SetDisplayTransform(gfx::OverlayTransform transform) override;
// CommandBufferServiceClient implementation (called on gpu thread):
CommandBatchProcessedResult OnCommandBatchProcessed() override;
@@ -171,16 +174,19 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
void BufferPresented(const gfx::PresentationFeedback& feedback) override;
void AddFilter(IPC::MessageFilter* message_filter) override;
int32_t GetRouteID() const override;
+ viz::GpuVSyncCallback GetGpuVSyncCallback() override;
// Upstream this function to GpuControl if needs arise. Can be called on any
// thread.
const GpuFeatureInfo& GetGpuFeatureInfo() const;
- using UpdateVSyncParametersCallback =
- base::RepeatingCallback<void(base::TimeTicks timebase,
- base::TimeDelta interval)>;
void SetUpdateVSyncParametersCallback(
- const UpdateVSyncParametersCallback& callback);
+ viz::UpdateVSyncParametersCallback callback);
+
+ void SetGpuVSyncCallback(viz::GpuVSyncCallback callback);
+ void SetGpuVSyncEnabled(bool enabled);
+
+ void SetGpuVSyncEnabledOnThread(bool enabled);
gpu::ServiceTransferCache* GetTransferCacheForTest() const;
int GetRasterDecoderIdForTest() const;
@@ -191,6 +197,14 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
gpu::SharedImageInterface* GetSharedImageInterface() const;
+ // Provides a callback that can be used to preserve the back buffer for the
+ // GLSurface associated with the command buffer, even after the command buffer
+ // has been destroyed. The back buffer is evicted once the callback is
+ // dispatched.
+ // Note that the caller is responsible for ensuring that the |task_executor|
+ // and |surface_handle| provided in Initialize outlive this callback.
+ base::ScopedClosureRunner GetCacheBackBufferCb();
+
private:
class SharedImageInterface;
@@ -198,7 +212,6 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
SurfaceHandle surface_handle;
const ContextCreationAttribs& attribs;
Capabilities* capabilities; // Ouptut.
- InProcessCommandBuffer* share_command_buffer;
ImageFactory* image_factory;
gpu::raster::GrShaderCache* gr_shader_cache;
GpuProcessActivityFlags* activity_flags;
@@ -206,14 +219,12 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
InitializeOnGpuThreadParams(SurfaceHandle surface_handle,
const ContextCreationAttribs& attribs,
Capabilities* capabilities,
- InProcessCommandBuffer* share_command_buffer,
ImageFactory* image_factory,
gpu::raster::GrShaderCache* gr_shader_cache,
GpuProcessActivityFlags* activity_flags)
: surface_handle(surface_handle),
attribs(attribs),
capabilities(capabilities),
- share_command_buffer(share_command_buffer),
image_factory(image_factory),
gr_shader_cache(gr_shader_cache),
activity_flags(activity_flags) {}
@@ -296,6 +307,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
void UpdateSharedImageOnGpuThread(const Mailbox& mailbox,
const SyncToken& sync_token);
void DestroySharedImageOnGpuThread(const Mailbox& mailbox);
+ void SetDisplayTransformOnGpuThread(gfx::OverlayTransform transform);
// Sets |active_url_| as the active GPU process URL. Should be called on GPU
// thread only.
@@ -312,6 +324,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
const gfx::PresentationFeedback& feedback);
void HandleReturnDataOnOriginThread(std::vector<uint8_t> data);
+ void HandleGpuVSyncOnOriginThread(base::TimeTicks vsync_time,
+ base::TimeDelta vsync_interval);
const CommandBufferId command_buffer_id_;
const ContextUrl active_url_;
@@ -368,7 +382,9 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
scoped_refptr<gl::GLShareGroup> gl_share_group_;
base::WaitableEvent fence_sync_wait_event_;
- UpdateVSyncParametersCallback update_vsync_parameters_completion_callback_;
+ // Callbacks on client thread.
+ viz::UpdateVSyncParametersCallback update_vsync_parameters_callback_;
+ viz::GpuVSyncCallback gpu_vsync_callback_;
// Params pushed each time we call OnSwapBuffers, and popped when a buffer
// is presented or a swap completed.
@@ -381,6 +397,10 @@ class GL_IN_PROCESS_CONTEXT_EXPORT InProcessCommandBuffer
scoped_refptr<SharedContextState> context_state_;
+ base::WeakPtr<InProcessCommandBuffer> client_thread_weak_ptr_;
+
+ // Don't use |client_thread_weak_ptr_factory_| on GPU thread. Use the cached
+ // |client_thread_weak_ptr_| instead.
base::WeakPtrFactory<InProcessCommandBuffer> client_thread_weak_ptr_factory_;
base::WeakPtrFactory<InProcessCommandBuffer> gpu_thread_weak_ptr_factory_;
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
index d07fa5c9e64..3b9c6aeaf55 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
@@ -27,8 +27,8 @@ InProcessGpuThreadHolder::InProcessGpuThreadHolder()
gpu::GPUInfo gpu_info;
gpu::CollectGraphicsInfoForTesting(&gpu_info);
- gpu::GpuFeatureInfo gpu_feature_info = gpu::ComputeGpuFeatureInfo(
- gpu_info, gpu_preferences_, command_line, nullptr);
+ gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(gpu_info, gpu_preferences_,
+ command_line, nullptr);
Start();
}
@@ -73,7 +73,7 @@ void InProcessGpuThreadHolder::InitializeOnGpuThread(
task_executor_ = std::make_unique<GpuInProcessThreadService>(
task_runner(), scheduler_.get(), sync_point_manager_.get(),
mailbox_manager_.get(), nullptr, gl::GLSurfaceFormat(), gpu_feature_info_,
- gpu_preferences_, shared_image_manager_.get(), nullptr);
+ gpu_preferences_, shared_image_manager_.get(), nullptr, nullptr);
completion->Signal();
}
diff --git a/chromium/gpu/ipc/raster_in_process_context.cc b/chromium/gpu/ipc/raster_in_process_context.cc
index 7e5ff70dddc..813a19d8f5c 100644
--- a/chromium/gpu/ipc/raster_in_process_context.cc
+++ b/chromium/gpu/ipc/raster_in_process_context.cc
@@ -65,9 +65,9 @@ ContextResult RasterInProcessContext::Initialize(
std::make_unique<InProcessCommandBuffer>(task_executor, GURL());
auto result = command_buffer_->Initialize(
nullptr /* surface */, true /* is_offscreen */, kNullSurfaceHandle,
- attribs, nullptr /* share_command_buffer */, gpu_memory_buffer_manager,
- image_factory, gpu_channel_manager_delegate, client_task_runner_,
- gr_shader_cache, activity_flags);
+ attribs, gpu_memory_buffer_manager, image_factory,
+ gpu_channel_manager_delegate, client_task_runner_, gr_shader_cache,
+ activity_flags);
if (result != ContextResult::kSuccess) {
DLOG(ERROR) << "Failed to initialize InProcessCommmandBuffer";
return result;
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index 4e4c4ac778e..075753dcfcf 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -81,21 +81,10 @@ jumbo_component("service") {
ldflags = []
if (is_win) {
sources += [
- "child_window_win.cc",
- "child_window_win.h",
- "direct_composition_child_surface_win.cc",
- "direct_composition_child_surface_win.h",
- "direct_composition_surface_win.cc",
- "direct_composition_surface_win.h",
"gpu_memory_buffer_factory_dxgi.cc",
"gpu_memory_buffer_factory_dxgi.h",
"image_transport_surface_win.cc",
]
- libs += [
- "dxgi.lib",
- "dwmapi.lib",
- ]
- ldflags += [ "/DELAYLOAD:dxgi.dll" ]
}
if (is_mac) {
sources += [
diff --git a/chromium/gpu/ipc/service/DEPS b/chromium/gpu/ipc/service/DEPS
index 2a760c78595..c6f81396389 100644
--- a/chromium/gpu/ipc/service/DEPS
+++ b/chromium/gpu/ipc/service/DEPS
@@ -2,6 +2,8 @@ include_rules = [
"+cc/paint",
"+components/crash/core/common/crash_key.h",
"+components/viz/common/features.h",
+ "+components/viz/common/gpu/gpu_vsync_callback.h",
+ "+components/viz/common/gpu/vulkan_context_provider.h",
"+components/viz/common/resources/resource_format.h",
"+third_party/skia",
"+ui/accelerated_widget_mac",
diff --git a/chromium/gpu/ipc/service/child_window_win.cc b/chromium/gpu/ipc/service/child_window_win.cc
deleted file mode 100644
index 289e39ff66f..00000000000
--- a/chromium/gpu/ipc/service/child_window_win.cc
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/service/child_window_win.h"
-
-#include <memory>
-
-#include "base/bind.h"
-#include "base/compiler_specific.h"
-#include "base/memory/weak_ptr.h"
-#include "base/message_loop/message_loop.h"
-#include "base/win/scoped_hdc.h"
-#include "base/win/wrapped_window_proc.h"
-#include "gpu/ipc/common/gpu_messages.h"
-#include "gpu/ipc/service/gpu_channel_manager.h"
-#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gfx/win/hwnd_util.h"
-#include "ui/gfx/win/window_impl.h"
-
-namespace gpu {
-
-namespace {
-
-ATOM g_window_class;
-
-// This runs on the window owner thread.
-void InitializeWindowClass() {
- if (g_window_class)
- return;
-
- WNDCLASSEX intermediate_class;
- base::win::InitializeWindowClass(
- L"Intermediate D3D Window",
- &base::win::WrappedWindowProc<::DefWindowProc>, CS_OWNDC, 0, 0, nullptr,
- reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH)), nullptr, nullptr,
- nullptr, &intermediate_class);
- g_window_class = RegisterClassEx(&intermediate_class);
- if (!g_window_class) {
- LOG(ERROR) << "RegisterClass failed.";
- return;
- }
-}
-
-// Hidden popup window used as a parent for the child surface window.
-// Must be created and destroyed on the thread.
-class HiddenPopupWindow : public gfx::WindowImpl {
- public:
- static HWND Create() {
- gfx::WindowImpl* window = new HiddenPopupWindow;
-
- window->set_window_style(WS_POPUP);
- window->set_window_ex_style(WS_EX_TOOLWINDOW);
- window->Init(GetDesktopWindow(), gfx::Rect());
- EnableWindow(window->hwnd(), FALSE);
- // The |window| instance is now owned by the window user data.
- DCHECK_EQ(window, gfx::GetWindowUserData(window->hwnd()));
- return window->hwnd();
- }
-
- static void Destroy(HWND window) {
- // This uses the fact that the window user data contains a pointer
- // to gfx::WindowImpl instance.
- gfx::WindowImpl* window_data =
- reinterpret_cast<gfx::WindowImpl*>(gfx::GetWindowUserData(window));
- DCHECK_EQ(window, window_data->hwnd());
- DestroyWindow(window);
- delete window_data;
- }
-
- private:
- // Explicitly do nothing in Close. We do this as some external apps may get a
- // handle to this window and attempt to close it.
- void OnClose() {}
-
- CR_BEGIN_MSG_MAP_EX(HiddenPopupWindow)
- CR_MSG_WM_CLOSE(OnClose)
- CR_END_MSG_MAP()
-
- CR_MSG_MAP_CLASS_DECLARATIONS(HiddenPopupWindow)
-};
-
-// This runs on the window owner thread.
-void CreateWindowsOnThread(const gfx::Size& size,
- base::WaitableEvent* event,
- HWND* child_window,
- HWND* parent_window) {
- InitializeWindowClass();
- DCHECK(g_window_class);
-
- // Create hidden parent window on the current thread.
- *parent_window = HiddenPopupWindow::Create();
- // Create child window.
- // WS_EX_NOPARENTNOTIFY and WS_EX_LAYERED make the window transparent for
- // input. WS_EX_NOREDIRECTIONBITMAP avoids allocating a
- // bitmap that would otherwise be allocated with WS_EX_LAYERED, the bitmap is
- // only necessary if using Gdi objects with the window.
- HWND window = CreateWindowEx(
- WS_EX_NOPARENTNOTIFY | WS_EX_LAYERED | WS_EX_TRANSPARENT |
- WS_EX_NOREDIRECTIONBITMAP,
- reinterpret_cast<wchar_t*>(g_window_class), L"",
- WS_CHILDWINDOW | WS_DISABLED | WS_VISIBLE, 0, 0, size.width(),
- size.height(), *parent_window, nullptr, nullptr, nullptr);
- CHECK(window);
- *child_window = window;
- event->Signal();
-}
-
-// This runs on the main thread after the window was destroyed on window owner
-// thread.
-void DestroyThread(std::unique_ptr<base::Thread> thread) {
- thread->Stop();
-}
-
-// This runs on the window owner thread.
-void DestroyWindowsOnThread(HWND child_window, HWND hidden_popup_window) {
- DestroyWindow(child_window);
- HiddenPopupWindow::Destroy(hidden_popup_window);
-}
-
-} // namespace
-
-ChildWindowWin::ChildWindowWin(
- base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
- HWND parent_window)
- : parent_window_(parent_window), window_(nullptr), delegate_(delegate) {}
-
-bool ChildWindowWin::Initialize() {
- if (window_)
- return true;
-
- thread_ = std::make_unique<base::Thread>("Window owner thread");
- base::Thread::Options options(base::MessageLoop::TYPE_UI, 0);
- thread_->StartWithOptions(options);
-
- base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
-
- RECT window_rect;
- GetClientRect(parent_window_, &window_rect);
-
- thread_->task_runner()->PostTask(
- FROM_HERE,
- base::BindOnce(&CreateWindowsOnThread, gfx::Rect(window_rect).size(),
- &event, &window_, &initial_parent_window_));
- event.Wait();
-
- delegate_->DidCreateAcceleratedSurfaceChildWindow(parent_window_, window_);
- return true;
-}
-
-ChildWindowWin::~ChildWindowWin() {
- if (thread_) {
- scoped_refptr<base::TaskRunner> task_runner = thread_->task_runner();
- task_runner->PostTaskAndReply(
- FROM_HERE,
- base::BindOnce(&DestroyWindowsOnThread, window_,
- initial_parent_window_),
- base::BindOnce(&DestroyThread, base::Passed(std::move(thread_))));
- }
-}
-
-scoped_refptr<base::TaskRunner> ChildWindowWin::GetTaskRunnerForTesting() {
- DCHECK(thread_);
- return thread_->task_runner();
-}
-
-} // namespace gpu
diff --git a/chromium/gpu/ipc/service/child_window_win.h b/chromium/gpu/ipc/service/child_window_win.h
deleted file mode 100644
index 2b29fc641a8..00000000000
--- a/chromium/gpu/ipc/service/child_window_win.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
-#define GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
-
-#include "base/memory/weak_ptr.h"
-#include "base/task_runner.h"
-#include "base/threading/thread.h"
-#include "gpu/ipc/service/image_transport_surface_delegate.h"
-
-#include <windows.h>
-
-namespace gpu {
-
-// The window DirectComposition renders into needs to be owned by the process
-// that's currently doing the rendering. The class creates and owns a window
-// which is reparented by the browser to be a child of its window.
-class ChildWindowWin {
- public:
- ChildWindowWin(base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
- HWND parent_window);
- ~ChildWindowWin();
-
- bool Initialize();
- HWND window() const { return window_; }
-
- scoped_refptr<base::TaskRunner> GetTaskRunnerForTesting();
-
- private:
- // The window owner thread.
- std::unique_ptr<base::Thread> thread_;
- // The eventual parent of the window living in the browser process.
- HWND parent_window_;
- HWND window_;
- // The window is initially created with this parent window. We need to keep it
- // around so that we can destroy it at the end.
- HWND initial_parent_window_;
- base::WeakPtr<ImageTransportSurfaceDelegate> delegate_;
-
- DISALLOW_COPY_AND_ASSIGN(ChildWindowWin);
-};
-
-} // namespace gpu
-
-#endif // GPU_IPC_SERVICE_CHILD_WINDOW_WIN_H_
diff --git a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
deleted file mode 100644
index 68b7ba98004..00000000000
--- a/chromium/gpu/ipc/service/direct_composition_child_surface_win.cc
+++ /dev/null
@@ -1,447 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/service/direct_composition_child_surface_win.h"
-
-#include <d3d11_1.h>
-#include <dcomptypes.h>
-
-#include "base/macros.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/trace_event/trace_event.h"
-#include "base/win/windows_version.h"
-#include "ui/display/display_switches.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gl/egl_util.h"
-#include "ui/gl/gl_angle_util_win.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_surface_egl.h"
-#include "ui/gl/scoped_make_current.h"
-
-#ifndef EGL_ANGLE_flexible_surface_compatibility
-#define EGL_ANGLE_flexible_surface_compatibility 1
-#define EGL_FLEXIBLE_SURFACE_COMPATIBILITY_SUPPORTED_ANGLE 0x33A6
-#endif /* EGL_ANGLE_flexible_surface_compatibility */
-
-#ifndef EGL_ANGLE_d3d_texture_client_buffer
-#define EGL_ANGLE_d3d_texture_client_buffer 1
-#define EGL_D3D_TEXTURE_ANGLE 0x33A3
-#endif /* EGL_ANGLE_d3d_texture_client_buffer */
-
-namespace gpu {
-
-namespace {
-// Only one DirectComposition surface can be rendered into at a time. Track
-// here which IDCompositionSurface is being rendered into. If another context
-// is made current, then this surface will be suspended.
-IDCompositionSurface* g_current_surface;
-
-// Returns true if swap chain tearing is supported.
-bool IsSwapChainTearingSupported() {
- static const bool supported = [] {
- // Swap chain tearing is used only if vsync is disabled explicitly.
- if (!base::CommandLine::ForCurrentProcess()->HasSwitch(
- switches::kDisableGpuVsync))
- return false;
-
- // Swap chain tearing is supported only on Windows 10 Anniversary Edition
- // (Redstone 1) and above.
- if (base::win::GetVersion() < base::win::VERSION_WIN10_RS1)
- return false;
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
- if (!d3d11_device) {
- DLOG(ERROR) << "Not using swap chain tearing because failed to retrieve "
- "D3D11 device from ANGLE";
- return false;
- }
- Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
- d3d11_device.As(&dxgi_device);
- DCHECK(dxgi_device);
- Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
- dxgi_device->GetAdapter(&dxgi_adapter);
- DCHECK(dxgi_adapter);
- Microsoft::WRL::ComPtr<IDXGIFactory5> dxgi_factory;
- if (FAILED(dxgi_adapter->GetParent(IID_PPV_ARGS(&dxgi_factory)))) {
- DLOG(ERROR) << "Not using swap chain tearing because failed to retrieve "
- "IDXGIFactory5 interface";
- return false;
- }
-
- BOOL present_allow_tearing = FALSE;
- DCHECK(dxgi_factory);
- if (FAILED(dxgi_factory->CheckFeatureSupport(
- DXGI_FEATURE_PRESENT_ALLOW_TEARING, &present_allow_tearing,
- sizeof(present_allow_tearing)))) {
- DLOG(ERROR)
- << "Not using swap chain tearing because CheckFeatureSupport failed";
- return false;
- }
- return !!present_allow_tearing;
- }();
- return supported;
-}
-
-} // namespace
-
-DirectCompositionChildSurfaceWin::DirectCompositionChildSurfaceWin() = default;
-
-DirectCompositionChildSurfaceWin::~DirectCompositionChildSurfaceWin() {
- Destroy();
-}
-
-bool DirectCompositionChildSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
- d3d11_device_ = gl::QueryD3D11DeviceObjectFromANGLE();
- dcomp_device_ = gl::QueryDirectCompositionDevice(d3d11_device_);
- if (!dcomp_device_)
- return false;
-
- EGLDisplay display = GetDisplay();
-
- EGLint pbuffer_attribs[] = {
- EGL_WIDTH,
- 1,
- EGL_HEIGHT,
- 1,
- EGL_FLEXIBLE_SURFACE_COMPATIBILITY_SUPPORTED_ANGLE,
- EGL_TRUE,
- EGL_NONE,
- };
-
- default_surface_ =
- eglCreatePbufferSurface(display, GetConfig(), pbuffer_attribs);
- if (!default_surface_) {
- DLOG(ERROR) << "eglCreatePbufferSurface failed with error "
- << ui::GetLastEGLErrorString();
- return false;
- }
-
- return true;
-}
-
-bool DirectCompositionChildSurfaceWin::ReleaseDrawTexture(bool will_discard) {
- // At the end we'll MakeCurrent the same surface but its handle will be
- // |default_surface_|.
- ui::ScopedReleaseCurrent release_current;
-
- if (real_surface_) {
- eglDestroySurface(GetDisplay(), real_surface_);
- real_surface_ = nullptr;
- }
-
- if (dcomp_surface_.Get() == g_current_surface)
- g_current_surface = nullptr;
-
- if (draw_texture_) {
- draw_texture_.Reset();
- if (dcomp_surface_) {
- HRESULT hr = dcomp_surface_->EndDraw();
- if (FAILED(hr)) {
- DLOG(ERROR) << "EndDraw failed with error " << std::hex << hr;
- return false;
- }
- dcomp_surface_serial_++;
- } else if (!will_discard) {
- bool allow_tearing = IsSwapChainTearingSupported();
- UINT interval = first_swap_ || !vsync_enabled_ || allow_tearing ? 0 : 1;
- UINT flags = allow_tearing ? DXGI_PRESENT_ALLOW_TEARING : 0;
- DXGI_PRESENT_PARAMETERS params = {};
- RECT dirty_rect = swap_rect_.ToRECT();
- params.DirtyRectsCount = 1;
- params.pDirtyRects = &dirty_rect;
- HRESULT hr = swap_chain_->Present1(interval, flags, &params);
- // Ignore DXGI_STATUS_OCCLUDED since that's not an error but only
- // indicates that the window is occluded and we can stop rendering.
- if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
- DLOG(ERROR) << "Present1 failed with error " << std::hex << hr;
- return false;
- }
- if (first_swap_) {
- // Wait for the GPU to finish executing its commands before
- // committing the DirectComposition tree, or else the swapchain
- // may flicker black when it's first presented.
- first_swap_ = false;
- Microsoft::WRL::ComPtr<IDXGIDevice2> dxgi_device2;
- d3d11_device_.As(&dxgi_device2);
- DCHECK(dxgi_device2);
- base::WaitableEvent event(
- base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
- hr = dxgi_device2->EnqueueSetEvent(event.handle());
- DCHECK(SUCCEEDED(hr));
- event.Wait();
- }
- }
- }
- return true;
-}
-
-void DirectCompositionChildSurfaceWin::Destroy() {
- if (default_surface_) {
- if (!eglDestroySurface(GetDisplay(), default_surface_)) {
- DLOG(ERROR) << "eglDestroySurface failed with error "
- << ui::GetLastEGLErrorString();
- }
- default_surface_ = nullptr;
- }
- if (real_surface_) {
- if (!eglDestroySurface(GetDisplay(), real_surface_)) {
- DLOG(ERROR) << "eglDestroySurface failed with error "
- << ui::GetLastEGLErrorString();
- }
- real_surface_ = nullptr;
- }
- if (dcomp_surface_ && (dcomp_surface_.Get() == g_current_surface)) {
- HRESULT hr = dcomp_surface_->EndDraw();
- if (FAILED(hr))
- DLOG(ERROR) << "EndDraw failed with error " << std::hex << hr;
- g_current_surface = nullptr;
- }
- draw_texture_.Reset();
- dcomp_surface_.Reset();
-}
-
-gfx::Size DirectCompositionChildSurfaceWin::GetSize() {
- return size_;
-}
-
-bool DirectCompositionChildSurfaceWin::IsOffscreen() {
- return false;
-}
-
-void* DirectCompositionChildSurfaceWin::GetHandle() {
- return real_surface_ ? real_surface_ : default_surface_;
-}
-
-gfx::SwapResult DirectCompositionChildSurfaceWin::SwapBuffers(
- PresentationCallback callback) {
- TRACE_EVENT1("gpu", "DirectCompositionChildSurfaceWin::SwapBuffers", "size",
- size_.ToString());
- // PresentationCallback is handled by DirectCompositionSurfaceWin. The child
- // surface doesn't need provide presentation feedback.
- DCHECK(!callback);
- if (!ReleaseDrawTexture(false /* will_discard */))
- return gfx::SwapResult::SWAP_FAILED;
- return gfx::SwapResult::SWAP_ACK;
-}
-
-bool DirectCompositionChildSurfaceWin::FlipsVertically() const {
- return true;
-}
-
-bool DirectCompositionChildSurfaceWin::SupportsPostSubBuffer() {
- return true;
-}
-
-bool DirectCompositionChildSurfaceWin::OnMakeCurrent(gl::GLContext* context) {
- if (g_current_surface != dcomp_surface_.Get()) {
- if (g_current_surface) {
- HRESULT hr = g_current_surface->SuspendDraw();
- if (FAILED(hr)) {
- DLOG(ERROR) << "SuspendDraw failed with error " << std::hex << hr;
- return false;
- }
- g_current_surface = nullptr;
- }
- if (draw_texture_) {
- HRESULT hr = dcomp_surface_->ResumeDraw();
- if (FAILED(hr)) {
- DLOG(ERROR) << "ResumeDraw failed with error " << std::hex << hr;
- return false;
- }
- g_current_surface = dcomp_surface_.Get();
- }
- }
- return true;
-}
-
-bool DirectCompositionChildSurfaceWin::SupportsDCLayers() const {
- return true;
-}
-
-bool DirectCompositionChildSurfaceWin::SetDrawRectangle(
- const gfx::Rect& rectangle) {
- if (!gfx::Rect(size_).Contains(rectangle)) {
- DLOG(ERROR) << "Draw rectangle must be contained within size of surface";
- return false;
- }
-
- if (draw_texture_) {
- DLOG(ERROR) << "SetDrawRectangle must be called only once per swap buffers";
- return false;
- }
- DCHECK(!real_surface_);
- DCHECK(!g_current_surface);
-
- if (gfx::Rect(size_) != rectangle && !swap_chain_ && !dcomp_surface_) {
- DLOG(ERROR) << "First draw to surface must draw to everything";
- return false;
- }
-
- // At the end we'll MakeCurrent the same surface but its handle will be
- // |real_surface_|.
- ui::ScopedReleaseCurrent release_current;
-
- DXGI_FORMAT output_format =
- is_hdr_ ? DXGI_FORMAT_R16G16B16A16_FLOAT : DXGI_FORMAT_B8G8R8A8_UNORM;
- if (enable_dc_layers_ && !dcomp_surface_) {
- TRACE_EVENT2("gpu", "DirectCompositionChildSurfaceWin::CreateSurface",
- "width", size_.width(), "height", size_.height());
- swap_chain_.Reset();
- // Always treat as premultiplied, because an underlay could cause it to
- // become transparent.
- HRESULT hr = dcomp_device_->CreateSurface(
- size_.width(), size_.height(), output_format,
- DXGI_ALPHA_MODE_PREMULTIPLIED, &dcomp_surface_);
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateSurface failed with error " << std::hex << hr;
- return false;
- }
- } else if (!enable_dc_layers_ && !swap_chain_) {
- TRACE_EVENT2("gpu", "DirectCompositionChildSurfaceWin::CreateSwapChain",
- "width", size_.width(), "height", size_.height());
- dcomp_surface_.Reset();
-
- DXGI_ALPHA_MODE alpha_mode =
- has_alpha_ ? DXGI_ALPHA_MODE_PREMULTIPLIED : DXGI_ALPHA_MODE_IGNORE;
- Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
- d3d11_device_.As(&dxgi_device);
- DCHECK(dxgi_device);
- Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
- dxgi_device->GetAdapter(&dxgi_adapter);
- DCHECK(dxgi_adapter);
- Microsoft::WRL::ComPtr<IDXGIFactory2> dxgi_factory;
- dxgi_adapter->GetParent(IID_PPV_ARGS(&dxgi_factory));
- DCHECK(dxgi_factory);
-
- DXGI_SWAP_CHAIN_DESC1 desc = {};
- desc.Width = size_.width();
- desc.Height = size_.height();
- desc.Format = output_format;
- desc.Stereo = FALSE;
- desc.SampleDesc.Count = 1;
- desc.BufferCount = 2;
- desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
- desc.Scaling = DXGI_SCALING_STRETCH;
- desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
- desc.AlphaMode = alpha_mode;
- desc.Flags =
- IsSwapChainTearingSupported() ? DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING : 0;
- HRESULT hr = dxgi_factory->CreateSwapChainForComposition(
- d3d11_device_.Get(), &desc, nullptr, &swap_chain_);
- first_swap_ = true;
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateSwapChainForComposition failed with error "
- << std::hex << hr;
- return false;
- }
- }
-
- swap_rect_ = rectangle;
- draw_offset_ = gfx::Vector2d();
-
- if (dcomp_surface_) {
- POINT update_offset;
- const RECT rect = rectangle.ToRECT();
- HRESULT hr = dcomp_surface_->BeginDraw(&rect, IID_PPV_ARGS(&draw_texture_),
- &update_offset);
- if (FAILED(hr)) {
- DLOG(ERROR) << "BeginDraw failed with error " << std::hex << hr;
- return false;
- }
- draw_offset_ = gfx::Point(update_offset) - rectangle.origin();
- } else {
- swap_chain_->GetBuffer(0, IID_PPV_ARGS(&draw_texture_));
- }
- DCHECK(draw_texture_);
-
- g_current_surface = dcomp_surface_.Get();
-
- EGLint pbuffer_attribs[] = {
- EGL_WIDTH,
- size_.width(),
- EGL_HEIGHT,
- size_.height(),
- EGL_FLEXIBLE_SURFACE_COMPATIBILITY_SUPPORTED_ANGLE,
- EGL_TRUE,
- EGL_NONE,
- };
-
- EGLClientBuffer buffer =
- reinterpret_cast<EGLClientBuffer>(draw_texture_.Get());
- real_surface_ =
- eglCreatePbufferFromClientBuffer(GetDisplay(), EGL_D3D_TEXTURE_ANGLE,
- buffer, GetConfig(), pbuffer_attribs);
- if (!real_surface_) {
- DLOG(ERROR) << "eglCreatePbufferFromClientBuffer failed with error "
- << ui::GetLastEGLErrorString();
- return false;
- }
-
- return true;
-}
-
-gfx::Vector2d DirectCompositionChildSurfaceWin::GetDrawOffset() const {
- return draw_offset_;
-}
-
-void DirectCompositionChildSurfaceWin::SetVSyncEnabled(bool enabled) {
- vsync_enabled_ = enabled;
-}
-
-bool DirectCompositionChildSurfaceWin::Resize(const gfx::Size& size,
- float scale_factor,
- ColorSpace color_space,
- bool has_alpha) {
- bool size_changed = size != size_;
- bool is_hdr = color_space == ColorSpace::SCRGB_LINEAR;
- bool hdr_changed = is_hdr != is_hdr_;
- bool alpha_changed = has_alpha != has_alpha_;
- if (!size_changed && !hdr_changed && !alpha_changed)
- return true;
-
- // This will release indirect references to swap chain (|real_surface_|) by
- // binding |default_surface_| as the default framebuffer.
- if (!ReleaseDrawTexture(true /* will_discard */))
- return false;
-
- size_ = size;
- is_hdr_ = is_hdr;
- has_alpha_ = has_alpha;
-
- // ResizeBuffers can't change alpha blending mode.
- if (swap_chain_ && !alpha_changed) {
- DXGI_FORMAT format =
- is_hdr_ ? DXGI_FORMAT_R16G16B16A16_FLOAT : DXGI_FORMAT_B8G8R8A8_UNORM;
- UINT flags =
- IsSwapChainTearingSupported() ? DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING : 0;
- HRESULT hr = swap_chain_->ResizeBuffers(2 /* BufferCount */, size.width(),
- size.height(), format, flags);
- UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.SwapChainResizeResult",
- SUCCEEDED(hr));
- if (SUCCEEDED(hr))
- return true;
- DLOG(ERROR) << "ResizeBuffers failed with error 0x" << std::hex << hr;
- }
- // Next SetDrawRectangle call will recreate the swap chain or surface.
- swap_chain_.Reset();
- dcomp_surface_.Reset();
- return true;
-}
-
-bool DirectCompositionChildSurfaceWin::SetEnableDCLayers(bool enable) {
- if (enable_dc_layers_ == enable)
- return true;
- enable_dc_layers_ = enable;
- // Next SetDrawRectangle call will recreate the swap chain or surface.
- if (!ReleaseDrawTexture(true /* will_discard */))
- return false;
- swap_chain_.Reset();
- dcomp_surface_.Reset();
- return true;
-}
-
-} // namespace gpu
diff --git a/chromium/gpu/ipc/service/direct_composition_child_surface_win.h b/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
deleted file mode 100644
index 52cbce9099e..00000000000
--- a/chromium/gpu/ipc/service/direct_composition_child_surface_win.h
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_SERVICE_DIRECT_COMPOSITION_CHILD_SURFACE_WIN_H_
-#define GPU_IPC_SERVICE_DIRECT_COMPOSITION_CHILD_SURFACE_WIN_H_
-
-#include <windows.h>
-#include <d3d11.h>
-#include <dcomp.h>
-#include <wrl/client.h>
-
-#include "gpu/ipc/service/gpu_ipc_service_export.h"
-#include "ui/gl/gl_surface_egl.h"
-
-namespace gpu {
-
-class GPU_IPC_SERVICE_EXPORT DirectCompositionChildSurfaceWin
- : public gl::GLSurfaceEGL {
- public:
- DirectCompositionChildSurfaceWin();
-
- // GLSurfaceEGL implementation.
- bool Initialize(gl::GLSurfaceFormat format) override;
- void Destroy() override;
- gfx::Size GetSize() override;
- bool IsOffscreen() override;
- void* GetHandle() override;
- gfx::SwapResult SwapBuffers(PresentationCallback callback) override;
- bool FlipsVertically() const override;
- bool SupportsPostSubBuffer() override;
- bool OnMakeCurrent(gl::GLContext* context) override;
- bool SupportsDCLayers() const override;
- bool SetDrawRectangle(const gfx::Rect& rect) override;
- gfx::Vector2d GetDrawOffset() const override;
- void SetVSyncEnabled(bool enabled) override;
- bool Resize(const gfx::Size& size,
- float scale_factor,
- ColorSpace color_space,
- bool has_alpha) override;
- bool SetEnableDCLayers(bool enable) override;
-
- const Microsoft::WRL::ComPtr<IDCompositionSurface>& dcomp_surface() const {
- return dcomp_surface_;
- }
-
- const Microsoft::WRL::ComPtr<IDXGISwapChain1>& swap_chain() const {
- return swap_chain_;
- }
-
- uint64_t dcomp_surface_serial() const { return dcomp_surface_serial_; }
-
- protected:
- ~DirectCompositionChildSurfaceWin() override;
-
- private:
- // Release the texture that's currently being drawn to. If will_discard is
- // true then the surface should be discarded without swapping any contents
- // to it. Returns false if this fails.
- bool ReleaseDrawTexture(bool will_discard);
-
- gfx::Size size_ = gfx::Size(1, 1);
- bool enable_dc_layers_ = false;
- bool is_hdr_ = false;
- bool has_alpha_ = true;
- bool vsync_enabled_ = true;
-
- // This is a placeholder surface used when not rendering to the
- // DirectComposition surface.
- EGLSurface default_surface_ = 0;
-
- // This is the real surface representing the backbuffer. It may be null
- // outside of a BeginDraw/EndDraw pair.
- EGLSurface real_surface_ = 0;
- bool first_swap_ = true;
- gfx::Rect swap_rect_;
- gfx::Vector2d draw_offset_;
-
- // This is a number that increments once for every EndDraw on a surface, and
- // is used to determine when the contents have changed so Commit() needs to
- // be called on the device.
- uint64_t dcomp_surface_serial_ = 0;
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device_;
- Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device_;
- Microsoft::WRL::ComPtr<IDCompositionSurface> dcomp_surface_;
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain_;
- Microsoft::WRL::ComPtr<ID3D11Texture2D> draw_texture_;
-
- DISALLOW_COPY_AND_ASSIGN(DirectCompositionChildSurfaceWin);
-};
-
-} // namespace gpu
-
-#endif // GPU_IPC_SERVICE_DIRECT_COMPOSITION_CHILD_SURFACE_WIN_H_
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.cc b/chromium/gpu/ipc/service/direct_composition_surface_win.cc
deleted file mode 100644
index f95a789b3cb..00000000000
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.cc
+++ /dev/null
@@ -1,2200 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/service/direct_composition_surface_win.h"
-
-#include <d3d11_1.h>
-#include <dcomptypes.h>
-#include <dxgi1_6.h>
-
-#include <utility>
-
-#include "base/containers/circular_deque.h"
-#include "base/feature_list.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/strings/stringprintf.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/trace_event/trace_event.h"
-#include "base/win/scoped_handle.h"
-#include "base/win/windows_types.h"
-#include "base/win/windows_version.h"
-#include "components/crash/core/common/crash_key.h"
-#include "gpu/command_buffer/service/feature_info.h"
-#include "gpu/config/gpu_finch_features.h"
-#include "gpu/ipc/service/direct_composition_child_surface_win.h"
-#include "gpu/ipc/service/gpu_channel_manager.h"
-#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
-#include "ui/display/display_switches.h"
-#include "ui/gfx/color_space_win.h"
-#include "ui/gfx/geometry/rect_conversions.h"
-#include "ui/gfx/geometry/size_conversions.h"
-#include "ui/gfx/native_widget_types.h"
-#include "ui/gfx/transform.h"
-#include "ui/gl/dc_renderer_layer_params.h"
-#include "ui/gl/egl_util.h"
-#include "ui/gl/gl_angle_util_win.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_image_dxgi.h"
-#include "ui/gl/gl_image_memory.h"
-#include "ui/gl/gl_surface_egl.h"
-#include "ui/gl/gl_surface_presentation_helper.h"
-
-#ifndef EGL_ANGLE_flexible_surface_compatibility
-#define EGL_ANGLE_flexible_surface_compatibility 1
-#define EGL_FLEXIBLE_SURFACE_COMPATIBILITY_SUPPORTED_ANGLE 0x33A6
-#endif /* EGL_ANGLE_flexible_surface_compatibility */
-
-namespace gpu {
-namespace {
-// Some drivers fail to correctly handle BT.709 video in overlays. This flag
-// converts them to BT.601 in the video processor.
-const base::Feature kFallbackBT709VideoToBT601{
- "FallbackBT709VideoToBT601", base::FEATURE_DISABLED_BY_DEFAULT};
-
-bool SizeContains(const gfx::Size& a, const gfx::Size& b) {
- return gfx::Rect(a).Contains(gfx::Rect(b));
-}
-
-bool IsProtectedVideo(ui::ProtectedVideoType protected_video_type) {
- return protected_video_type != ui::ProtectedVideoType::kClear;
-}
-
-// This keeps track of whether the previous 30 frames used Overlays or GPU
-// composition to present.
-class PresentationHistory {
- public:
- static const int kPresentsToStore = 30;
-
- PresentationHistory() {}
-
- void AddSample(DXGI_FRAME_PRESENTATION_MODE mode) {
- if (mode == DXGI_FRAME_PRESENTATION_MODE_COMPOSED)
- composed_count_++;
-
- presents_.push_back(mode);
- if (presents_.size() > kPresentsToStore) {
- DXGI_FRAME_PRESENTATION_MODE first_mode = presents_.front();
- if (first_mode == DXGI_FRAME_PRESENTATION_MODE_COMPOSED)
- composed_count_--;
- presents_.pop_front();
- }
- }
-
- bool valid() const { return presents_.size() >= kPresentsToStore; }
- int composed_count() const { return composed_count_; }
-
- private:
- base::circular_deque<DXGI_FRAME_PRESENTATION_MODE> presents_;
- int composed_count_ = 0;
-
- DISALLOW_COPY_AND_ASSIGN(PresentationHistory);
-};
-
-class ScopedReleaseKeyedMutex {
- public:
- ScopedReleaseKeyedMutex(Microsoft::WRL::ComPtr<IDXGIKeyedMutex> keyed_mutex,
- UINT64 key)
- : keyed_mutex_(keyed_mutex), key_(key) {
- DCHECK(keyed_mutex);
- }
-
- ~ScopedReleaseKeyedMutex() {
- HRESULT hr = keyed_mutex_->ReleaseSync(key_);
- DCHECK(SUCCEEDED(hr));
- }
-
- private:
- Microsoft::WRL::ComPtr<IDXGIKeyedMutex> keyed_mutex_;
- UINT64 key_ = 0;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedReleaseKeyedMutex);
-};
-
-struct OverlaySupportInfo {
- OverlayFormat overlay_format;
- DXGI_FORMAT dxgi_format;
- UINT flags;
-};
-
-// Indicates if overlay support has been initialized.
-bool g_overlay_support_initialized = false;
-
-// Indicates support for either NV12 or YUY2 hardware overlays.
-bool g_supports_overlays = false;
-
-// Indicates support for hardware overlay scaling.
-bool g_supports_scaled_overlays = true;
-
-// Used for workaround limiting overlay size to monitor size.
-gfx::Size g_overlay_monitor_size;
-
-// Preferred overlay format set when detecting hardware overlay support during
-// initialization. Set to NV12 by default so that it's used when enabling
-// overlays using command line flags.
-OverlayFormat g_overlay_format_used = OverlayFormat::kNV12;
-DXGI_FORMAT g_overlay_dxgi_format_used = DXGI_FORMAT_NV12;
-
-// This is the raw support info, which shouldn't depend on field trial state, or
-// command line flags. Ordered by most preferred to least preferred format.
-OverlaySupportInfo g_overlay_support_info[] = {
- {OverlayFormat::kNV12, DXGI_FORMAT_NV12, 0},
- {OverlayFormat::kYUY2, DXGI_FORMAT_YUY2, 0},
- {OverlayFormat::kBGRA, DXGI_FORMAT_B8G8R8A8_UNORM, 0},
-};
-
-// These values are persisted to logs. Entries should not be renumbered and
-// numeric values should never be reused.
-enum class OverlayFullScreenTypes {
- kWindowMode,
- kFullScreenMode,
- kFullScreenInWidthOnly,
- kFullScreenInHeightOnly,
- kOverSizedFullScreen,
- kNotAvailable,
- kMaxValue = kNotAvailable,
-};
-
-void RecordOverlayFullScreenTypes(bool workaround_applied,
- const gfx::Rect& overlay_onscreen_rect) {
- OverlayFullScreenTypes full_screen_type;
- const gfx::Size& screen_size = g_overlay_monitor_size;
- const gfx::Size& overlay_onscreen_size = overlay_onscreen_rect.size();
- const gfx::Point& origin = overlay_onscreen_rect.origin();
-
- // The kFullScreenInWidthOnly type might be over counted, it's possible the
- // video width fits the screen but it's still in a window mode.
- if (screen_size.IsEmpty()) {
- full_screen_type = OverlayFullScreenTypes::kNotAvailable;
- } else if (origin.IsOrigin() && overlay_onscreen_size == screen_size)
- full_screen_type = OverlayFullScreenTypes::kFullScreenMode;
- else if (overlay_onscreen_size.width() > screen_size.width() ||
- overlay_onscreen_size.height() > screen_size.height()) {
- full_screen_type = OverlayFullScreenTypes::kOverSizedFullScreen;
- } else if (origin.x() == 0 &&
- overlay_onscreen_size.width() == screen_size.width()) {
- full_screen_type = OverlayFullScreenTypes::kFullScreenInWidthOnly;
- } else if (origin.y() == 0 &&
- overlay_onscreen_size.height() == screen_size.height()) {
- full_screen_type = OverlayFullScreenTypes::kFullScreenInHeightOnly;
- } else {
- full_screen_type = OverlayFullScreenTypes::kWindowMode;
- }
-
- UMA_HISTOGRAM_ENUMERATION("GPU.DirectComposition.OverlayFullScreenTypes",
- full_screen_type);
-
- // TODO(magchen): To be deleted once we know if this workaround is still
- // needed
- UMA_HISTOGRAM_BOOLEAN(
- "GPU.DirectComposition.DisableLargerThanScreenOverlaysWorkaround",
- workaround_applied);
-}
-
-const char* ProtectedVideoTypeToString(ui::ProtectedVideoType type) {
- switch (type) {
- case ui::ProtectedVideoType::kClear:
- return "Clear";
- case ui::ProtectedVideoType::kSoftwareProtected:
- if (g_supports_overlays)
- return "SoftwareProtected.HasOverlaySupport";
- else
- return "SoftwareProtected.NoOverlaySupport";
- case ui::ProtectedVideoType::kHardwareProtected:
- return "HardwareProtected";
- }
-}
-
-void InitializeHardwareOverlaySupport() {
- if (g_overlay_support_initialized)
- return;
- g_overlay_support_initialized = true;
-
- // Check for DirectComposition support first to prevent likely crashes.
- if (!DirectCompositionSurfaceWin::IsDirectCompositionSupported())
- return;
-
- // Before Windows 10 Anniversary Update (Redstone 1), overlay planes wouldn't
- // be assigned to non-UWP apps.
- if (base::win::GetVersion() < base::win::VERSION_WIN10_RS1)
- return;
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
- if (!d3d11_device) {
- DLOG(ERROR) << "Failed to retrieve D3D11 device";
- return;
- }
-
- Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
- if (FAILED(d3d11_device.As(&dxgi_device))) {
- DLOG(ERROR) << "Failed to retrieve DXGI device";
- return;
- }
-
- Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
- if (FAILED(dxgi_device->GetAdapter(&dxgi_adapter))) {
- DLOG(ERROR) << "Failed to retrieve DXGI adapter";
- return;
- }
-
- // This will fail if the D3D device is "Microsoft Basic Display Adapter".
- Microsoft::WRL::ComPtr<ID3D11VideoDevice> video_device;
- if (FAILED(d3d11_device.As(&video_device))) {
- DLOG(ERROR) << "Failed to retrieve video device";
- return;
- }
-
- bool supports_nv12_rec709 = false;
- unsigned int i = 0;
- while (true) {
- Microsoft::WRL::ComPtr<IDXGIOutput> output;
- if (FAILED(dxgi_adapter->EnumOutputs(i++, &output)))
- break;
- DCHECK(output);
- Microsoft::WRL::ComPtr<IDXGIOutput3> output3;
- if (FAILED(output.As(&output3)))
- continue;
- DCHECK(output3);
-
- for (auto& info : g_overlay_support_info) {
- if (FAILED(output3->CheckOverlaySupport(
- info.dxgi_format, d3d11_device.Get(), &info.flags))) {
- continue;
- }
- // Per Intel's request, use NV12 only when
- // COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P709 is also supported. Rec 709 is
- // commonly used for H.264 and HEVC. At least one Intel Gen9 SKU will not
- // support NV12 overlays.
- if (info.overlay_format == OverlayFormat::kNV12) {
- UINT color_space_support_flags = 0;
- Microsoft::WRL::ComPtr<IDXGIOutput4> output4;
- if (FAILED(output.As(&output4)))
- continue;
-
- if (FAILED(output4->CheckOverlayColorSpaceSupport(
- info.dxgi_format, DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P709,
- d3d11_device.Get(), &color_space_support_flags))) {
- continue;
- }
- supports_nv12_rec709 =
- !!(color_space_support_flags &
- DXGI_OVERLAY_COLOR_SPACE_SUPPORT_FLAG_PRESENT);
- }
-
- // Formats are ordered by most preferred to least preferred. Don't choose
- // a less preferred format, but keep going so that we can record overlay
- // support for all formats in UMA.
- if (g_supports_overlays)
- continue;
- // Don't use BGRA overlays in any case, but record support in UMA.
- if (info.overlay_format == OverlayFormat::kBGRA)
- continue;
- // Overlays are supported for NV12 only if the feature flag to prefer NV12
- // over YUY2 is enabled.
- bool prefer_nv12 = base::FeatureList::IsEnabled(
- features::kDirectCompositionPreferNV12Overlays);
- if (info.overlay_format == OverlayFormat::kNV12 &&
- (!prefer_nv12 || !supports_nv12_rec709))
- continue;
- // Some new Intel drivers only claim to support unscaled overlays, but
- // scaled overlays still work. It's possible DWM works around it by
- // performing an extra scaling Blt before calling the driver. Even when
- // scaled overlays aren't actually supported, presentation using the
- // overlay path should be relatively efficient.
- if (info.flags & (DXGI_OVERLAY_SUPPORT_FLAG_DIRECT |
- DXGI_OVERLAY_SUPPORT_FLAG_SCALING)) {
- g_overlay_format_used = info.overlay_format;
- g_overlay_dxgi_format_used = info.dxgi_format;
-
- g_supports_overlays = true;
- g_supports_scaled_overlays =
- !!(info.flags & DXGI_OVERLAY_SUPPORT_FLAG_SCALING);
-
- DXGI_OUTPUT_DESC monitor_desc = {};
- if (SUCCEEDED(output3->GetDesc(&monitor_desc))) {
- g_overlay_monitor_size =
- gfx::Rect(monitor_desc.DesktopCoordinates).size();
- }
- }
- }
- // Early out after the first output that reports overlay support. All
- // outputs are expected to report the same overlay support according to
- // Microsoft's WDDM documentation:
- // https://docs.microsoft.com/en-us/windows-hardware/drivers/display/multiplane-overlay-hardware-requirements
- // TODO(sunnyps): If the above is true, then we can only look at first
- // output instead of iterating over all outputs.
- if (g_supports_overlays)
- break;
- }
- if (g_supports_overlays) {
- UMA_HISTOGRAM_ENUMERATION("GPU.DirectComposition.OverlayFormatUsed2",
- g_overlay_format_used);
- }
- UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.OverlaysSupported",
- g_supports_overlays);
-}
-
-bool CreateSurfaceHandleHelper(HANDLE* handle) {
- using PFN_DCOMPOSITION_CREATE_SURFACE_HANDLE =
- HRESULT(WINAPI*)(DWORD, SECURITY_ATTRIBUTES*, HANDLE*);
- static PFN_DCOMPOSITION_CREATE_SURFACE_HANDLE create_surface_handle_function =
- nullptr;
-
- if (!create_surface_handle_function) {
- HMODULE dcomp = ::GetModuleHandleA("dcomp.dll");
- if (!dcomp) {
- DLOG(ERROR) << "Failed to get handle for dcomp.dll";
- return false;
- }
- create_surface_handle_function =
- reinterpret_cast<PFN_DCOMPOSITION_CREATE_SURFACE_HANDLE>(
- ::GetProcAddress(dcomp, "DCompositionCreateSurfaceHandle"));
- if (!create_surface_handle_function) {
- DLOG(ERROR)
- << "Failed to get address for DCompositionCreateSurfaceHandle";
- return false;
- }
- }
-
- HRESULT hr = create_surface_handle_function(COMPOSITIONOBJECT_ALL_ACCESS,
- nullptr, handle);
- if (FAILED(hr)) {
- DLOG(ERROR) << "DCompositionCreateSurfaceHandle failed with error 0x"
- << std::hex << hr;
- return false;
- }
-
- return true;
-}
-} // namespace
-
-// DCLayerTree manages a tree of direct composition visuals, and associated
-// swap chains for given overlay layers. It maintains a list of pending layers
-// submitted using ScheduleDCLayer() that are presented and committed in
-// CommitAndClearPendingOverlays().
-class DCLayerTree {
- public:
- DCLayerTree(const GpuDriverBugWorkarounds& workarounds)
- : workarounds_(workarounds) {}
-
- // Returns true on success.
- bool Initialize(HWND window,
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device,
- Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device);
-
- // Present pending overlay layers, and perform a direct composition commit if
- // necessary. Returns true if presentation and commit succeeded.
- bool CommitAndClearPendingOverlays(
- DirectCompositionChildSurfaceWin* root_surface);
-
- // Schedule an overlay layer for the next CommitAndClearPendingOverlays call.
- bool ScheduleDCLayer(const ui::DCRendererLayerParams& params);
-
- // Called by SwapChainPresenter to initialize video processor that can handle
- // at least given input and output size. The video processor is shared across
- // layers so the same one can be reused if it's large enough. Returns true on
- // success.
- bool InitializeVideoProcessor(const gfx::Size& input_size,
- const gfx::Size& output_size);
-
- void SetNeedsCommit() { needs_commit_ = true; }
-
- const Microsoft::WRL::ComPtr<ID3D11VideoDevice>& video_device() const {
- return video_device_;
- }
-
- const Microsoft::WRL::ComPtr<ID3D11VideoContext>& video_context() const {
- return video_context_;
- }
-
- const Microsoft::WRL::ComPtr<ID3D11VideoProcessor>& video_processor() const {
- return video_processor_;
- }
-
- const Microsoft::WRL::ComPtr<ID3D11VideoProcessorEnumerator>&
- video_processor_enumerator() const {
- return video_processor_enumerator_;
- }
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> GetLayerSwapChainForTesting(
- size_t index) const;
-
- const GpuDriverBugWorkarounds& workarounds() const { return workarounds_; }
-
- private:
- class SwapChainPresenter;
-
- const GpuDriverBugWorkarounds workarounds_;
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device_;
- Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device_;
- Microsoft::WRL::ComPtr<IDCompositionTarget> dcomp_target_;
-
- // The video processor is cached so SwapChains don't have to recreate it
- // whenever they're created.
- Microsoft::WRL::ComPtr<ID3D11VideoDevice> video_device_;
- Microsoft::WRL::ComPtr<ID3D11VideoContext> video_context_;
- Microsoft::WRL::ComPtr<ID3D11VideoProcessor> video_processor_;
- Microsoft::WRL::ComPtr<ID3D11VideoProcessorEnumerator>
- video_processor_enumerator_;
-
- // Current video processor input and output size.
- gfx::Size video_input_size_;
- gfx::Size video_output_size_;
-
- // Set to true if a direct composition commit is needed.
- bool needs_commit_ = false;
-
- // Set if root surface is using a swap chain currently.
- Microsoft::WRL::ComPtr<IDXGISwapChain1> root_swap_chain_;
-
- // Set if root surface is using a direct composition surface currently.
- Microsoft::WRL::ComPtr<IDCompositionSurface> root_dcomp_surface_;
- uint64_t root_dcomp_surface_serial_;
-
- // Direct composition visual for root surface.
- Microsoft::WRL::ComPtr<IDCompositionVisual2> root_surface_visual_;
-
- // Root direct composition visual for window dcomp target.
- Microsoft::WRL::ComPtr<IDCompositionVisual2> dcomp_root_visual_;
-
- // List of pending overlay layers from ScheduleDCLayer().
- std::vector<std::unique_ptr<ui::DCRendererLayerParams>> pending_overlays_;
-
- // List of swap chain presenters for previous frame.
- std::vector<std::unique_ptr<SwapChainPresenter>> video_swap_chains_;
-
- DISALLOW_COPY_AND_ASSIGN(DCLayerTree);
-};
-
-// SwapChainPresenter holds a swap chain, direct composition visuals, and other
-// associated resources for a single overlay layer. It is updated by calling
-// PresentToSwapChain(), and can update or recreate resources as necessary.
-class DCLayerTree::SwapChainPresenter {
- public:
- SwapChainPresenter(DCLayerTree* layer_tree,
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device,
- Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device);
- ~SwapChainPresenter();
-
- // Present the given overlay to swap chain. Returns true on success.
- bool PresentToSwapChain(const ui::DCRendererLayerParams& overlay);
-
- const Microsoft::WRL::ComPtr<IDXGISwapChain1>& swap_chain() const {
- return swap_chain_;
- }
-
- const Microsoft::WRL::ComPtr<IDCompositionVisual2>& visual() const {
- return clip_visual_;
- }
-
- private:
- // Mapped to DirectCompositonVideoPresentationMode UMA enum. Do not remove or
- // remap existing entries!
- enum class VideoPresentationMode {
- kZeroCopyDecodeSwapChain = 0,
- kUploadAndVideoProcessorBlit = 1,
- kBindAndVideoProcessorBlit = 2,
- kMaxValue = kBindAndVideoProcessorBlit,
- };
-
- // Mapped to DecodeSwapChainNotUsedReason UMA enum. Do not remove or remap
- // existing entries.
- enum class DecodeSwapChainNotUsedReason {
- kSoftwareFrame = 0,
- kNv12NotSupported = 1,
- kFailedToPresent = 2,
- kNonDecoderTexture = 3,
- kSharedTexture = 4,
- kIncompatibleTransform = 5,
- kUnitaryTextureArray = 6,
- kMaxValue = kUnitaryTextureArray,
- };
-
- // Upload given YUV buffers to an NV12 texture that can be used to create
- // video processor input view. Returns nullptr on failure.
- Microsoft::WRL::ComPtr<ID3D11Texture2D> UploadVideoImages(
- gl::GLImageMemory* y_image_memory,
- gl::GLImageMemory* uv_image_memory);
-
- // Releases resources that might hold indirect references to the swap chain.
- void ReleaseSwapChainResources();
-
- // Recreate swap chain using given size. Use preferred YUV format if
- // |use_yuv_swap_chain| is true, or BGRA otherwise. Sets flags based on
- // |protected_video_type|. Returns true on success.
- bool ReallocateSwapChain(const gfx::Size& swap_chain_size,
- bool use_yuv_swap_chain,
- ui::ProtectedVideoType protected_video_type,
- bool z_order);
-
- // Returns true if YUV swap chain should be preferred over BGRA swap chain.
- // This changes over time based on stats recorded in |presentation_history|.
- bool ShouldUseYUVSwapChain(ui::ProtectedVideoType protected_video_type);
-
- // Perform a blit using video processor from given input texture to swap chain
- // backbuffer. |input_texture| is the input texture (array), and |input_level|
- // is the index of the texture in the texture array. |keyed_mutex| is
- // optional, and is used to lock the resource for reading. |content_rect| is
- // subrectangle of the input texture that should be blitted to swap chain, and
- // |src_color_space| is the color space of the video.
- bool VideoProcessorBlt(Microsoft::WRL::ComPtr<ID3D11Texture2D> input_texture,
- UINT input_level,
- Microsoft::WRL::ComPtr<IDXGIKeyedMutex> keyed_mutex,
- const gfx::Rect& content_rect,
- const gfx::ColorSpace& src_color_space);
-
- // Returns optimal swap chain size for given layer.
- gfx::Size CalculateSwapChainSize(const ui::DCRendererLayerParams& params);
-
- // Update direct composition visuals for layer with given swap chain size.
- void UpdateVisuals(const ui::DCRendererLayerParams& params,
- const gfx::Size& swap_chain_size);
-
- // Try presenting to a decode swap chain based on various conditions such as
- // global state (e.g. finch, NV12 support), texture flags, and transform.
- // Returns true on success. See PresentToDecodeSwapChain() for more info.
- bool TryPresentToDecodeSwapChain(gl::GLImageDXGI* image_dxgi,
- const gfx::Rect& content_rect,
- const gfx::Size& swap_chain_size);
-
- // Present to a decode swap chain created from compatible video decoder
- // buffers using given |image_dxgi| with destination size |swap_chain_size|.
- // Returns true on success.
- bool PresentToDecodeSwapChain(gl::GLImageDXGI* image_dxgi,
- const gfx::Rect& content_rect,
- const gfx::Size& swap_chain_size);
-
- // Records presentation statistics in UMA and traces (for pixel tests) for the
- // current swap chain which could either be a regular flip swap chain or a
- // decode swap chain.
- void RecordPresentationStatistics();
-
- // Layer tree instance that owns this swap chain presenter.
- DCLayerTree* layer_tree_;
-
- // Current size of swap chain.
- gfx::Size swap_chain_size_;
-
- // Whether the current swap chain is using the preferred YUV format.
- bool is_yuv_swapchain_ = false;
-
- // Whether the swap chain was reallocated, and next present will be the first.
- bool first_present_ = false;
-
- // Whether the current swap chain is presenting protected video, software
- // or hardware protection.
- ui::ProtectedVideoType protected_video_type_ = ui::ProtectedVideoType::kClear;
-
- // Presentation history to track if swap chain was composited or used hardware
- // overlays.
- PresentationHistory presentation_history_;
-
- // Whether creating a YUV swap chain failed.
- bool failed_to_create_yuv_swapchain_ = false;
-
- // Set to true when PresentToDecodeSwapChain fails for the first time after
- // which we won't attempt to use decode swap chain again.
- bool failed_to_present_decode_swapchain_ = false;
-
- // Number of frames since we switched from YUV to BGRA swap chain, or
- // vice-versa.
- int frames_since_color_space_change_ = 0;
-
- // This struct is used to cache information about what visuals are currently
- // being presented so that properties that aren't changed aren't sent to
- // DirectComposition.
- struct VisualInfo {
- gfx::Point offset;
- gfx::Transform transform;
- bool is_clipped = false;
- gfx::Rect clip_rect;
- } visual_info_;
-
- // Direct composition visual containing the swap chain content. Child of
- // |clip_visual_|.
- Microsoft::WRL::ComPtr<IDCompositionVisual2> content_visual_;
-
- // Direct composition visual that applies the clip rect. Parent of
- // |content_visual_|, and root of the visual tree for this layer.
- Microsoft::WRL::ComPtr<IDCompositionVisual2> clip_visual_;
-
- // GLImages that were presented in the last frame.
- scoped_refptr<gl::GLImage> last_y_image_;
- scoped_refptr<gl::GLImage> last_uv_image_;
-
- // NV12 staging texture used for software decoded YUV buffers. Mapped to CPU
- // for copying from YUV buffers. Texture usage is DYNAMIC or STAGING.
- Microsoft::WRL::ComPtr<ID3D11Texture2D> staging_texture_;
- // Used to copy from staging texture with usage STAGING for workarounds.
- Microsoft::WRL::ComPtr<ID3D11Texture2D> copy_texture_;
- gfx::Size staging_texture_size_;
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device_;
- Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device_;
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain_;
-
- // Handle returned by DCompositionCreateSurfaceHandle() used to create YUV
- // swap chain that can be used for direct composition.
- base::win::ScopedHandle swap_chain_handle_;
-
- // Video processor output view created from swap chain back buffer. Must be
- // cached for performance reasons.
- Microsoft::WRL::ComPtr<ID3D11VideoProcessorOutputView> output_view_;
-
- Microsoft::WRL::ComPtr<IDXGIResource> decode_resource_;
- Microsoft::WRL::ComPtr<IDXGIDecodeSwapChain> decode_swap_chain_;
- Microsoft::WRL::ComPtr<IUnknown> decode_surface_;
-
- DISALLOW_COPY_AND_ASSIGN(SwapChainPresenter);
-};
-
-bool DCLayerTree::Initialize(
- HWND window,
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device,
- Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device) {
- DCHECK(d3d11_device);
- d3d11_device_ = std::move(d3d11_device);
- DCHECK(dcomp_device);
- dcomp_device_ = std::move(dcomp_device);
-
- Microsoft::WRL::ComPtr<IDCompositionDesktopDevice> desktop_device;
- dcomp_device_.As(&desktop_device);
- DCHECK(desktop_device);
-
- HRESULT hr =
- desktop_device->CreateTargetForHwnd(window, TRUE, &dcomp_target_);
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateTargetForHwnd failed with error 0x" << std::hex << hr;
- return false;
- }
-
- dcomp_device_->CreateVisual(&dcomp_root_visual_);
- DCHECK(dcomp_root_visual_);
- dcomp_target_->SetRoot(dcomp_root_visual_.Get());
- // A visual inherits the interpolation mode of the parent visual by default.
- // If no visuals set the interpolation mode, the default for the entire visual
- // tree is nearest neighbor interpolation.
- // Set the interpolation mode to Linear to get a better upscaling quality.
- dcomp_root_visual_->SetBitmapInterpolationMode(
- DCOMPOSITION_BITMAP_INTERPOLATION_MODE_LINEAR);
-
- return true;
-}
-
-bool DCLayerTree::InitializeVideoProcessor(const gfx::Size& input_size,
- const gfx::Size& output_size) {
- if (!video_device_) {
- // This can fail if the D3D device is "Microsoft Basic Display Adapter".
- if (FAILED(d3d11_device_.As(&video_device_))) {
- DLOG(ERROR) << "Failed to retrieve video device from D3D11 device";
- return false;
- }
- DCHECK(video_device_);
-
- Microsoft::WRL::ComPtr<ID3D11DeviceContext> context;
- d3d11_device_->GetImmediateContext(&context);
- DCHECK(context);
- context.As(&video_context_);
- DCHECK(video_context_);
- }
-
- if (video_processor_ && SizeContains(video_input_size_, input_size) &&
- SizeContains(video_output_size_, output_size))
- return true;
- TRACE_EVENT2("gpu", "DCLayerTree::InitializeVideoProcessor", "input_size",
- input_size.ToString(), "output_size", output_size.ToString());
- video_input_size_ = input_size;
- video_output_size_ = output_size;
-
- video_processor_.Reset();
- video_processor_enumerator_.Reset();
- D3D11_VIDEO_PROCESSOR_CONTENT_DESC desc = {};
- desc.InputFrameFormat = D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE;
- desc.InputFrameRate.Numerator = 60;
- desc.InputFrameRate.Denominator = 1;
- desc.InputWidth = input_size.width();
- desc.InputHeight = input_size.height();
- desc.OutputFrameRate.Numerator = 60;
- desc.OutputFrameRate.Denominator = 1;
- desc.OutputWidth = output_size.width();
- desc.OutputHeight = output_size.height();
- desc.Usage = D3D11_VIDEO_USAGE_PLAYBACK_NORMAL;
- HRESULT hr = video_device_->CreateVideoProcessorEnumerator(
- &desc, &video_processor_enumerator_);
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateVideoProcessorEnumerator failed with error 0x"
- << std::hex << hr;
- return false;
- }
-
- hr = video_device_->CreateVideoProcessor(video_processor_enumerator_.Get(), 0,
- &video_processor_);
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateVideoProcessor failed with error 0x" << std::hex
- << hr;
- return false;
- }
-
- // Auto stream processing (the default) can hurt power consumption.
- video_context_->VideoProcessorSetStreamAutoProcessingMode(
- video_processor_.Get(), 0, FALSE);
- return true;
-}
-
-Microsoft::WRL::ComPtr<IDXGISwapChain1>
-DCLayerTree::GetLayerSwapChainForTesting(size_t index) const {
- if (index < video_swap_chains_.size())
- return video_swap_chains_[index]->swap_chain();
- return nullptr;
-}
-
-DCLayerTree::SwapChainPresenter::SwapChainPresenter(
- DCLayerTree* layer_tree,
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device,
- Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device)
- : layer_tree_(layer_tree),
- d3d11_device_(d3d11_device),
- dcomp_device_(dcomp_device) {}
-
-DCLayerTree::SwapChainPresenter::~SwapChainPresenter() {}
-
-bool DCLayerTree::SwapChainPresenter::ShouldUseYUVSwapChain(
- ui::ProtectedVideoType protected_video_type) {
- // TODO(crbug.com/850799): Assess power/perf impact when protected video
- // swap chain is composited by DWM.
-
- // Always prefer YUV swap chain for hardware protected video for now.
- if (protected_video_type == ui::ProtectedVideoType::kHardwareProtected)
- return true;
-
- // For software protected video, BGRA swap chain is preferred if hardware
- // overlay is not supported for better power efficiency.
- // Currently, software protected video is the only case that overlay swap
- // chain is used when hardware overlay is not suppported.
- if (protected_video_type == ui::ProtectedVideoType::kSoftwareProtected &&
- !g_supports_overlays)
- return false;
-
- if (failed_to_create_yuv_swapchain_)
- return false;
-
- // Start out as YUV.
- if (!presentation_history_.valid())
- return true;
- int composition_count = presentation_history_.composed_count();
-
- // It's more efficient to use a BGRA backbuffer instead of YUV if overlays
- // aren't being used, as otherwise DWM will use the video processor a second
- // time to convert it to BGRA before displaying it on screen.
-
- if (is_yuv_swapchain_) {
- // Switch to BGRA once 3/4 of presents are composed.
- return composition_count < (PresentationHistory::kPresentsToStore * 3 / 4);
- } else {
- // Switch to YUV once 3/4 are using overlays (or unknown).
- return composition_count < (PresentationHistory::kPresentsToStore / 4);
- }
-}
-
-Microsoft::WRL::ComPtr<ID3D11Texture2D>
-DCLayerTree::SwapChainPresenter::UploadVideoImages(
- gl::GLImageMemory* y_image_memory,
- gl::GLImageMemory* uv_image_memory) {
- gfx::Size texture_size = y_image_memory->GetSize();
- gfx::Size uv_image_size = uv_image_memory->GetSize();
- if (uv_image_size.height() != texture_size.height() / 2 ||
- uv_image_size.width() != texture_size.width() / 2 ||
- y_image_memory->format() != gfx::BufferFormat::R_8 ||
- uv_image_memory->format() != gfx::BufferFormat::RG_88) {
- DLOG(ERROR) << "Invalid NV12 GLImageMemory properties.";
- return nullptr;
- }
-
- TRACE_EVENT1("gpu", "SwapChainPresenter::UploadVideoImages", "size",
- texture_size.ToString());
-
- static crash_reporter::CrashKeyString<32> texture_size_key(
- "dynamic-texture-size");
- texture_size_key.Set(texture_size.ToString());
-
- static crash_reporter::CrashKeyString<2> first_use_key(
- "dynamic-texture-first-use");
- bool first_use = !staging_texture_ || (staging_texture_size_ != texture_size);
- first_use_key.Set(first_use ? "1" : "0");
-
- bool use_dynamic_texture =
- !layer_tree_->workarounds().disable_nv12_dynamic_textures;
-
- D3D11_TEXTURE2D_DESC desc = {};
- desc.Width = texture_size.width();
- desc.Height = texture_size.height();
- desc.Format = DXGI_FORMAT_NV12;
- desc.MipLevels = 1;
- desc.ArraySize = 1;
- desc.Usage = use_dynamic_texture ? D3D11_USAGE_DYNAMIC : D3D11_USAGE_STAGING;
- // This isn't actually bound to a decoder, but dynamic textures need
- // BindFlags to be nonzero and D3D11_BIND_DECODER also works when creating
- // a VideoProcessorInputView.
- desc.BindFlags = use_dynamic_texture ? D3D11_BIND_DECODER : 0;
- desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
- desc.MiscFlags = 0;
- desc.SampleDesc.Count = 1;
-
- if (!staging_texture_ || (staging_texture_size_ != texture_size)) {
- staging_texture_.Reset();
- copy_texture_.Reset();
- HRESULT hr =
- d3d11_device_->CreateTexture2D(&desc, nullptr, &staging_texture_);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Creating D3D11 video staging texture failed: " << std::hex
- << hr;
- return nullptr;
- }
- DCHECK(staging_texture_);
- staging_texture_size_ = texture_size;
- }
-
- Microsoft::WRL::ComPtr<ID3D11DeviceContext> context;
- d3d11_device_->GetImmediateContext(&context);
- DCHECK(context);
-
- D3D11_MAP map_type =
- use_dynamic_texture ? D3D11_MAP_WRITE_DISCARD : D3D11_MAP_WRITE;
- D3D11_MAPPED_SUBRESOURCE mapped_resource;
- HRESULT hr =
- context->Map(staging_texture_.Get(), 0, map_type, 0, &mapped_resource);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Mapping D3D11 video staging texture failed: " << std::hex
- << hr;
- return nullptr;
- }
-
- size_t dest_stride = mapped_resource.RowPitch;
- for (int y = 0; y < texture_size.height(); y++) {
- const uint8_t* y_source =
- y_image_memory->memory() + y * y_image_memory->stride();
- uint8_t* dest =
- reinterpret_cast<uint8_t*>(mapped_resource.pData) + dest_stride * y;
- memcpy(dest, y_source, texture_size.width());
- }
-
- uint8_t* uv_dest_plane_start =
- reinterpret_cast<uint8_t*>(mapped_resource.pData) +
- dest_stride * texture_size.height();
- for (int y = 0; y < uv_image_size.height(); y++) {
- const uint8_t* uv_source =
- uv_image_memory->memory() + y * uv_image_memory->stride();
- uint8_t* dest = uv_dest_plane_start + dest_stride * y;
- memcpy(dest, uv_source, texture_size.width());
- }
- context->Unmap(staging_texture_.Get(), 0);
-
- if (use_dynamic_texture)
- return staging_texture_;
-
- if (!copy_texture_) {
- desc.Usage = D3D11_USAGE_DEFAULT;
- desc.BindFlags = D3D11_BIND_DECODER;
- desc.CPUAccessFlags = 0;
- HRESULT hr = d3d11_device_->CreateTexture2D(&desc, nullptr, &copy_texture_);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Creating D3D11 video upload texture failed: " << std::hex
- << hr;
- return nullptr;
- }
- DCHECK(copy_texture_);
- }
- TRACE_EVENT0("gpu", "SwapChainPresenter::UploadVideoImages::CopyResource");
- context->CopyResource(copy_texture_.Get(), staging_texture_.Get());
- return copy_texture_;
-}
-
-gfx::Size DCLayerTree::SwapChainPresenter::CalculateSwapChainSize(
- const ui::DCRendererLayerParams& params) {
- // Swap chain size is the minimum of the on-screen size and the source size so
- // the video processor can do the minimal amount of work and the overlay has
- // to read the minimal amount of data. DWM is also less likely to promote a
- // surface to an overlay if it's much larger than its area on-screen.
- gfx::Size swap_chain_size = params.content_rect.size();
- gfx::Size overlay_onscreen_size = swap_chain_size;
- gfx::RectF bounds(params.quad_rect);
- params.transform.TransformRect(&bounds);
- overlay_onscreen_size = gfx::ToEnclosingRect(bounds).size();
-
- // If transform isn't a scale or translation then swap chain can't be promoted
- // to an overlay so avoid blitting to a large surface unnecessarily. Also,
- // after the video rotation fix (crbug.com/904035), using rotated size for
- // swap chain size will cause stretching since there's no squashing factor in
- // the transform to counteract.
- // TODO(sunnyps): Support 90/180/270 deg rotations using video context.
- if (params.transform.IsScaleOrTranslation()) {
- swap_chain_size = overlay_onscreen_size;
- }
-
- if (g_supports_scaled_overlays) {
- // Downscaling doesn't work on Intel display HW, and so DWM will perform an
- // extra BLT to avoid HW downscaling. This prevents the use of hardware
- // overlays especially for protected video.
- swap_chain_size.SetToMin(params.content_rect.size());
- }
-
- bool workaround_applied = false;
- if (layer_tree_->workarounds().disable_larger_than_screen_overlays &&
- !g_overlay_monitor_size.IsEmpty()) {
- // Because of the rounding when converting between pixels and DIPs, a
- // fullscreen video can become slightly larger than the monitor - e.g. on
- // a 3000x2000 monitor with a scale factor of 1.75 a 1920x1079 video can
- // become 3002x1689.
- // On older Intel drivers, swapchains that are bigger than the monitor
- // won't be put into overlays, which will hurt power usage a lot. On those
- // systems, the scaling can be adjusted very slightly so that it's less
- // than the monitor size. This should be close to imperceptible.
- // TODO(jbauman): Remove when http://crbug.com/668278 is fixed.
- const int kOversizeMargin = 3;
-
- if ((swap_chain_size.width() > g_overlay_monitor_size.width()) &&
- (swap_chain_size.width() <=
- g_overlay_monitor_size.width() + kOversizeMargin)) {
- swap_chain_size.set_width(g_overlay_monitor_size.width());
- workaround_applied = true;
- }
-
- if ((swap_chain_size.height() > g_overlay_monitor_size.height()) &&
- (swap_chain_size.height() <=
- g_overlay_monitor_size.height() + kOversizeMargin)) {
- swap_chain_size.set_height(g_overlay_monitor_size.height());
- workaround_applied = true;
- }
- }
- RecordOverlayFullScreenTypes(
- workaround_applied,
- /*overlay_onscreen_rect*/ gfx::ToEnclosingRect(bounds));
-
- // 4:2:2 subsampled formats like YUY2 must have an even width, and 4:2:0
- // subsampled formats like NV12 must have an even width and height.
- if (swap_chain_size.width() % 2 == 1)
- swap_chain_size.set_width(swap_chain_size.width() + 1);
- if (swap_chain_size.height() % 2 == 1)
- swap_chain_size.set_height(swap_chain_size.height() + 1);
-
- return swap_chain_size;
-}
-
-void DCLayerTree::SwapChainPresenter::UpdateVisuals(
- const ui::DCRendererLayerParams& params,
- const gfx::Size& swap_chain_size) {
- if (!content_visual_) {
- DCHECK(!clip_visual_);
- dcomp_device_->CreateVisual(&clip_visual_);
- DCHECK(clip_visual_);
- dcomp_device_->CreateVisual(&content_visual_);
- DCHECK(content_visual_);
- clip_visual_->AddVisual(content_visual_.Get(), FALSE, nullptr);
- layer_tree_->SetNeedsCommit();
- }
-
- // Visual offset is applied before transform so it behaves similar to how the
- // compositor uses transform to map quad rect in layer space to target space.
- gfx::Point offset = params.quad_rect.origin();
- gfx::Transform transform = params.transform;
-
- // Transform is correct for scaling up |quad_rect| to on screen bounds, but
- // doesn't include scaling transform from |swap_chain_size| to |quad_rect|.
- // Since |swap_chain_size| could be equal to on screen bounds, and therefore
- // possibly larger than |quad_rect|, this scaling could be downscaling, but
- // only to the extent that it would cancel upscaling already in the transform.
- float swap_chain_scale_x =
- params.quad_rect.width() * 1.0f / swap_chain_size.width();
- float swap_chain_scale_y =
- params.quad_rect.height() * 1.0f / swap_chain_size.height();
- transform.Scale(swap_chain_scale_x, swap_chain_scale_y);
-
- if (visual_info_.offset != offset || visual_info_.transform != transform) {
- visual_info_.offset = offset;
- visual_info_.transform = transform;
- layer_tree_->SetNeedsCommit();
-
- content_visual_->SetOffsetX(offset.x());
- content_visual_->SetOffsetY(offset.y());
-
- Microsoft::WRL::ComPtr<IDCompositionMatrixTransform> dcomp_transform;
- dcomp_device_->CreateMatrixTransform(&dcomp_transform);
- DCHECK(dcomp_transform);
- // SkMatrix44 is column-major, but D2D_MATRIX_3x2_F is row-major.
- D2D_MATRIX_3X2_F d2d_matrix = {
- {{transform.matrix().get(0, 0), transform.matrix().get(1, 0),
- transform.matrix().get(0, 1), transform.matrix().get(1, 1),
- transform.matrix().get(0, 3), transform.matrix().get(1, 3)}}};
- dcomp_transform->SetMatrix(d2d_matrix);
- content_visual_->SetTransform(dcomp_transform.Get());
- }
-
- if (visual_info_.is_clipped != params.is_clipped ||
- visual_info_.clip_rect != params.clip_rect) {
- visual_info_.is_clipped = params.is_clipped;
- visual_info_.clip_rect = params.clip_rect;
- layer_tree_->SetNeedsCommit();
- // DirectComposition clips happen in the pre-transform visual space, while
- // cc/ clips happen post-transform. So the clip needs to go on a separate
- // parent visual that's untransformed.
- if (params.is_clipped) {
- Microsoft::WRL::ComPtr<IDCompositionRectangleClip> clip;
- dcomp_device_->CreateRectangleClip(&clip);
- DCHECK(clip);
- clip->SetLeft(params.clip_rect.x());
- clip->SetRight(params.clip_rect.right());
- clip->SetBottom(params.clip_rect.bottom());
- clip->SetTop(params.clip_rect.y());
- clip_visual_->SetClip(clip.Get());
- } else {
- clip_visual_->SetClip(nullptr);
- }
- }
-}
-
-bool DCLayerTree::SwapChainPresenter::TryPresentToDecodeSwapChain(
- gl::GLImageDXGI* image_dxgi,
- const gfx::Rect& content_rect,
- const gfx::Size& swap_chain_size) {
- if (!base::FeatureList::IsEnabled(
- features::kDirectCompositionUseNV12DecodeSwapChain))
- return false;
-
- auto not_used_reason = DecodeSwapChainNotUsedReason::kFailedToPresent;
-
- bool nv12_supported = g_overlay_format_used == OverlayFormat::kNV12;
- // TODO(sunnyps): Try using decode swap chain for uploaded video images.
- if (image_dxgi && nv12_supported && !failed_to_present_decode_swapchain_) {
- D3D11_TEXTURE2D_DESC texture_desc = {};
- image_dxgi->texture()->GetDesc(&texture_desc);
-
- bool is_decoder_texture = texture_desc.BindFlags & D3D11_BIND_DECODER;
-
- // Decode swap chains do not support shared resources.
- // TODO(sunnyps): Find a workaround for when the decoder moves to its own
- // thread and D3D device. See https://crbug.com/911847
- bool is_shared_texture =
- texture_desc.MiscFlags &
- (D3D11_RESOURCE_MISC_SHARED | D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX |
- D3D11_RESOURCE_MISC_SHARED_NTHANDLE);
-
- // DXVA decoder (or rather MFT) sometimes gives texture arrays with one
- // element, which constitutes most of decode swap chain creation failures.
- bool is_unitary_texture_array = texture_desc.ArraySize <= 1;
-
- // Rotated videos are not promoted to overlays. We plan to implement
- // rotation using video processor instead of via direct composition. Also
- // check for skew and any downscaling specified to direct composition.
- bool is_overlay_supported_transform =
- visual_info_.transform.IsPositiveScaleOrTranslation();
-
- // Downscaled video isn't promoted to hardware overlays. We prefer to
- // blit into the smaller size so that it can be promoted to a hardware
- // overlay.
- float swap_chain_scale_x =
- swap_chain_size.width() * 1.0f / content_rect.width();
- float swap_chain_scale_y =
- swap_chain_size.height() * 1.0f / content_rect.height();
-
- is_overlay_supported_transform = is_overlay_supported_transform &&
- (swap_chain_scale_x >= 1.0f) &&
- (swap_chain_scale_y >= 1.0f);
-
- if (is_decoder_texture && !is_shared_texture && !is_unitary_texture_array &&
- is_overlay_supported_transform) {
- if (PresentToDecodeSwapChain(image_dxgi, content_rect, swap_chain_size))
- return true;
- ReleaseSwapChainResources();
- failed_to_present_decode_swapchain_ = true;
- not_used_reason = DecodeSwapChainNotUsedReason::kFailedToPresent;
- DLOG(ERROR)
- << "Present to decode swap chain failed - falling back to blit";
- } else if (!is_decoder_texture) {
- not_used_reason = DecodeSwapChainNotUsedReason::kNonDecoderTexture;
- } else if (is_shared_texture) {
- not_used_reason = DecodeSwapChainNotUsedReason::kSharedTexture;
- } else if (is_unitary_texture_array) {
- not_used_reason = DecodeSwapChainNotUsedReason::kUnitaryTextureArray;
- } else if (!is_overlay_supported_transform) {
- not_used_reason = DecodeSwapChainNotUsedReason::kIncompatibleTransform;
- }
- } else if (!image_dxgi) {
- not_used_reason = DecodeSwapChainNotUsedReason::kSoftwareFrame;
- } else if (!nv12_supported) {
- not_used_reason = DecodeSwapChainNotUsedReason::kNv12NotSupported;
- } else if (failed_to_present_decode_swapchain_) {
- not_used_reason = DecodeSwapChainNotUsedReason::kFailedToPresent;
- }
-
- UMA_HISTOGRAM_ENUMERATION(
- "GPU.DirectComposition.DecodeSwapChainNotUsedReason", not_used_reason);
- return false;
-}
-
-bool DCLayerTree::SwapChainPresenter::PresentToDecodeSwapChain(
- gl::GLImageDXGI* image_dxgi,
- const gfx::Rect& content_rect,
- const gfx::Size& swap_chain_size) {
- DCHECK(!swap_chain_size.IsEmpty());
-
- TRACE_EVENT2("gpu", "SwapChainPresenter::PresentToDecodeSwapChain",
- "content_rect", content_rect.ToString(), "swap_chain_size",
- swap_chain_size.ToString());
-
- Microsoft::WRL::ComPtr<IDXGIResource> decode_resource;
- image_dxgi->texture().As(&decode_resource);
- DCHECK(decode_resource);
-
- if (!decode_swap_chain_ || decode_resource_ != decode_resource) {
- TRACE_EVENT0(
- "gpu",
- "SwapChainPresenter::PresentToDecodeSwapChain::CreateDecodeSwapChain");
- ReleaseSwapChainResources();
-
- decode_resource_ = decode_resource;
-
- HANDLE handle = INVALID_HANDLE_VALUE;
- if (!CreateSurfaceHandleHelper(&handle))
- return false;
- swap_chain_handle_.Set(handle);
-
- Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
- d3d11_device_.As(&dxgi_device);
- DCHECK(dxgi_device);
- Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
- dxgi_device->GetAdapter(&dxgi_adapter);
- DCHECK(dxgi_adapter);
- Microsoft::WRL::ComPtr<IDXGIFactoryMedia> media_factory;
- dxgi_adapter->GetParent(IID_PPV_ARGS(&media_factory));
- DCHECK(media_factory);
-
- DXGI_DECODE_SWAP_CHAIN_DESC desc = {};
- desc.Flags = 0;
- HRESULT hr =
- media_factory->CreateDecodeSwapChainForCompositionSurfaceHandle(
- d3d11_device_.Get(), swap_chain_handle_.Get(), &desc,
- decode_resource_.Get(), nullptr, &decode_swap_chain_);
- base::UmaHistogramSparse(
- "GPU.DirectComposition.DecodeSwapChainCreationResult", hr);
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateDecodeSwapChainForCompositionSurfaceHandle failed "
- "with error 0x"
- << std::hex << hr;
- return false;
- }
- DCHECK(decode_swap_chain_);
-
- Microsoft::WRL::ComPtr<IDCompositionDesktopDevice> desktop_device;
- dcomp_device_.As(&desktop_device);
- DCHECK(desktop_device);
-
- desktop_device->CreateSurfaceFromHandle(swap_chain_handle_.Get(),
- &decode_surface_);
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateSurfaceFromHandle failed with error 0x" << std::hex
- << hr;
- return false;
- }
- DCHECK(decode_surface_);
-
- content_visual_->SetContent(decode_surface_.Get());
- layer_tree_->SetNeedsCommit();
- } else if (last_y_image_ == image_dxgi && last_uv_image_ == image_dxgi &&
- swap_chain_size_ == swap_chain_size) {
- // Early out if we're presenting the same image again.
- return true;
- }
-
- RECT source_rect = content_rect.ToRECT();
- decode_swap_chain_->SetSourceRect(&source_rect);
-
- decode_swap_chain_->SetDestSize(swap_chain_size.width(),
- swap_chain_size.height());
- RECT target_rect = gfx::Rect(swap_chain_size).ToRECT();
- decode_swap_chain_->SetTargetRect(&target_rect);
-
- gfx::ColorSpace color_space = image_dxgi->color_space();
- if (!color_space.IsValid())
- color_space = gfx::ColorSpace::CreateREC709();
-
- // TODO(sunnyps): Move this to gfx::ColorSpaceWin helper where we can access
- // internal color space state and do a better job.
- // Common color spaces have primaries and transfer function similar to BT 709
- // and there are no other choices anyway.
- int flags = DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_BT709;
- // Proper Rec 709 and 601 have limited or nominal color range.
- if (color_space == gfx::ColorSpace::CreateREC709() ||
- color_space == gfx::ColorSpace::CreateREC601()) {
- flags |= DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_NOMINAL_RANGE;
- }
- // xvYCC allows colors outside nominal range to encode negative colors that
- // allows for a wider gamut.
- if (color_space.FullRangeEncodedValues()) {
- flags |= DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_xvYCC;
- }
- decode_swap_chain_->SetColorSpace(
- static_cast<DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS>(flags));
-
- HRESULT hr = decode_swap_chain_->PresentBuffer(image_dxgi->level(), 1, 0);
- // Ignore DXGI_STATUS_OCCLUDED since that's not an error but only indicates
- // that the window is occluded and we can stop rendering.
- if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
- DLOG(ERROR) << "PresentBuffer failed with error 0x" << std::hex << hr;
- return false;
- }
-
- last_y_image_ = image_dxgi;
- last_uv_image_ = image_dxgi;
- swap_chain_size_ = swap_chain_size;
- if (is_yuv_swapchain_) {
- frames_since_color_space_change_++;
- } else {
- UMA_HISTOGRAM_COUNTS_1000(
- "GPU.DirectComposition.FramesSinceColorSpaceChange",
- frames_since_color_space_change_);
- frames_since_color_space_change_ = 0;
- is_yuv_swapchain_ = true;
- }
- RecordPresentationStatistics();
- return true;
-}
-
-bool DCLayerTree::SwapChainPresenter::PresentToSwapChain(
- const ui::DCRendererLayerParams& params) {
- gl::GLImageDXGI* image_dxgi =
- gl::GLImageDXGI::FromGLImage(params.y_image.get());
- gl::GLImageMemory* y_image_memory =
- gl::GLImageMemory::FromGLImage(params.y_image.get());
- gl::GLImageMemory* uv_image_memory =
- gl::GLImageMemory::FromGLImage(params.uv_image.get());
-
- if (!image_dxgi && (!y_image_memory || !uv_image_memory)) {
- DLOG(ERROR) << "Video GLImages are missing";
- // No need to release resources as context will be lost soon.
- return false;
- }
-
- gfx::Size swap_chain_size = CalculateSwapChainSize(params);
-
- TRACE_EVENT2("gpu", "SwapChainPresenter::PresentToSwapChain",
- "hardware_frame", !!image_dxgi, "swap_chain_size",
- swap_chain_size.ToString());
-
- // Do not create a swap chain if swap chain size will be empty.
- if (swap_chain_size.IsEmpty()) {
- swap_chain_size_ = swap_chain_size;
- if (swap_chain_) {
- ReleaseSwapChainResources();
- content_visual_->SetContent(nullptr);
- layer_tree_->SetNeedsCommit();
- }
- return true;
- }
-
- UpdateVisuals(params, swap_chain_size);
-
- if (TryPresentToDecodeSwapChain(image_dxgi, params.content_rect,
- swap_chain_size)) {
- return true;
- }
-
- bool swap_chain_resized = swap_chain_size_ != swap_chain_size;
- bool use_yuv_swap_chain = ShouldUseYUVSwapChain(params.protected_video_type);
- bool toggle_yuv_swapchain = use_yuv_swap_chain != is_yuv_swapchain_;
- bool toggle_protected_video =
- protected_video_type_ != params.protected_video_type;
-
- // Try reallocating swap chain if resizing fails.
- if (!swap_chain_ || swap_chain_resized || toggle_yuv_swapchain ||
- toggle_protected_video) {
- if (!ReallocateSwapChain(swap_chain_size, use_yuv_swap_chain,
- params.protected_video_type, params.z_order)) {
- ReleaseSwapChainResources();
- return false;
- }
- content_visual_->SetContent(swap_chain_.Get());
- layer_tree_->SetNeedsCommit();
- } else if (last_y_image_ == params.y_image &&
- last_uv_image_ == params.uv_image) {
- // The swap chain is presenting the same images as last swap, which means
- // that the images were never returned to the video decoder and should
- // have the same contents as last time. It shouldn't need to be redrawn.
- return true;
- }
- last_y_image_ = params.y_image;
- last_uv_image_ = params.uv_image;
-
- Microsoft::WRL::ComPtr<ID3D11Texture2D> input_texture;
- UINT input_level;
- Microsoft::WRL::ComPtr<IDXGIKeyedMutex> keyed_mutex;
- if (image_dxgi) {
- input_texture = image_dxgi->texture();
- input_level = (UINT)image_dxgi->level();
- // Keyed mutex may not exist.
- keyed_mutex = image_dxgi->keyed_mutex();
- staging_texture_.Reset();
- copy_texture_.Reset();
- } else {
- DCHECK(y_image_memory);
- DCHECK(uv_image_memory);
- input_texture = UploadVideoImages(y_image_memory, uv_image_memory);
- input_level = 0;
- }
-
- if (!input_texture) {
- DLOG(ERROR) << "Video image has no texture";
- return false;
- }
-
- // TODO(sunnyps): Use correct color space for uploaded video frames.
- gfx::ColorSpace src_color_space = gfx::ColorSpace::CreateREC709();
- if (image_dxgi && image_dxgi->color_space().IsValid())
- src_color_space = image_dxgi->color_space();
-
- if (!VideoProcessorBlt(input_texture, input_level, keyed_mutex,
- params.content_rect, src_color_space)) {
- return false;
- }
-
- if (first_present_) {
- first_present_ = false;
-
- HRESULT hr = swap_chain_->Present(0, 0);
- // Ignore DXGI_STATUS_OCCLUDED since that's not an error but only indicates
- // that the window is occluded and we can stop rendering.
- if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
- DLOG(ERROR) << "Present failed with error 0x" << std::hex << hr;
- return false;
- }
-
- // DirectComposition can display black for a swap chain between the first
- // and second time it's presented to - maybe the first Present can get
- // lost somehow and it shows the wrong buffer. In that case copy the
- // buffers so both have the correct contents, which seems to help. The
- // first Present() after this needs to have SyncInterval > 0, or else the
- // workaround doesn't help.
- Microsoft::WRL::ComPtr<ID3D11Texture2D> dest_texture;
- swap_chain_->GetBuffer(0, IID_PPV_ARGS(&dest_texture));
- DCHECK(dest_texture);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> src_texture;
- hr = swap_chain_->GetBuffer(1, IID_PPV_ARGS(&src_texture));
- DCHECK(src_texture);
- Microsoft::WRL::ComPtr<ID3D11DeviceContext> context;
- d3d11_device_->GetImmediateContext(&context);
- DCHECK(context);
- context->CopyResource(dest_texture.Get(), src_texture.Get());
-
- // Additionally wait for the GPU to finish executing its commands, or
- // there still may be a black flicker when presenting expensive content
- // (e.g. 4k video).
- Microsoft::WRL::ComPtr<IDXGIDevice2> dxgi_device2;
- d3d11_device_.As(&dxgi_device2);
- DCHECK(dxgi_device2);
- base::WaitableEvent event(base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
- hr = dxgi_device2->EnqueueSetEvent(event.handle());
- DCHECK(SUCCEEDED(hr));
- event.Wait();
- }
-
- // Ignore DXGI_STATUS_OCCLUDED since that's not an error but only indicates
- // that the window is occluded and we can stop rendering.
- HRESULT hr = swap_chain_->Present(1, 0);
- if (FAILED(hr) && hr != DXGI_STATUS_OCCLUDED) {
- DLOG(ERROR) << "Present failed with error 0x" << std::hex << hr;
- return false;
- }
- frames_since_color_space_change_++;
- RecordPresentationStatistics();
- return true;
-}
-
-void DCLayerTree::SwapChainPresenter::RecordPresentationStatistics() {
- OverlayFormat swap_chain_format =
- is_yuv_swapchain_ ? g_overlay_format_used : OverlayFormat::kBGRA;
- UMA_HISTOGRAM_ENUMERATION("GPU.DirectComposition.SwapChainFormat2",
- swap_chain_format);
-
- VideoPresentationMode presentation_mode;
- if (decode_swap_chain_) {
- presentation_mode = VideoPresentationMode::kZeroCopyDecodeSwapChain;
- } else if (staging_texture_) {
- presentation_mode = VideoPresentationMode::kUploadAndVideoProcessorBlit;
- } else {
- presentation_mode = VideoPresentationMode::kBindAndVideoProcessorBlit;
- }
- UMA_HISTOGRAM_ENUMERATION("GPU.DirectComposition.VideoPresentationMode",
- presentation_mode);
-
- UMA_HISTOGRAM_BOOLEAN("GPU.DirectComposition.DecodeSwapChainUsed",
- !!decode_swap_chain_);
-
- TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("gpu.service"),
- "SwapChain::Present", TRACE_EVENT_SCOPE_THREAD,
- "PixelFormat", swap_chain_format, "ZeroCopy",
- !!decode_swap_chain_);
- HRESULT hr = 0;
- Microsoft::WRL::ComPtr<IDXGISwapChainMedia> swap_chain_media;
- if (decode_swap_chain_) {
- hr = decode_swap_chain_.As(&swap_chain_media);
- } else {
- DCHECK(swap_chain_);
- hr = swap_chain_.As(&swap_chain_media);
- }
- if (SUCCEEDED(hr)) {
- DCHECK(swap_chain_media);
- DXGI_FRAME_STATISTICS_MEDIA stats = {};
- // GetFrameStatisticsMedia fails with DXGI_ERROR_FRAME_STATISTICS_DISJOINT
- // sometimes, which means an event (such as power cycle) interrupted the
- // gathering of presentation statistics. In this situation, calling the
- // function again succeeds but returns with CompositionMode = NONE.
- // Waiting for the DXGI adapter to finish presenting before calling the
- // function doesn't get rid of the failure.
- HRESULT hr = swap_chain_media->GetFrameStatisticsMedia(&stats);
- int mode = -1;
- if (SUCCEEDED(hr)) {
- base::UmaHistogramSparse("GPU.DirectComposition.CompositionMode",
- stats.CompositionMode);
- presentation_history_.AddSample(stats.CompositionMode);
- mode = stats.CompositionMode;
- }
- // Record CompositionMode as -1 if GetFrameStatisticsMedia() fails.
- TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("gpu.service"),
- "GetFrameStatisticsMedia", TRACE_EVENT_SCOPE_THREAD,
- "CompositionMode", mode);
- }
-}
-
-bool DCLayerTree::SwapChainPresenter::VideoProcessorBlt(
- Microsoft::WRL::ComPtr<ID3D11Texture2D> input_texture,
- UINT input_level,
- Microsoft::WRL::ComPtr<IDXGIKeyedMutex> keyed_mutex,
- const gfx::Rect& content_rect,
- const gfx::ColorSpace& src_color_space) {
- TRACE_EVENT2("gpu", "SwapChainPresenter::VideoProcessorBlt", "content_rect",
- content_rect.ToString(), "swap_chain_size",
- swap_chain_size_.ToString());
- if (!layer_tree_->InitializeVideoProcessor(content_rect.size(),
- swap_chain_size_)) {
- return false;
- }
- Microsoft::WRL::ComPtr<ID3D11VideoContext> video_context =
- layer_tree_->video_context();
- Microsoft::WRL::ComPtr<ID3D11VideoProcessor> video_processor =
- layer_tree_->video_processor();
-
- gfx::ColorSpace output_color_space =
- is_yuv_swapchain_ ? src_color_space : gfx::ColorSpace::CreateSRGB();
-
- if (base::FeatureList::IsEnabled(kFallbackBT709VideoToBT601) &&
- (output_color_space == gfx::ColorSpace::CreateREC709())) {
- output_color_space = gfx::ColorSpace::CreateREC601();
- }
-
- Microsoft::WRL::ComPtr<IDXGISwapChain3> swap_chain3;
- Microsoft::WRL::ComPtr<ID3D11VideoContext1> context1;
- if (SUCCEEDED(swap_chain_.As(&swap_chain3)) &&
- SUCCEEDED(video_context.As(&context1))) {
- DCHECK(swap_chain3);
- DCHECK(context1);
- // Set input color space.
- context1->VideoProcessorSetStreamColorSpace1(
- video_processor.Get(), 0,
- gfx::ColorSpaceWin::GetDXGIColorSpace(src_color_space));
- // Set output color space.
- DXGI_COLOR_SPACE_TYPE output_dxgi_color_space =
- gfx::ColorSpaceWin::GetDXGIColorSpace(
- output_color_space, is_yuv_swapchain_ /* force_yuv */);
- if (SUCCEEDED(swap_chain3->SetColorSpace1(output_dxgi_color_space))) {
- context1->VideoProcessorSetOutputColorSpace1(video_processor.Get(),
- output_dxgi_color_space);
- }
- } else {
- // This can't handle as many different types of color spaces, so use it
- // only if ID3D11VideoContext1 isn't available.
- D3D11_VIDEO_PROCESSOR_COLOR_SPACE src_d3d11_color_space =
- gfx::ColorSpaceWin::GetD3D11ColorSpace(src_color_space);
- video_context->VideoProcessorSetStreamColorSpace(video_processor.Get(), 0,
- &src_d3d11_color_space);
- D3D11_VIDEO_PROCESSOR_COLOR_SPACE output_d3d11_color_space =
- gfx::ColorSpaceWin::GetD3D11ColorSpace(output_color_space);
- video_context->VideoProcessorSetOutputColorSpace(video_processor.Get(),
- &output_d3d11_color_space);
- }
-
- {
- base::Optional<ScopedReleaseKeyedMutex> release_keyed_mutex;
- if (keyed_mutex) {
- // The producer may still be using this texture for a short period of
- // time, so wait long enough to hopefully avoid glitches. For example,
- // all levels of the texture share the same keyed mutex, so if the
- // hardware decoder acquired the mutex to decode into a different array
- // level then it still may block here temporarily.
- const int kMaxSyncTimeMs = 1000;
- HRESULT hr = keyed_mutex->AcquireSync(0, kMaxSyncTimeMs);
- if (FAILED(hr)) {
- DLOG(ERROR) << "Error acquiring keyed mutex: " << std::hex << hr;
- return false;
- }
- release_keyed_mutex.emplace(keyed_mutex, 0);
- }
-
- Microsoft::WRL::ComPtr<ID3D11VideoDevice> video_device =
- layer_tree_->video_device();
- Microsoft::WRL::ComPtr<ID3D11VideoProcessorEnumerator>
- video_processor_enumerator = layer_tree_->video_processor_enumerator();
-
- D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC input_desc = {};
- input_desc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
- input_desc.Texture2D.ArraySlice = input_level;
-
- Microsoft::WRL::ComPtr<ID3D11VideoProcessorInputView> input_view;
- HRESULT hr = video_device->CreateVideoProcessorInputView(
- input_texture.Get(), video_processor_enumerator.Get(), &input_desc,
- &input_view);
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateVideoProcessorInputView failed with error 0x"
- << std::hex << hr;
- return false;
- }
-
- D3D11_VIDEO_PROCESSOR_STREAM stream = {};
- stream.Enable = true;
- stream.OutputIndex = 0;
- stream.InputFrameOrField = 0;
- stream.PastFrames = 0;
- stream.FutureFrames = 0;
- stream.pInputSurface = input_view.Get();
- RECT dest_rect = gfx::Rect(swap_chain_size_).ToRECT();
- video_context->VideoProcessorSetOutputTargetRect(video_processor.Get(),
- TRUE, &dest_rect);
- video_context->VideoProcessorSetStreamDestRect(video_processor.Get(), 0,
- TRUE, &dest_rect);
- RECT source_rect = content_rect.ToRECT();
- video_context->VideoProcessorSetStreamSourceRect(video_processor.Get(), 0,
- TRUE, &source_rect);
-
- if (!output_view_) {
- Microsoft::WRL::ComPtr<ID3D11Texture2D> swap_chain_buffer;
- swap_chain_->GetBuffer(0, IID_PPV_ARGS(&swap_chain_buffer));
-
- D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC output_desc = {};
- output_desc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D;
- output_desc.Texture2D.MipSlice = 0;
-
- hr = video_device->CreateVideoProcessorOutputView(
- swap_chain_buffer.Get(), video_processor_enumerator.Get(),
- &output_desc, &output_view_);
- if (FAILED(hr)) {
- DLOG(ERROR) << "CreateVideoProcessorOutputView failed with error 0x"
- << std::hex << hr;
- return false;
- }
- DCHECK(output_view_);
- }
-
- hr = video_context->VideoProcessorBlt(video_processor.Get(),
- output_view_.Get(), 0, 1, &stream);
- if (FAILED(hr)) {
- DLOG(ERROR) << "VideoProcessorBlt failed with error 0x" << std::hex << hr;
- return false;
- }
- }
-
- return true;
-}
-
-void DCLayerTree::SwapChainPresenter::ReleaseSwapChainResources() {
- output_view_.Reset();
- swap_chain_.Reset();
- decode_surface_.Reset();
- decode_swap_chain_.Reset();
- decode_resource_.Reset();
- swap_chain_handle_.Close();
- staging_texture_.Reset();
-}
-
-bool DCLayerTree::SwapChainPresenter::ReallocateSwapChain(
- const gfx::Size& swap_chain_size,
- bool use_yuv_swap_chain,
- ui::ProtectedVideoType protected_video_type,
- bool z_order) {
- TRACE_EVENT2("gpu", "SwapChainPresenter::ReallocateSwapChain", "size",
- swap_chain_size.ToString(), "yuv", use_yuv_swap_chain);
-
- DCHECK(!swap_chain_size.IsEmpty());
- swap_chain_size_ = swap_chain_size;
-
- // ResizeBuffers can't change YUV flags so only attempt it when size changes.
- if (swap_chain_ && (is_yuv_swapchain_ == use_yuv_swap_chain) &&
- (protected_video_type_ == protected_video_type)) {
- output_view_.Reset();
- DXGI_SWAP_CHAIN_DESC1 desc = {};
- swap_chain_->GetDesc1(&desc);
- HRESULT hr = swap_chain_->ResizeBuffers(
- desc.BufferCount, swap_chain_size.width(), swap_chain_size.height(),
- desc.Format, desc.Flags);
- if (SUCCEEDED(hr))
- return true;
- DLOG(ERROR) << "ResizeBuffers failed with error 0x" << std::hex << hr;
- }
-
- protected_video_type_ = protected_video_type;
-
- if (is_yuv_swapchain_ != use_yuv_swap_chain) {
- UMA_HISTOGRAM_COUNTS_1000(
- "GPU.DirectComposition.FramesSinceColorSpaceChange",
- frames_since_color_space_change_);
- frames_since_color_space_change_ = 0;
- }
- is_yuv_swapchain_ = false;
-
- ReleaseSwapChainResources();
-
- Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
- d3d11_device_.As(&dxgi_device);
- DCHECK(dxgi_device);
- Microsoft::WRL::ComPtr<IDXGIAdapter> dxgi_adapter;
- dxgi_device->GetAdapter(&dxgi_adapter);
- DCHECK(dxgi_adapter);
- Microsoft::WRL::ComPtr<IDXGIFactoryMedia> media_factory;
- dxgi_adapter->GetParent(IID_PPV_ARGS(&media_factory));
- DCHECK(media_factory);
-
- // The composition surface handle is only used to create YUV swap chains since
- // CreateSwapChainForComposition can't do that.
- HANDLE handle = INVALID_HANDLE_VALUE;
- if (!CreateSurfaceHandleHelper(&handle))
- return false;
- swap_chain_handle_.Set(handle);
-
- first_present_ = true;
-
- DXGI_SWAP_CHAIN_DESC1 desc = {};
- desc.Width = swap_chain_size_.width();
- desc.Height = swap_chain_size_.height();
- desc.Format = g_overlay_dxgi_format_used;
- desc.Stereo = FALSE;
- desc.SampleDesc.Count = 1;
- desc.BufferCount = 2;
- desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
- desc.Scaling = DXGI_SCALING_STRETCH;
- desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
- desc.Flags =
- DXGI_SWAP_CHAIN_FLAG_YUV_VIDEO | DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO;
- if (IsProtectedVideo(protected_video_type))
- desc.Flags |= DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
- if (protected_video_type == ui::ProtectedVideoType::kHardwareProtected)
- desc.Flags |= DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
- desc.AlphaMode = DXGI_ALPHA_MODE_IGNORE;
-
- const std::string kSwapChainCreationResultByFormatUmaPrefix =
- "GPU.DirectComposition.SwapChainCreationResult2.";
-
- const std::string kSwapChainCreationResultByVideoTypeUmaPrefix =
- "GPU.DirectComposition.SwapChainCreationResult3.";
- const std::string protected_video_type_string =
- ProtectedVideoTypeToString(protected_video_type);
-
- if (use_yuv_swap_chain) {
- TRACE_EVENT1("gpu", "SwapChainPresenter::ReallocateSwapChain::YUV",
- "format", OverlayFormatToString(g_overlay_format_used));
- HRESULT hr = media_factory->CreateSwapChainForCompositionSurfaceHandle(
- d3d11_device_.Get(), swap_chain_handle_.Get(), &desc, nullptr,
- &swap_chain_);
- is_yuv_swapchain_ = SUCCEEDED(hr);
- failed_to_create_yuv_swapchain_ = !is_yuv_swapchain_;
-
- base::UmaHistogramSparse(kSwapChainCreationResultByFormatUmaPrefix +
- OverlayFormatToString(g_overlay_format_used),
- hr);
- base::UmaHistogramSparse(kSwapChainCreationResultByVideoTypeUmaPrefix +
- protected_video_type_string,
- hr);
-
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to create "
- << OverlayFormatToString(g_overlay_format_used)
- << " swap chain of size " << swap_chain_size.ToString()
- << " with error 0x" << std::hex << hr
- << "\nFalling back to BGRA";
- }
- }
- if (!is_yuv_swapchain_) {
- TRACE_EVENT0("gpu", "SwapChainPresenter::ReallocateSwapChain::BGRA");
- desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
- desc.Flags = 0;
- if (IsProtectedVideo(protected_video_type))
- desc.Flags |= DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
- if (protected_video_type == ui::ProtectedVideoType::kHardwareProtected)
- desc.Flags |= DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
-
- HRESULT hr = media_factory->CreateSwapChainForCompositionSurfaceHandle(
- d3d11_device_.Get(), swap_chain_handle_.Get(), &desc, nullptr,
- &swap_chain_);
-
- base::UmaHistogramSparse(kSwapChainCreationResultByFormatUmaPrefix +
- OverlayFormatToString(OverlayFormat::kBGRA),
- hr);
- base::UmaHistogramSparse(kSwapChainCreationResultByVideoTypeUmaPrefix +
- protected_video_type_string,
- hr);
-
- if (FAILED(hr)) {
- // Disable overlay support so dc_layer_overlay will stop sending down
- // overlay frames here and uses GL Composition instead.
- g_supports_overlays = false;
- DLOG(ERROR) << "Failed to create BGRA swap chain of size "
- << swap_chain_size.ToString() << " with error 0x" << std::hex
- << hr << ". Disable overlay swap chains";
- return false;
- }
- }
- return true;
-}
-
-bool DCLayerTree::CommitAndClearPendingOverlays(
- DirectCompositionChildSurfaceWin* root_surface) {
- TRACE_EVENT1("gpu", "DCLayerTree::CommitAndClearPendingOverlays",
- "num_pending_overlays", pending_overlays_.size());
- DCHECK(!needs_commit_);
- // Check if root surface visual needs a commit first.
- if (!root_surface_visual_) {
- dcomp_device_->CreateVisual(&root_surface_visual_);
- needs_commit_ = true;
- }
-
- if (root_surface->swap_chain() != root_swap_chain_ ||
- root_surface->dcomp_surface() != root_dcomp_surface_ ||
- root_surface->dcomp_surface_serial() != root_dcomp_surface_serial_) {
- root_swap_chain_ = root_surface->swap_chain();
- root_dcomp_surface_ = root_surface->dcomp_surface();
- root_dcomp_surface_serial_ = root_surface->dcomp_surface_serial();
- root_surface_visual_->SetContent(
- root_swap_chain_ ? static_cast<IUnknown*>(root_swap_chain_.Get())
- : static_cast<IUnknown*>(root_dcomp_surface_.Get()));
- needs_commit_ = true;
- }
-
- std::vector<std::unique_ptr<ui::DCRendererLayerParams>> overlays;
- std::swap(pending_overlays_, overlays);
-
- // Sort layers by z-order.
- std::sort(overlays.begin(), overlays.end(),
- [](const auto& a, const auto& b) -> bool {
- return a->z_order < b->z_order;
- });
-
- // If we need to grow or shrink swap chain presenters, we'll need to add or
- // remove visuals.
- if (video_swap_chains_.size() != overlays.size()) {
- // Grow or shrink list of swap chain presenters to match pending overlays.
- std::vector<std::unique_ptr<SwapChainPresenter>> new_video_swap_chains;
- for (size_t i = 0; i < overlays.size(); ++i) {
- // TODO(sunnyps): Try to find a matching swap chain based on size, type of
- // swap chain, gl image, etc.
- if (i < video_swap_chains_.size()) {
- new_video_swap_chains.emplace_back(std::move(video_swap_chains_[i]));
- } else {
- new_video_swap_chains.emplace_back(std::make_unique<SwapChainPresenter>(
- this, d3d11_device_, dcomp_device_));
- }
- }
- video_swap_chains_.swap(new_video_swap_chains);
- needs_commit_ = true;
- }
-
- // Present to each swap chain.
- for (size_t i = 0; i < overlays.size(); ++i) {
- auto& video_swap_chain = video_swap_chains_[i];
- if (!video_swap_chain->PresentToSwapChain(*overlays[i])) {
- DLOG(ERROR) << "PresentToSwapChain failed";
- return false;
- }
- }
-
- // Rebuild visual tree and commit if any visual changed.
- if (needs_commit_) {
- TRACE_EVENT0("gpu", "DCLayerTree::CommitAndClearPendingOverlays::Commit");
- needs_commit_ = false;
- dcomp_root_visual_->RemoveAllVisuals();
-
- // Add layers with negative z-order first.
- size_t i = 0;
- for (; i < overlays.size() && overlays[i]->z_order < 0; ++i) {
- IDCompositionVisual2* visual = video_swap_chains_[i]->visual().Get();
- // We call AddVisual with insertAbove FALSE and referenceVisual nullptr
- // which is equivalent to saying that the visual should be below no other
- // visual, or in other words it should be above all other visuals.
- dcomp_root_visual_->AddVisual(visual, FALSE, nullptr);
- }
-
- // Add root surface visual at z-order 0.
- dcomp_root_visual_->AddVisual(root_surface_visual_.Get(), FALSE, nullptr);
-
- // Add visuals with positive z-order.
- for (; i < overlays.size(); ++i) {
- // There shouldn't be a layer with z-order 0. Otherwise, we can't tell
- // its order with respect to root surface.
- DCHECK_GT(overlays[i]->z_order, 0);
- IDCompositionVisual2* visual = video_swap_chains_[i]->visual().Get();
- dcomp_root_visual_->AddVisual(visual, FALSE, nullptr);
- }
-
- HRESULT hr = dcomp_device_->Commit();
- if (FAILED(hr)) {
- DLOG(ERROR) << "Commit failed with error 0x" << std::hex << hr;
- return false;
- }
- }
-
- return true;
-}
-
-bool DCLayerTree::ScheduleDCLayer(const ui::DCRendererLayerParams& params) {
- pending_overlays_.push_back(
- std::make_unique<ui::DCRendererLayerParams>(params));
- return true;
-}
-
-DirectCompositionSurfaceWin::DirectCompositionSurfaceWin(
- std::unique_ptr<gfx::VSyncProvider> vsync_provider,
- base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
- HWND parent_window)
- : gl::GLSurfaceEGL(),
- child_window_(delegate, parent_window),
- root_surface_(new DirectCompositionChildSurfaceWin()),
- layer_tree_(std::make_unique<DCLayerTree>(
- delegate->GetFeatureInfo()->workarounds())),
- vsync_provider_(std::move(vsync_provider)),
- presentation_helper_(std::make_unique<gl::GLSurfacePresentationHelper>(
- vsync_provider_.get())) {}
-
-DirectCompositionSurfaceWin::~DirectCompositionSurfaceWin() {
- Destroy();
-}
-
-// static
-bool DirectCompositionSurfaceWin::IsDirectCompositionSupported() {
- static const bool supported = [] {
- base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
- if (command_line->HasSwitch(switches::kDisableDirectComposition))
- return false;
-
- // Blacklist direct composition if MCTU.dll or MCTUX.dll are injected. These
- // are user mode drivers for display adapters from Magic Control Technology
- // Corporation.
- if (GetModuleHandle(TEXT("MCTU.dll")) ||
- GetModuleHandle(TEXT("MCTUX.dll"))) {
- DLOG(ERROR) << "Blacklisted due to third party modules";
- return false;
- }
-
- // Flexible surface compatibility is required to be able to MakeCurrent with
- // the default pbuffer surface.
- if (!gl::GLSurfaceEGL::IsEGLFlexibleSurfaceCompatibilitySupported()) {
- DLOG(ERROR) << "EGL_ANGLE_flexible_surface_compatibility not supported";
- return false;
- }
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
- if (!d3d11_device) {
- DLOG(ERROR) << "Failed to retrieve D3D11 device";
- return false;
- }
-
- // This will fail if the D3D device is "Microsoft Basic Display Adapter".
- Microsoft::WRL::ComPtr<ID3D11VideoDevice> video_device;
- if (FAILED(d3d11_device.As(&video_device))) {
- DLOG(ERROR) << "Failed to retrieve video device";
- return false;
- }
-
- // This will fail if DirectComposition DLL can't be loaded.
- Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device =
- gl::QueryDirectCompositionDevice(d3d11_device);
- if (!dcomp_device) {
- DLOG(ERROR) << "Failed to retrieve direct composition device";
- return false;
- }
-
- return true;
- }();
- return supported;
-}
-
-// static
-bool DirectCompositionSurfaceWin::AreOverlaysSupported() {
- // Always initialize and record overlay support information irrespective of
- // command line flags.
- InitializeHardwareOverlaySupport();
-
- base::CommandLine* command_line = base::CommandLine::ForCurrentProcess();
- // Enable flag should be checked before the disable flag, so we could
- // overwrite GPU driver bug workarounds in testing.
- if (command_line->HasSwitch(switches::kEnableDirectCompositionLayers))
- return true;
- if (command_line->HasSwitch(switches::kDisableDirectCompositionLayers))
- return false;
-
- return g_supports_overlays;
-}
-
-// static
-OverlayCapabilities DirectCompositionSurfaceWin::GetOverlayCapabilities() {
- InitializeHardwareOverlaySupport();
- OverlayCapabilities capabilities;
- for (const auto& info : g_overlay_support_info) {
- if (info.flags) {
- OverlayCapability cap;
- cap.format = info.overlay_format;
- cap.is_scaling_supported =
- !!(info.flags & DXGI_OVERLAY_SUPPORT_FLAG_SCALING);
- capabilities.push_back(cap);
- }
- }
- return capabilities;
-}
-
-// static
-void DirectCompositionSurfaceWin::SetScaledOverlaysSupportedForTesting(
- bool value) {
- g_supports_scaled_overlays = value;
-}
-
-// static
-void DirectCompositionSurfaceWin::SetPreferNV12OverlaysForTesting() {
- g_overlay_format_used = OverlayFormat::kNV12;
- g_overlay_dxgi_format_used = DXGI_FORMAT_NV12;
-}
-
-// static
-bool DirectCompositionSurfaceWin::IsHDRSupported() {
- // HDR support was introduced in Windows 10 Creators Update.
- if (base::win::GetVersion() < base::win::VERSION_WIN10_RS2)
- return false;
-
- HRESULT hr = S_OK;
- Microsoft::WRL::ComPtr<IDXGIFactory> factory;
- hr = CreateDXGIFactory(IID_PPV_ARGS(&factory));
- if (FAILED(hr)) {
- DLOG(ERROR) << "Failed to create DXGI factory.";
- return false;
- }
-
- bool hdr_monitor_found = false;
- for (UINT adapter_index = 0;; ++adapter_index) {
- Microsoft::WRL::ComPtr<IDXGIAdapter> adapter;
- hr = factory->EnumAdapters(adapter_index, &adapter);
- if (hr == DXGI_ERROR_NOT_FOUND)
- break;
- if (FAILED(hr)) {
- DLOG(ERROR) << "Unexpected error creating DXGI adapter.";
- break;
- }
-
- for (UINT output_index = 0;; ++output_index) {
- Microsoft::WRL::ComPtr<IDXGIOutput> output;
- hr = adapter->EnumOutputs(output_index, &output);
- if (hr == DXGI_ERROR_NOT_FOUND)
- break;
- if (FAILED(hr)) {
- DLOG(ERROR) << "Unexpected error creating DXGI adapter.";
- break;
- }
-
- Microsoft::WRL::ComPtr<IDXGIOutput6> output6;
- hr = output->QueryInterface(IID_PPV_ARGS(&output6));
- if (FAILED(hr)) {
- DLOG(WARNING) << "IDXGIOutput6 is required for HDR detection.";
- continue;
- }
-
- DXGI_OUTPUT_DESC1 desc;
- if (FAILED(output6->GetDesc1(&desc))) {
- DLOG(ERROR) << "Unexpected error getting output descriptor.";
- continue;
- }
-
- base::UmaHistogramSparse("GPU.Output.ColorSpace", desc.ColorSpace);
- base::UmaHistogramSparse("GPU.Output.MaxLuminance", desc.MaxLuminance);
-
- if (desc.ColorSpace == DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020) {
- hdr_monitor_found = true;
- }
- }
- }
-
- UMA_HISTOGRAM_BOOLEAN("GPU.Output.HDR", hdr_monitor_found);
- return hdr_monitor_found;
-}
-
-// static
-bool DirectCompositionSurfaceWin::Initialize(gl::GLSurfaceFormat format) {
- d3d11_device_ = gl::QueryD3D11DeviceObjectFromANGLE();
- if (!d3d11_device_) {
- DLOG(ERROR) << "Failed to retrieve D3D11 device from ANGLE";
- return false;
- }
-
- dcomp_device_ = gl::QueryDirectCompositionDevice(d3d11_device_);
- if (!dcomp_device_) {
- DLOG(ERROR)
- << "Failed to retrieve direct compostion device from D3D11 device";
- return false;
- }
-
- if (!child_window_.Initialize()) {
- DLOG(ERROR) << "Failed to initialize native window";
- return false;
- }
- window_ = child_window_.window();
-
- if (!layer_tree_->Initialize(window_, d3d11_device_, dcomp_device_))
- return false;
-
- if (!root_surface_->Initialize(gl::GLSurfaceFormat()))
- return false;
-
- return true;
-}
-
-void DirectCompositionSurfaceWin::Destroy() {
- // Destroy presentation helper first because its dtor calls GetHandle.
- presentation_helper_ = nullptr;
- root_surface_->Destroy();
-}
-
-gfx::Size DirectCompositionSurfaceWin::GetSize() {
- return root_surface_->GetSize();
-}
-
-bool DirectCompositionSurfaceWin::IsOffscreen() {
- return false;
-}
-
-void* DirectCompositionSurfaceWin::GetHandle() {
- return root_surface_->GetHandle();
-}
-
-bool DirectCompositionSurfaceWin::Resize(const gfx::Size& size,
- float scale_factor,
- ColorSpace color_space,
- bool has_alpha) {
- // Force a resize and redraw (but not a move, activate, etc.).
- if (!SetWindowPos(window_, nullptr, 0, 0, size.width(), size.height(),
- SWP_NOMOVE | SWP_NOACTIVATE | SWP_NOCOPYBITS |
- SWP_NOOWNERZORDER | SWP_NOZORDER)) {
- return false;
- }
- return root_surface_->Resize(size, scale_factor, color_space, has_alpha);
-}
-
-gfx::SwapResult DirectCompositionSurfaceWin::SwapBuffers(
- PresentationCallback callback) {
- TRACE_EVENT0("gpu", "DirectCompositionSurfaceWin::SwapBuffers");
- gl::GLSurfacePresentationHelper::ScopedSwapBuffers scoped_swap_buffers(
- presentation_helper_.get(), std::move(callback));
-
- bool succeeded = true;
- if (root_surface_->SwapBuffers(PresentationCallback()) ==
- gfx::SwapResult::SWAP_FAILED)
- succeeded = false;
-
- if (!layer_tree_->CommitAndClearPendingOverlays(root_surface_.get()))
- succeeded = false;
-
- auto swap_result =
- succeeded ? gfx::SwapResult::SWAP_ACK : gfx::SwapResult::SWAP_FAILED;
- scoped_swap_buffers.set_result(swap_result);
- return swap_result;
-}
-
-gfx::SwapResult DirectCompositionSurfaceWin::PostSubBuffer(
- int x,
- int y,
- int width,
- int height,
- PresentationCallback callback) {
- // The arguments are ignored because SetDrawRectangle specified the area to
- // be swapped.
- return SwapBuffers(std::move(callback));
-}
-
-gfx::VSyncProvider* DirectCompositionSurfaceWin::GetVSyncProvider() {
- return vsync_provider_.get();
-}
-
-void DirectCompositionSurfaceWin::SetVSyncEnabled(bool enabled) {
- root_surface_->SetVSyncEnabled(enabled);
-}
-
-bool DirectCompositionSurfaceWin::ScheduleDCLayer(
- const ui::DCRendererLayerParams& params) {
- return layer_tree_->ScheduleDCLayer(params);
-}
-
-bool DirectCompositionSurfaceWin::SetEnableDCLayers(bool enable) {
- return root_surface_->SetEnableDCLayers(enable);
-}
-
-bool DirectCompositionSurfaceWin::FlipsVertically() const {
- return true;
-}
-
-bool DirectCompositionSurfaceWin::SupportsPresentationCallback() {
- return true;
-}
-
-bool DirectCompositionSurfaceWin::SupportsPostSubBuffer() {
- return true;
-}
-
-bool DirectCompositionSurfaceWin::OnMakeCurrent(gl::GLContext* context) {
- if (presentation_helper_)
- presentation_helper_->OnMakeCurrent(context, this);
- return root_surface_->OnMakeCurrent(context);
-}
-
-bool DirectCompositionSurfaceWin::SupportsDCLayers() const {
- return true;
-}
-
-bool DirectCompositionSurfaceWin::UseOverlaysForVideo() const {
- return AreOverlaysSupported();
-}
-
-bool DirectCompositionSurfaceWin::SupportsProtectedVideo() const {
- // TODO(magchen): Check the gpu driver date (or a function) which we know this
- // new support is enabled.
- return AreOverlaysSupported();
-}
-
-bool DirectCompositionSurfaceWin::SetDrawRectangle(const gfx::Rect& rectangle) {
- return root_surface_->SetDrawRectangle(rectangle);
-}
-
-gfx::Vector2d DirectCompositionSurfaceWin::GetDrawOffset() const {
- return root_surface_->GetDrawOffset();
-}
-
-scoped_refptr<base::TaskRunner>
-DirectCompositionSurfaceWin::GetWindowTaskRunnerForTesting() {
- return child_window_.GetTaskRunnerForTesting();
-}
-
-Microsoft::WRL::ComPtr<IDXGISwapChain1>
-DirectCompositionSurfaceWin::GetLayerSwapChainForTesting(size_t index) const {
- return layer_tree_->GetLayerSwapChainForTesting(index);
-}
-
-Microsoft::WRL::ComPtr<IDXGISwapChain1>
-DirectCompositionSurfaceWin::GetBackbufferSwapChainForTesting() const {
- return root_surface_->swap_chain();
-}
-
-} // namespace gpu
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win.h b/chromium/gpu/ipc/service/direct_composition_surface_win.h
deleted file mode 100644
index b0217bb528f..00000000000
--- a/chromium/gpu/ipc/service/direct_composition_surface_win.h
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef GPU_IPC_SERVICE_DIRECT_COMPOSITION_SURFACE_WIN_H_
-#define GPU_IPC_SERVICE_DIRECT_COMPOSITION_SURFACE_WIN_H_
-
-#include <windows.h>
-#include <d3d11.h>
-#include <dcomp.h>
-#include <wrl/client.h>
-
-#include "base/memory/weak_ptr.h"
-#include "gpu/config/gpu_driver_bug_workarounds.h"
-#include "gpu/config/gpu_info.h"
-#include "gpu/ipc/service/child_window_win.h"
-#include "gpu/ipc/service/gpu_ipc_service_export.h"
-#include "gpu/ipc/service/image_transport_surface_delegate.h"
-#include "ui/gl/gl_image.h"
-#include "ui/gl/gl_surface_egl.h"
-
-namespace gl {
-class GLSurfacePresentationHelper;
-}
-
-namespace gpu {
-
-class DCLayerTree;
-class DirectCompositionChildSurfaceWin;
-
-class GPU_IPC_SERVICE_EXPORT DirectCompositionSurfaceWin
- : public gl::GLSurfaceEGL {
- public:
- DirectCompositionSurfaceWin(
- std::unique_ptr<gfx::VSyncProvider> vsync_provider,
- base::WeakPtr<ImageTransportSurfaceDelegate> delegate,
- HWND parent_window);
-
- // Returns true if direct composition is supported. We prefer to use direct
- // composition event without hardware overlays, because it allows us to bypass
- // blitting by DWM to the window redirection surface by using a flip mode swap
- // chain. Overridden with --disable-direct-composition.
- static bool IsDirectCompositionSupported();
-
- // Returns true if hardware overlays are supported, and DirectComposition
- // surface and layers should be used. Overridden with
- // --enable-direct-composition-layers and --disable-direct-composition-layers.
- static bool AreOverlaysSupported();
-
- // Returns a list of supported overlay formats for GPUInfo. This does not
- // depend on finch features or command line flags.
- static OverlayCapabilities GetOverlayCapabilities();
-
- // Returns true if there is an HDR capable display connected.
- static bool IsHDRSupported();
-
- static void SetScaledOverlaysSupportedForTesting(bool value);
-
- static void SetPreferNV12OverlaysForTesting();
-
- bool InitializeNativeWindow();
-
- // GLSurfaceEGL implementation.
- bool Initialize(gl::GLSurfaceFormat format) override;
- void Destroy() override;
- gfx::Size GetSize() override;
- bool IsOffscreen() override;
- void* GetHandle() override;
- bool Resize(const gfx::Size& size,
- float scale_factor,
- ColorSpace color_space,
- bool has_alpha) override;
- gfx::SwapResult SwapBuffers(PresentationCallback callback) override;
- gfx::SwapResult PostSubBuffer(int x,
- int y,
- int width,
- int height,
- PresentationCallback callback) override;
- gfx::VSyncProvider* GetVSyncProvider() override;
- void SetVSyncEnabled(bool enabled) override;
- bool SetEnableDCLayers(bool enable) override;
- bool FlipsVertically() const override;
- bool SupportsPresentationCallback() override;
- bool SupportsPostSubBuffer() override;
- bool OnMakeCurrent(gl::GLContext* context) override;
- bool SupportsDCLayers() const override;
- bool UseOverlaysForVideo() const override;
- bool SupportsProtectedVideo() const override;
- bool SetDrawRectangle(const gfx::Rect& rect) override;
- gfx::Vector2d GetDrawOffset() const override;
-
- // This schedules an overlay plane to be displayed on the next SwapBuffers
- // or PostSubBuffer call. Overlay planes must be scheduled before every swap
- // to remain in the layer tree. This surface's backbuffer doesn't have to be
- // scheduled with ScheduleDCLayer, as it's automatically placed in the layer
- // tree at z-order 0.
- bool ScheduleDCLayer(const ui::DCRendererLayerParams& params) override;
-
- scoped_refptr<base::TaskRunner> GetWindowTaskRunnerForTesting();
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> GetLayerSwapChainForTesting(
- size_t index) const;
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> GetBackbufferSwapChainForTesting()
- const;
-
- protected:
- ~DirectCompositionSurfaceWin() override;
-
- private:
- HWND window_ = nullptr;
- ChildWindowWin child_window_;
-
- scoped_refptr<DirectCompositionChildSurfaceWin> root_surface_;
- std::unique_ptr<DCLayerTree> layer_tree_;
- std::unique_ptr<gfx::VSyncProvider> vsync_provider_;
- std::unique_ptr<gl::GLSurfacePresentationHelper> presentation_helper_;
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device_;
- Microsoft::WRL::ComPtr<IDCompositionDevice2> dcomp_device_;
-
- DISALLOW_COPY_AND_ASSIGN(DirectCompositionSurfaceWin);
-};
-
-} // namespace gpu
-
-#endif // GPU_IPC_SERVICE_DIRECT_COMPOSITION_SURFACE_WIN_H_
diff --git a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc b/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
deleted file mode 100644
index 88a31959ad4..00000000000
--- a/chromium/gpu/ipc/service/direct_composition_surface_win_unittest.cc
+++ /dev/null
@@ -1,1246 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "gpu/ipc/service/direct_composition_surface_win.h"
-
-#include "base/bind_helpers.h"
-#include "base/memory/ref_counted_memory.h"
-#include "base/memory/weak_ptr.h"
-#include "base/run_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/win/scoped_gdi_object.h"
-#include "base/win/scoped_hdc.h"
-#include "base/win/scoped_select_object.h"
-#include "gpu/command_buffer/common/swap_buffers_complete_params.h"
-#include "gpu/command_buffer/service/feature_info.h"
-#include "gpu/config/gpu_preferences.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "ui/base/win/hidden_window.h"
-#include "ui/gfx/buffer_format_util.h"
-#include "ui/gfx/gdi_util.h"
-#include "ui/gfx/transform.h"
-#include "ui/gl/dc_renderer_layer_params.h"
-#include "ui/gl/gl_angle_util_win.h"
-#include "ui/gl/gl_context.h"
-#include "ui/gl/gl_image_dxgi.h"
-#include "ui/gl/gl_image_ref_counted_memory.h"
-#include "ui/gl/init/gl_factory.h"
-#include "ui/platform_window/platform_window_delegate.h"
-#include "ui/platform_window/win/win_window.h"
-
-namespace gpu {
-namespace {
-
-bool CheckIfDCSupported() {
- if (!gl::QueryDirectCompositionDevice(
- gl::QueryD3D11DeviceObjectFromANGLE())) {
- LOG(WARNING)
- << "GL implementation not using DirectComposition, skipping test.";
- return false;
- }
- return true;
-}
-
-class TestImageTransportSurfaceDelegate
- : public ImageTransportSurfaceDelegate,
- public base::SupportsWeakPtr<TestImageTransportSurfaceDelegate> {
- public:
- TestImageTransportSurfaceDelegate()
- : feature_info_(new gpu::gles2::FeatureInfo()) {}
-
- ~TestImageTransportSurfaceDelegate() override {}
-
- // ImageTransportSurfaceDelegate implementation.
- void DidCreateAcceleratedSurfaceChildWindow(
- SurfaceHandle parent_window,
- SurfaceHandle child_window) override {
- if (parent_window)
- ::SetParent(child_window, parent_window);
- }
- void DidSwapBuffersComplete(SwapBuffersCompleteParams params) override {}
- const gles2::FeatureInfo* GetFeatureInfo() const override {
- return feature_info_.get();
- }
- const GpuPreferences& GetGpuPreferences() const override {
- return gpu_preferences_;
- }
- void BufferPresented(const gfx::PresentationFeedback& feedback) override {}
- void AddFilter(IPC::MessageFilter* message_filter) override {}
- int32_t GetRouteID() const override { return 0; }
-
- private:
- scoped_refptr<gpu::gles2::FeatureInfo> feature_info_;
- GpuPreferences gpu_preferences_;
-};
-
-class TestPlatformDelegate : public ui::PlatformWindowDelegate {
- public:
- // ui::PlatformWindowDelegate implementation.
- void OnBoundsChanged(const gfx::Rect& new_bounds) override {}
- void OnDamageRect(const gfx::Rect& damaged_region) override {}
- void DispatchEvent(ui::Event* event) override {}
- void OnCloseRequest() override {}
- void OnClosed() override {}
- void OnWindowStateChanged(ui::PlatformWindowState new_state) override {}
- void OnLostCapture() override {}
- void OnAcceleratedWidgetAvailable(gfx::AcceleratedWidget widget) override {}
- void OnAcceleratedWidgetDestroyed() override {}
- void OnActivationChanged(bool active) override {}
-};
-
-void RunPendingTasks(scoped_refptr<base::TaskRunner> task_runner) {
- base::WaitableEvent done(base::WaitableEvent::ResetPolicy::AUTOMATIC,
- base::WaitableEvent::InitialState::NOT_SIGNALED);
- task_runner->PostTask(FROM_HERE,
- Bind(&base::WaitableEvent::Signal, Unretained(&done)));
- done.Wait();
-}
-
-void DestroySurface(scoped_refptr<DirectCompositionSurfaceWin> surface) {
- scoped_refptr<base::TaskRunner> task_runner =
- surface->GetWindowTaskRunnerForTesting();
- DCHECK(surface->HasOneRef());
-
- surface = nullptr;
-
- // Ensure that the ChildWindowWin posts the task to delete the thread to the
- // main loop before doing RunUntilIdle. Otherwise the child threads could
- // outlive the main thread.
- RunPendingTasks(task_runner);
-
- base::RunLoop().RunUntilIdle();
-}
-
-Microsoft::WRL::ComPtr<ID3D11Texture2D> CreateNV12Texture(
- const Microsoft::WRL::ComPtr<ID3D11Device>& d3d11_device,
- const gfx::Size& size,
- bool shared) {
- D3D11_TEXTURE2D_DESC desc = {};
- desc.Width = size.width();
- desc.Height = size.height();
- desc.MipLevels = 1;
- desc.ArraySize = 1;
- desc.Format = DXGI_FORMAT_NV12;
- desc.Usage = D3D11_USAGE_DEFAULT;
- desc.SampleDesc.Count = 1;
- desc.BindFlags = 0;
- if (shared) {
- desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX |
- D3D11_RESOURCE_MISC_SHARED_NTHANDLE;
- }
-
- std::vector<char> image_data(size.width() * size.height() * 3 / 2);
- // Y, U, and V should all be 160. Output color should be pink.
- memset(&image_data[0], 160, size.width() * size.height() * 3 / 2);
-
- D3D11_SUBRESOURCE_DATA data = {};
- data.pSysMem = (const void*)&image_data[0];
- data.SysMemPitch = size.width();
-
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture;
- HRESULT hr = d3d11_device->CreateTexture2D(&desc, &data, &texture);
- CHECK(SUCCEEDED(hr));
- return texture;
-}
-
-TEST(DirectCompositionSurfaceTest, TestMakeCurrent) {
- if (!CheckIfDCSupported())
- return;
-
- TestImageTransportSurfaceDelegate delegate;
-
- scoped_refptr<DirectCompositionSurfaceWin> surface1(
- new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
- ui::GetHiddenWindow()));
- EXPECT_TRUE(surface1->Initialize(gl::GLSurfaceFormat()));
-
- scoped_refptr<gl::GLContext> context1 = gl::init::CreateGLContext(
- nullptr, surface1.get(), gl::GLContextAttribs());
- EXPECT_TRUE(context1->MakeCurrent(surface1.get()));
-
- surface1->SetEnableDCLayers(true);
- EXPECT_TRUE(surface1->Resize(gfx::Size(100, 100), 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
-
- // First SetDrawRectangle must be full size of surface.
- EXPECT_FALSE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
- EXPECT_TRUE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
-
- // SetDrawRectangle can't be called again until swap.
- EXPECT_FALSE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface1->SwapBuffers(base::DoNothing()));
-
- EXPECT_TRUE(context1->IsCurrent(surface1.get()));
-
- // SetDrawRectangle must be contained within surface.
- EXPECT_FALSE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 101, 101)));
- EXPECT_TRUE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- EXPECT_TRUE(context1->IsCurrent(surface1.get()));
-
- EXPECT_TRUE(surface1->Resize(gfx::Size(50, 50), 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_TRUE(context1->IsCurrent(surface1.get()));
- EXPECT_TRUE(surface1->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
- EXPECT_TRUE(context1->IsCurrent(surface1.get()));
-
- scoped_refptr<DirectCompositionSurfaceWin> surface2(
- new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
- ui::GetHiddenWindow()));
- EXPECT_TRUE(surface2->Initialize(gl::GLSurfaceFormat()));
-
- scoped_refptr<gl::GLContext> context2 = gl::init::CreateGLContext(
- nullptr, surface2.get(), gl::GLContextAttribs());
- EXPECT_TRUE(context2->MakeCurrent(surface2.get()));
-
- surface2->SetEnableDCLayers(true);
- EXPECT_TRUE(surface2->Resize(gfx::Size(100, 100), 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- // The previous IDCompositionSurface should be suspended when another
- // surface is being drawn to.
- EXPECT_TRUE(surface2->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- EXPECT_TRUE(context2->IsCurrent(surface2.get()));
-
- // It should be possible to switch back to the previous surface and
- // unsuspend it.
- EXPECT_TRUE(context1->MakeCurrent(surface1.get()));
- context2 = nullptr;
- context1 = nullptr;
-
- DestroySurface(std::move(surface1));
- DestroySurface(std::move(surface2));
-}
-
-// Tests that switching using EnableDCLayers works.
-TEST(DirectCompositionSurfaceTest, DXGIDCLayerSwitch) {
- if (!CheckIfDCSupported())
- return;
-
- TestImageTransportSurfaceDelegate delegate;
-
- scoped_refptr<DirectCompositionSurfaceWin> surface(
- new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
- ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
-
- scoped_refptr<gl::GLContext> context =
- gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(context->MakeCurrent(surface.get()));
-
- EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_FALSE(surface->GetBackbufferSwapChainForTesting());
-
- // First SetDrawRectangle must be full size of surface for DXGI swapchain.
- EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
- EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- EXPECT_TRUE(surface->GetBackbufferSwapChainForTesting());
-
- // SetDrawRectangle and SetEnableDCLayers can't be called again until swap.
- EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
- EXPECT_TRUE(context->IsCurrent(surface.get()));
-
- surface->SetEnableDCLayers(true);
-
- // Surface switched to use IDCompositionSurface, so must draw to entire
- // surface.
- EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
- EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- EXPECT_FALSE(surface->GetBackbufferSwapChainForTesting());
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
- EXPECT_TRUE(context->IsCurrent(surface.get()));
-
- surface->SetEnableDCLayers(false);
-
- // Surface switched to use IDXGISwapChain, so must draw to entire surface.
- EXPECT_FALSE(surface->SetDrawRectangle(gfx::Rect(0, 0, 50, 50)));
- EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- EXPECT_TRUE(surface->GetBackbufferSwapChainForTesting());
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
- EXPECT_TRUE(context->IsCurrent(surface.get()));
-
- context = nullptr;
- DestroySurface(std::move(surface));
-}
-
-// Ensure that the swapchain's alpha is correct.
-TEST(DirectCompositionSurfaceTest, SwitchAlpha) {
- if (!CheckIfDCSupported())
- return;
-
- TestImageTransportSurfaceDelegate delegate;
-
- scoped_refptr<DirectCompositionSurfaceWin> surface(
- new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
- ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
-
- scoped_refptr<gl::GLContext> context =
- gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(context->MakeCurrent(surface.get()));
-
- EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_FALSE(surface->GetBackbufferSwapChainForTesting());
-
- EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface->GetBackbufferSwapChainForTesting();
- ASSERT_TRUE(swap_chain);
- DXGI_SWAP_CHAIN_DESC1 desc;
- swap_chain->GetDesc1(&desc);
- EXPECT_EQ(DXGI_ALPHA_MODE_PREMULTIPLIED, desc.AlphaMode);
-
- // Resize to the same parameters should have no effect.
- EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_TRUE(surface->GetBackbufferSwapChainForTesting());
-
- EXPECT_TRUE(surface->Resize(gfx::Size(100, 100), 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, false));
- EXPECT_FALSE(surface->GetBackbufferSwapChainForTesting());
-
- EXPECT_TRUE(surface->SetDrawRectangle(gfx::Rect(0, 0, 100, 100)));
-
- swap_chain = surface->GetBackbufferSwapChainForTesting();
- ASSERT_TRUE(swap_chain);
- swap_chain->GetDesc1(&desc);
- EXPECT_EQ(DXGI_ALPHA_MODE_IGNORE, desc.AlphaMode);
-
- context = nullptr;
- DestroySurface(std::move(surface));
-}
-
-// Ensure that the GLImage isn't presented again unless it changes.
-TEST(DirectCompositionSurfaceTest, NoPresentTwice) {
- if (!CheckIfDCSupported())
- return;
-
- TestImageTransportSurfaceDelegate delegate;
- scoped_refptr<DirectCompositionSurfaceWin> surface(
- new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
- ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
-
- scoped_refptr<gl::GLContext> context =
- gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(context->MakeCurrent(surface.get()));
-
- surface->SetEnableDCLayers(true);
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(50, 50);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, false);
-
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- image_dxgi->SetTexture(texture, 0);
- image_dxgi->SetColorSpace(gfx::ColorSpace::CreateREC709());
-
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(texture_size);
- params.quad_rect = gfx::Rect(100, 100);
- surface->ScheduleDCLayer(params);
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface->GetLayerSwapChainForTesting(0);
- ASSERT_FALSE(swap_chain);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
-
- swap_chain = surface->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
-
- UINT last_present_count = 0;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetLastPresentCount(&last_present_count)));
-
- // One present is normal, and a second present because it's the first frame
- // and the other buffer needs to be drawn to.
- EXPECT_EQ(2u, last_present_count);
-
- surface->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain2 =
- surface->GetLayerSwapChainForTesting(0);
- EXPECT_EQ(swap_chain2.Get(), swap_chain.Get());
-
- // It's the same image, so it should have the same swapchain.
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetLastPresentCount(&last_present_count)));
- EXPECT_EQ(2u, last_present_count);
-
- // The image changed, we should get a new present
- scoped_refptr<gl::GLImageDXGI> image_dxgi2(
- new gl::GLImageDXGI(texture_size, nullptr));
- image_dxgi2->SetTexture(texture, 0);
- image_dxgi2->SetColorSpace(gfx::ColorSpace::CreateREC709());
-
- params.y_image = image_dxgi2;
- params.uv_image = image_dxgi2;
- surface->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain3 =
- surface->GetLayerSwapChainForTesting(0);
- EXPECT_TRUE(SUCCEEDED(swap_chain3->GetLastPresentCount(&last_present_count)));
- // the present count should increase with the new present
- EXPECT_EQ(3u, last_present_count);
-
- context = nullptr;
- DestroySurface(std::move(surface));
-}
-
-// Ensure the swapchain size is set to the correct size if HW overlay scaling
-// is support - swapchain should be the minimum of the decoded
-// video buffer size and the onscreen video size
-TEST(DirectCompositionSurfaceTest, SwapchainSizeWithScaledOverlays) {
- if (!CheckIfDCSupported())
- return;
-
- TestImageTransportSurfaceDelegate delegate;
- scoped_refptr<DirectCompositionSurfaceWin> surface(
- new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
- ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
-
- scoped_refptr<gl::GLContext> context =
- gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(context->MakeCurrent(surface.get()));
-
- surface->SetEnableDCLayers(true);
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(64, 64);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, false);
-
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- image_dxgi->SetTexture(texture, 0);
- image_dxgi->SetColorSpace(gfx::ColorSpace::CreateREC709());
-
- // HW supports scaled overlays
- // The input texture size is maller than the window size.
- surface->SetScaledOverlaysSupportedForTesting(true);
-
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(texture_size);
- params.quad_rect = gfx::Rect(100, 100);
- surface->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
-
- DXGI_SWAP_CHAIN_DESC Desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc(&Desc)));
- EXPECT_EQ((int)Desc.BufferDesc.Width, texture_size.width());
- EXPECT_EQ((int)Desc.BufferDesc.Height, texture_size.height());
-
- // Clear SwapChainPresenters
- // Must do Clear first because the swap chain won't resize immediately if
- // a new size is given unless this is the very first time after Clear.
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
-
- // The input texture size is bigger than the window size.
- params.quad_rect = gfx::Rect(32, 48);
-
- surface->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain2 =
- surface->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain2);
-
- EXPECT_TRUE(SUCCEEDED(swap_chain2->GetDesc(&Desc)));
- EXPECT_EQ((int)Desc.BufferDesc.Width, params.quad_rect.width());
- EXPECT_EQ((int)Desc.BufferDesc.Height, params.quad_rect.height());
-
- context = nullptr;
- DestroySurface(std::move(surface));
-}
-
-// Ensure the swapchain size is set to the correct size if HW overlay scaling
-// is not support - swapchain should be the onscreen video size
-TEST(DirectCompositionSurfaceTest, SwapchainSizeWithoutScaledOverlays) {
- if (!CheckIfDCSupported())
- return;
-
- TestImageTransportSurfaceDelegate delegate;
- scoped_refptr<DirectCompositionSurfaceWin> surface(
- new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
- ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
-
- scoped_refptr<gl::GLContext> context =
- gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(context->MakeCurrent(surface.get()));
-
- surface->SetEnableDCLayers(true);
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(80, 80);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, false);
-
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- image_dxgi->SetTexture(texture, 0);
- image_dxgi->SetColorSpace(gfx::ColorSpace::CreateREC709());
-
- // HW doesn't support scaled overlays
- // The input texture size is bigger than the window size.
- surface->SetScaledOverlaysSupportedForTesting(false);
-
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(texture_size);
- params.quad_rect = gfx::Rect(42, 42);
- surface->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
-
- DXGI_SWAP_CHAIN_DESC desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc(&desc)));
- EXPECT_EQ((int)desc.BufferDesc.Width, params.quad_rect.width());
- EXPECT_EQ((int)desc.BufferDesc.Height, params.quad_rect.height());
-
- // The input texture size is smaller than the window size.
- params.quad_rect = gfx::Rect(124, 136);
-
- surface->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK, surface->SwapBuffers(base::DoNothing()));
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain2 =
- surface->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain2);
-
- EXPECT_TRUE(SUCCEEDED(swap_chain2->GetDesc(&desc)));
- EXPECT_EQ((int)desc.BufferDesc.Width, params.quad_rect.width());
- EXPECT_EQ((int)desc.BufferDesc.Height, params.quad_rect.height());
-
- context = nullptr;
- DestroySurface(std::move(surface));
-}
-
-// Test protected video flags
-TEST(DirectCompositionSurfaceTest, ProtectedVideos) {
- if (!CheckIfDCSupported())
- return;
-
- TestImageTransportSurfaceDelegate delegate;
- scoped_refptr<DirectCompositionSurfaceWin> surface(
- new DirectCompositionSurfaceWin(nullptr, delegate.AsWeakPtr(),
- ui::GetHiddenWindow()));
- EXPECT_TRUE(surface->Initialize(gl::GLSurfaceFormat()));
-
- scoped_refptr<gl::GLContext> context =
- gl::init::CreateGLContext(nullptr, surface.get(), gl::GLContextAttribs());
- EXPECT_TRUE(context->MakeCurrent(surface.get()));
-
- surface->SetEnableDCLayers(true);
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(1280, 720);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, false);
-
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- image_dxgi->SetTexture(texture, 0);
- image_dxgi->SetColorSpace(gfx::ColorSpace::CreateREC709());
- gfx::Size window_size(640, 360);
-
- // Clear video
- {
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.quad_rect = gfx::Rect(window_size);
- params.content_rect = gfx::Rect(texture_size);
- params.protected_video_type = ui::ProtectedVideoType::kClear;
-
- surface->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface->SwapBuffers(base::DoNothing()));
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
-
- DXGI_SWAP_CHAIN_DESC Desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc(&Desc)));
- unsigned display_only_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
- unsigned hw_protected_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
- EXPECT_EQ(display_only_flag, (unsigned)0);
- EXPECT_EQ(hw_protected_flag, (unsigned)0);
- }
-
- // Software protected video
- {
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.quad_rect = gfx::Rect(window_size);
- params.content_rect = gfx::Rect(texture_size);
- params.protected_video_type = ui::ProtectedVideoType::kSoftwareProtected;
-
- surface->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface->SwapBuffers(base::DoNothing()));
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
-
- DXGI_SWAP_CHAIN_DESC Desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc(&Desc)));
- unsigned display_only_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
- unsigned hw_protected_flag = Desc.Flags & DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED;
- EXPECT_EQ(display_only_flag, (unsigned)DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY);
- EXPECT_EQ(hw_protected_flag, (unsigned)0);
- }
-
- // TODO(magchen): Add a hardware protected video test when hardware procted
- // video support is enabled by defaut in the Intel driver and Chrome
-
- context = nullptr;
- DestroySurface(std::move(surface));
-}
-
-std::vector<SkColor> ReadBackWindow(HWND window, const gfx::Size& size) {
- base::win::ScopedCreateDC mem_hdc(::CreateCompatibleDC(nullptr));
- DCHECK(mem_hdc.IsValid());
-
- BITMAPV4HEADER hdr;
- gfx::CreateBitmapV4Header(size.width(), size.height(), &hdr);
-
- void* bits = nullptr;
- base::win::ScopedBitmap bitmap(
- ::CreateDIBSection(mem_hdc.Get(), reinterpret_cast<BITMAPINFO*>(&hdr),
- DIB_RGB_COLORS, &bits, nullptr, 0));
- DCHECK(bitmap.is_valid());
-
- base::win::ScopedSelectObject select_object(mem_hdc.Get(), bitmap.get());
-
- // Grab a copy of the window. Use PrintWindow because it works even when the
- // window's partially occluded. The PW_RENDERFULLCONTENT flag is undocumented,
- // but works starting in Windows 8.1. It allows for capturing the contents of
- // the window that are drawn using DirectComposition.
- UINT flags = PW_CLIENTONLY | PW_RENDERFULLCONTENT;
-
- BOOL result = PrintWindow(window, mem_hdc.Get(), flags);
- if (!result)
- PLOG(ERROR) << "Failed to print window";
-
- GdiFlush();
-
- std::vector<SkColor> pixels(size.width() * size.height());
- memcpy(pixels.data(), bits, pixels.size() * sizeof(SkColor));
- return pixels;
-}
-
-SkColor ReadBackWindowPixel(HWND window, const gfx::Point& point) {
- gfx::Size size(point.x() + 1, point.y() + 1);
- auto pixels = ReadBackWindow(window, size);
- return pixels[size.width() * point.y() + point.x()];
-}
-
-class DirectCompositionPixelTest : public testing::Test {
- public:
- DirectCompositionPixelTest()
- : window_(&platform_delegate_, gfx::Rect(100, 100)) {}
-
- ~DirectCompositionPixelTest() override {
- context_ = nullptr;
- if (surface_)
- DestroySurface(std::move(surface_));
- }
-
- protected:
- void InitializeSurface() {
- static_cast<ui::PlatformWindow*>(&window_)->Show();
-
- surface_ = new DirectCompositionSurfaceWin(nullptr, delegate_.AsWeakPtr(),
- window_.hwnd());
- EXPECT_TRUE(surface_->Initialize(gl::GLSurfaceFormat()));
- context_ = gl::init::CreateGLContext(nullptr, surface_.get(),
- gl::GLContextAttribs());
- EXPECT_TRUE(context_->MakeCurrent(surface_.get()));
- }
-
- void PixelTestSwapChain(bool layers_enabled) {
- if (!CheckIfDCSupported())
- return;
-
- InitializeSurface();
-
- surface_->SetEnableDCLayers(layers_enabled);
- gfx::Size window_size(100, 100);
- EXPECT_TRUE(surface_->Resize(window_size, 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(window_size)));
-
- glClearColor(1.0, 0.0, 0.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
-
- // Ensure DWM swap completed.
- Sleep(1000);
-
- SkColor expected_color = SK_ColorRED;
- SkColor actual_color =
- ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
- EXPECT_EQ(expected_color, actual_color)
- << std::hex << "Expected " << expected_color << " Actual "
- << actual_color;
-
- EXPECT_TRUE(context_->IsCurrent(surface_.get()));
- }
-
- TestPlatformDelegate platform_delegate_;
- TestImageTransportSurfaceDelegate delegate_;
- ui::WinWindow window_;
- scoped_refptr<DirectCompositionSurfaceWin> surface_;
- scoped_refptr<gl::GLContext> context_;
-};
-
-TEST_F(DirectCompositionPixelTest, DCLayersEnabled) {
- PixelTestSwapChain(true);
-}
-
-TEST_F(DirectCompositionPixelTest, DCLayersDisabled) {
- PixelTestSwapChain(false);
-}
-
-bool AreColorsSimilar(int a, int b) {
- // The precise colors may differ depending on the video processor, so allow
- // a margin for error.
- const int kMargin = 10;
- return abs(SkColorGetA(a) - SkColorGetA(b)) < kMargin &&
- abs(SkColorGetR(a) - SkColorGetR(b)) < kMargin &&
- abs(SkColorGetG(a) - SkColorGetG(b)) < kMargin &&
- abs(SkColorGetB(a) - SkColorGetB(b)) < kMargin;
-}
-
-class DirectCompositionVideoPixelTest : public DirectCompositionPixelTest {
- protected:
- void TestVideo(const gfx::ColorSpace& color_space,
- SkColor expected_color,
- bool check_color) {
- if (!CheckIfDCSupported())
- return;
- InitializeSurface();
- surface_->SetEnableDCLayers(true);
-
- gfx::Size window_size(100, 100);
- EXPECT_TRUE(surface_->Resize(window_size, 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(50, 50);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, false);
-
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- image_dxgi->SetTexture(texture, 0);
- image_dxgi->SetColorSpace(color_space);
-
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(texture_size);
- params.quad_rect = gfx::Rect(texture_size);
- surface_->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
-
- // Scaling up the swapchain with the same image should cause it to be
- // transformed again, but not presented again.
- params.quad_rect = gfx::Rect(window_size);
-
- surface_->ScheduleDCLayer(params);
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
- Sleep(1000);
-
- if (check_color) {
- SkColor actual_color =
- ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
- EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
- << std::hex << "Expected " << expected_color << " Actual "
- << actual_color;
- }
- }
-};
-
-TEST_F(DirectCompositionVideoPixelTest, BT601) {
- TestVideo(gfx::ColorSpace::CreateREC601(), SkColorSetRGB(0xdb, 0x81, 0xe8),
- true);
-}
-
-TEST_F(DirectCompositionVideoPixelTest, BT709) {
- TestVideo(gfx::ColorSpace::CreateREC709(), SkColorSetRGB(0xe1, 0x90, 0xeb),
- true);
-}
-
-TEST_F(DirectCompositionVideoPixelTest, SRGB) {
- // SRGB doesn't make sense on an NV12 input, but don't crash.
- TestVideo(gfx::ColorSpace::CreateSRGB(), SK_ColorTRANSPARENT, false);
-}
-
-TEST_F(DirectCompositionVideoPixelTest, SCRGBLinear) {
- // SCRGB doesn't make sense on an NV12 input, but don't crash.
- TestVideo(gfx::ColorSpace::CreateSCRGBLinear(), SK_ColorTRANSPARENT, false);
-}
-
-TEST_F(DirectCompositionVideoPixelTest, InvalidColorSpace) {
- // Invalid color space should be treated as BT.709
- TestVideo(gfx::ColorSpace(), SkColorSetRGB(0xe1, 0x90, 0xeb), true);
-}
-
-TEST_F(DirectCompositionPixelTest, SoftwareVideoSwapchain) {
- if (!CheckIfDCSupported())
- return;
- InitializeSurface();
- surface_->SetEnableDCLayers(true);
-
- gfx::Size window_size(100, 100);
- EXPECT_TRUE(surface_->Resize(window_size, 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size y_size(50, 50);
- gfx::Size uv_size(25, 25);
- size_t y_stride =
- gfx::RowSizeForBufferFormat(y_size.width(), gfx::BufferFormat::R_8, 0);
- size_t uv_stride =
- gfx::RowSizeForBufferFormat(uv_size.width(), gfx::BufferFormat::RG_88, 0);
- std::vector<uint8_t> y_data(y_stride * y_size.height(), 0xff);
- std::vector<uint8_t> uv_data(uv_stride * uv_size.height(), 0xff);
- auto y_image = base::MakeRefCounted<gl::GLImageRefCountedMemory>(y_size);
-
- y_image->Initialize(new base::RefCountedBytes(y_data),
- gfx::BufferFormat::R_8);
- auto uv_image = base::MakeRefCounted<gl::GLImageRefCountedMemory>(uv_size);
- uv_image->Initialize(new base::RefCountedBytes(uv_data),
- gfx::BufferFormat::RG_88);
- y_image->SetColorSpace(gfx::ColorSpace::CreateREC709());
-
- ui::DCRendererLayerParams params;
- params.y_image = y_image;
- params.uv_image = uv_image;
- params.content_rect = gfx::Rect(y_size);
- params.quad_rect = gfx::Rect(window_size);
- surface_->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
- Sleep(1000);
-
- SkColor expected_color = SkColorSetRGB(0xff, 0xb7, 0xff);
- SkColor actual_color =
- ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
- EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
- << std::hex << "Expected " << expected_color << " Actual "
- << actual_color;
-}
-
-TEST_F(DirectCompositionPixelTest, VideoHandleSwapchain) {
- if (!CheckIfDCSupported())
- return;
- InitializeSurface();
- surface_->SetEnableDCLayers(true);
-
- gfx::Size window_size(100, 100);
- EXPECT_TRUE(surface_->Resize(window_size, 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(50, 50);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, true);
- Microsoft::WRL::ComPtr<IDXGIResource1> resource;
- texture.As(&resource);
- HANDLE handle = 0;
- resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
- &handle);
- // The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
- gfx::BufferFormat::RGBA_8888));
-
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(texture_size);
- params.quad_rect = gfx::Rect(window_size);
- surface_->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
-
- Sleep(1000);
-
- SkColor expected_color = SkColorSetRGB(0xe1, 0x90, 0xeb);
- SkColor actual_color =
- ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
- EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
- << std::hex << "Expected " << expected_color << " Actual "
- << actual_color;
-}
-
-TEST_F(DirectCompositionPixelTest, SkipVideoLayerEmptyBoundsRect) {
- if (!CheckIfDCSupported())
- return;
- InitializeSurface();
- surface_->SetEnableDCLayers(true);
-
- gfx::Size window_size(100, 100);
- EXPECT_TRUE(surface_->Resize(window_size, 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(window_size)));
-
- glClearColor(0.0, 0.0, 0.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(50, 50);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, true);
- Microsoft::WRL::ComPtr<IDXGIResource1> resource;
- texture.As(&resource);
- HANDLE handle = 0;
- resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
- &handle);
- // The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
- gfx::BufferFormat::RGBA_8888));
-
- // Layer with empty bounds rect.
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(texture_size);
- surface_->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
-
- Sleep(1000);
-
- // No color is written since the visual committed to DirectComposition has no
- // content.
- SkColor expected_color = SK_ColorBLACK;
- SkColor actual_color =
- ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
- EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
- << std::hex << "Expected " << expected_color << " Actual "
- << actual_color;
-}
-
-TEST_F(DirectCompositionPixelTest, SkipVideoLayerEmptyContentsRect) {
- if (!CheckIfDCSupported())
- return;
- InitializeSurface();
- // Swap chain size is overridden to content rect size only if scaled overlays
- // are supported.
- DirectCompositionSurfaceWin::SetScaledOverlaysSupportedForTesting(true);
- surface_->SetEnableDCLayers(true);
-
- gfx::Size window_size(100, 100);
- EXPECT_TRUE(surface_->Resize(window_size, 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(window_size)));
-
- glClearColor(0.0, 0.0, 0.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(50, 50);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, true);
- Microsoft::WRL::ComPtr<IDXGIResource1> resource;
- texture.As(&resource);
- HANDLE handle = 0;
- resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
- &handle);
- // The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
- gfx::BufferFormat::RGBA_8888));
-
- // Layer with empty content rect.
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.quad_rect = gfx::Rect(window_size);
- surface_->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
-
- Sleep(1000);
-
- // No color is written since the visual committed to DirectComposition has no
- // content.
- SkColor expected_color = SK_ColorBLACK;
- SkColor actual_color =
- ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
- EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
- << std::hex << "Expected " << expected_color << " Actual "
- << actual_color;
-}
-
-TEST_F(DirectCompositionPixelTest, NV12SwapChain) {
- if (!CheckIfDCSupported())
- return;
- DirectCompositionSurfaceWin::SetPreferNV12OverlaysForTesting();
- InitializeSurface();
-
- surface_->SetEnableDCLayers(true);
-
- gfx::Size window_size(100, 100);
- EXPECT_TRUE(surface_->Resize(window_size, 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(window_size)));
-
- glClearColor(0.0, 0.0, 0.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(50, 50);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, true);
- Microsoft::WRL::ComPtr<IDXGIResource1> resource;
- texture.As(&resource);
- HANDLE handle = 0;
- resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
- &handle);
- // The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
- gfx::BufferFormat::RGBA_8888));
-
- // Pass content rect with odd with and height. Surface should round up width
- // and height when creating swap chain.
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(0, 0, 49, 49);
- params.quad_rect = gfx::Rect(window_size);
- surface_->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
-
- Sleep(1000);
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface_->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
-
- DXGI_SWAP_CHAIN_DESC1 desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc1(&desc)));
- EXPECT_EQ(desc.Format, DXGI_FORMAT_NV12);
- EXPECT_EQ(desc.Width, 50u);
- EXPECT_EQ(desc.Height, 50u);
-
- SkColor expected_color = SkColorSetRGB(0xe1, 0x90, 0xeb);
- SkColor actual_color =
- ReadBackWindowPixel(window_.hwnd(), gfx::Point(75, 75));
- EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
- << std::hex << "Expected " << expected_color << " Actual "
- << actual_color;
-}
-
-TEST_F(DirectCompositionPixelTest, NonZeroBoundsOffset) {
- if (!CheckIfDCSupported())
- return;
- InitializeSurface();
- // Swap chain size is overridden to content rect size only if scaled overlays
- // are supported.
- DirectCompositionSurfaceWin::SetScaledOverlaysSupportedForTesting(true);
- surface_->SetEnableDCLayers(true);
-
- gfx::Size window_size(100, 100);
- EXPECT_TRUE(surface_->Resize(window_size, 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(window_size)));
-
- glClearColor(0.0, 0.0, 0.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(50, 50);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, true);
- Microsoft::WRL::ComPtr<IDXGIResource1> resource;
- texture.As(&resource);
- HANDLE handle = 0;
- resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
- &handle);
- // The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
- gfx::BufferFormat::RGBA_8888));
-
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(texture_size);
- params.quad_rect = gfx::Rect(gfx::Point(25, 25), texture_size);
- surface_->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
-
- Sleep(1000);
-
- SkColor video_color = SkColorSetRGB(0xe1, 0x90, 0xeb);
- struct {
- gfx::Point point;
- SkColor expected_color;
- } test_cases[] = {
- // Outside bounds
- {{24, 24}, SK_ColorBLACK},
- {{75, 75}, SK_ColorBLACK},
- // Inside bounds
- {{25, 25}, video_color},
- {{74, 74}, video_color},
- };
-
- auto pixels = ReadBackWindow(window_.hwnd(), window_size);
-
- for (const auto& test_case : test_cases) {
- const auto& point = test_case.point;
- const auto& expected_color = test_case.expected_color;
- SkColor actual_color = pixels[window_size.width() * point.y() + point.x()];
- EXPECT_TRUE(AreColorsSimilar(expected_color, actual_color))
- << std::hex << "Expected " << expected_color << " Actual "
- << actual_color << " at " << point.ToString();
- }
-}
-
-TEST_F(DirectCompositionPixelTest, ResizeVideoLayer) {
- if (!CheckIfDCSupported())
- return;
- InitializeSurface();
- surface_->SetEnableDCLayers(true);
-
- gfx::Size window_size(100, 100);
- EXPECT_TRUE(surface_->Resize(window_size, 1.0,
- gl::GLSurface::ColorSpace::UNSPECIFIED, true));
- EXPECT_TRUE(surface_->SetDrawRectangle(gfx::Rect(window_size)));
-
- glClearColor(0.0, 0.0, 0.0, 1.0);
- glClear(GL_COLOR_BUFFER_BIT);
-
- Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
- gl::QueryD3D11DeviceObjectFromANGLE();
-
- gfx::Size texture_size(50, 50);
- Microsoft::WRL::ComPtr<ID3D11Texture2D> texture =
- CreateNV12Texture(d3d11_device, texture_size, true);
- Microsoft::WRL::ComPtr<IDXGIResource1> resource;
- texture.As(&resource);
- HANDLE handle = 0;
- resource->CreateSharedHandle(nullptr, DXGI_SHARED_RESOURCE_READ, nullptr,
- &handle);
- // The format doesn't matter, since we aren't binding.
- scoped_refptr<gl::GLImageDXGI> image_dxgi(
- new gl::GLImageDXGI(texture_size, nullptr));
- ASSERT_TRUE(image_dxgi->InitializeHandle(base::win::ScopedHandle(handle), 0,
- gfx::BufferFormat::RGBA_8888));
-
- {
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(texture_size);
- params.quad_rect = gfx::Rect(window_size);
- surface_->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
- }
-
- Microsoft::WRL::ComPtr<IDXGISwapChain1> swap_chain =
- surface_->GetLayerSwapChainForTesting(0);
- ASSERT_TRUE(swap_chain);
-
- DXGI_SWAP_CHAIN_DESC1 desc;
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc1(&desc)));
- EXPECT_EQ(desc.Width, 50u);
- EXPECT_EQ(desc.Height, 50u);
-
- {
- ui::DCRendererLayerParams params;
- params.y_image = image_dxgi;
- params.uv_image = image_dxgi;
- params.content_rect = gfx::Rect(30, 30);
- params.quad_rect = gfx::Rect(window_size);
- surface_->ScheduleDCLayer(params);
-
- EXPECT_EQ(gfx::SwapResult::SWAP_ACK,
- surface_->SwapBuffers(base::DoNothing()));
- }
-
- // Swap chain isn't recreated on resize.
- ASSERT_TRUE(surface_->GetLayerSwapChainForTesting(0));
- EXPECT_EQ(swap_chain.Get(), surface_->GetLayerSwapChainForTesting(0).Get());
- EXPECT_TRUE(SUCCEEDED(swap_chain->GetDesc1(&desc)));
- EXPECT_EQ(desc.Width, 30u);
- EXPECT_EQ(desc.Height, 30u);
-}
-
-} // namespace
-} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
index 2559626a781..0c999d8e0d4 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.cc
@@ -123,10 +123,10 @@ gpu::ContextResult GLES2CommandBufferStub::Initialize(
}
#if defined(OS_MACOSX)
- // Virtualize PreferIntegratedGpu contexts by default on OS X to prevent
+ // Virtualize GpuPreference::kLowPower contexts by default on OS X to prevent
// performance regressions when enabling FCM.
// http://crbug.com/180463
- if (init_params.attribs.gpu_preference == gl::PreferIntegratedGpu)
+ if (init_params.attribs.gpu_preference == gl::GpuPreference::kLowPower)
use_virtualized_gl_context_ = true;
#endif
@@ -428,6 +428,10 @@ int32_t GLES2CommandBufferStub::GetRouteID() const {
return route_id_;
}
+viz::GpuVSyncCallback GLES2CommandBufferStub::GetGpuVSyncCallback() {
+ return viz::GpuVSyncCallback();
+}
+
MemoryTracker* GLES2CommandBufferStub::GetMemoryTracker() const {
return context_group_->memory_tracker();
}
diff --git a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
index 6e09643060d..8ef50166787 100644
--- a/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
+++ b/chromium/gpu/ipc/service/gles2_command_buffer_stub.h
@@ -52,6 +52,7 @@ class GPU_IPC_SERVICE_EXPORT GLES2CommandBufferStub
void AddFilter(IPC::MessageFilter* message_filter) override;
int32_t GetRouteID() const override;
+ viz::GpuVSyncCallback GetGpuVSyncCallback() override;
private:
bool HandleMessage(const IPC::Message& message) override;
diff --git a/chromium/gpu/ipc/service/gpu_channel.cc b/chromium/gpu/ipc/service/gpu_channel.cc
index 5199378cce9..71446adbbe8 100644
--- a/chromium/gpu/ipc/service/gpu_channel.cc
+++ b/chromium/gpu/ipc/service/gpu_channel.cc
@@ -19,6 +19,7 @@
#include "base/command_line.h"
#include "base/containers/circular_deque.h"
#include "base/location.h"
+#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
@@ -103,6 +104,10 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelMessageFilter
void AddChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
void RemoveChannelFilter(scoped_refptr<IPC::MessageFilter> filter);
+ ImageDecodeAcceleratorStub* image_decode_accelerator_stub() const {
+ return image_decode_accelerator_stub_.get();
+ }
+
private:
~GpuChannelMessageFilter() override;
@@ -573,6 +578,11 @@ void GpuChannel::HandleMessageForTesting(const IPC::Message& msg) {
filter_->OnMessageReceived(msg);
}
+ImageDecodeAcceleratorStub* GpuChannel::GetImageDecodeAcceleratorStub() const {
+ DCHECK(filter_);
+ return filter_->image_decode_accelerator_stub();
+}
+
bool GpuChannel::CreateSharedImageStub() {
// SharedImageInterfaceProxy/Stub is a singleton per channel, using a reserved
// route.
diff --git a/chromium/gpu/ipc/service/gpu_channel.h b/chromium/gpu/ipc/service/gpu_channel.h
index 1cfe9784968..8c10927de09 100644
--- a/chromium/gpu/ipc/service/gpu_channel.h
+++ b/chromium/gpu/ipc/service/gpu_channel.h
@@ -44,6 +44,7 @@ namespace gpu {
class GpuChannelManager;
class GpuChannelMessageFilter;
+class ImageDecodeAcceleratorStub;
class ImageDecodeAcceleratorWorker;
class Scheduler;
class SharedImageStub;
@@ -154,6 +155,8 @@ class GPU_IPC_SERVICE_EXPORT GpuChannel : public IPC::Listener,
void HandleMessageForTesting(const IPC::Message& msg);
+ ImageDecodeAcceleratorStub* GetImageDecodeAcceleratorStub() const;
+
#if defined(OS_ANDROID)
const CommandBufferStub* GetOneStub() const;
#endif
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index 3f3d6e4c62c..05705cd0b56 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -66,7 +66,8 @@ GpuChannelManager::GpuChannelManager(
GpuProcessActivityFlags activity_flags,
scoped_refptr<gl::GLSurface> default_offscreen_surface,
ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
- viz::VulkanContextProvider* vulkan_context_provider)
+ viz::VulkanContextProvider* vulkan_context_provider,
+ viz::MetalContextProvider* metal_context_provider)
: task_runner_(task_runner),
io_task_runner_(io_task_runner),
gpu_preferences_(gpu_preferences),
@@ -89,6 +90,7 @@ GpuChannelManager::GpuChannelManager(
base::BindRepeating(&GpuChannelManager::HandleMemoryPressure,
base::Unretained(this))),
vulkan_context_provider_(vulkan_context_provider),
+ metal_context_provider_(metal_context_provider),
weak_factory_(this) {
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(io_task_runner);
@@ -115,6 +117,11 @@ GpuChannelManager::~GpuChannelManager() {
default_offscreen_surface_->Destroy();
default_offscreen_surface_ = nullptr;
}
+
+ // Try to make the context current so that GPU resources can be destroyed
+ // correctly.
+ if (shared_context_state_)
+ shared_context_state_->MakeCurrent(nullptr);
}
gles2::Outputter* GpuChannelManager::outputter() {
@@ -352,7 +359,7 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
scoped_refptr<gl::GLSurface> surface = default_offscreen_surface();
bool use_virtualized_gl_contexts = false;
#if defined(OS_MACOSX)
- // Virtualize PreferIntegratedGpu contexts by default on OS X to prevent
+ // Virtualize GpuPreference::kLowPower contexts by default on OS X to prevent
// performance regressions when enabling FCM.
// http://crbug.com/180463
use_virtualized_gl_contexts = true;
@@ -422,7 +429,7 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
use_virtualized_gl_contexts,
base::BindOnce(&GpuChannelManager::OnContextLost, base::Unretained(this),
/*synthetic_loss=*/false),
- vulkan_context_provider_);
+ vulkan_context_provider_, metal_context_provider_);
// OOP-R needs GrContext for raster tiles.
bool need_gr_context =
@@ -433,7 +440,7 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
need_gr_context |= features::IsUsingSkiaRenderer();
if (need_gr_context) {
- if (!vulkan_context_provider_) {
+ if (!vulkan_context_provider_ && !metal_context_provider_) {
auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
gpu_driver_bug_workarounds(), gpu_feature_info());
if (!shared_context_state_->InitializeGL(gpu_preferences_,
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index 5b00e29d4b9..be47bf845aa 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -80,7 +80,8 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
GpuProcessActivityFlags activity_flags,
scoped_refptr<gl::GLSurface> default_offscreen_surface,
ImageDecodeAcceleratorWorker* image_decode_accelerator_worker,
- viz::VulkanContextProvider* vulkan_context_provider = nullptr);
+ viz::VulkanContextProvider* vulkan_context_provider = nullptr,
+ viz::MetalContextProvider* metal_context_provider = nullptr);
~GpuChannelManager() override;
GpuChannelManagerDelegate* delegate() const { return delegate_; }
@@ -242,10 +243,14 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
base::Optional<raster::GrCacheController> gr_cache_controller_;
scoped_refptr<SharedContextState> shared_context_state_;
- // With --enable-vulkan, the vulkan_context_provider_ will be set from
+ // With --enable-vulkan, |vulkan_context_provider_| will be set from
// viz::GpuServiceImpl. The raster decoders will use it for rasterization.
viz::VulkanContextProvider* vulkan_context_provider_ = nullptr;
+ // If features::SkiaOnMetad, |metal_context_provider_| will be set from
+ // viz::GpuServiceImpl. The raster decoders will use it for rasterization.
+ viz::MetalContextProvider* metal_context_provider_ = nullptr;
+
// Member variables should appear before the WeakPtrFactory, to ensure
// that any WeakPtrs to Controller are invalidated before its members
// variable's destructors are executed, rendering them invalid.
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index 603782ef2a4..0302db044c2 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -33,15 +33,17 @@
#if defined(USE_OZONE)
#include "ui/ozone/public/ozone_platform.h"
+#include "ui/ozone/public/surface_factory_ozone.h"
#endif
#if defined(OS_WIN)
-#include "gpu/ipc/service/direct_composition_surface_win.h"
+#include "ui/gl/direct_composition_surface_win.h"
#include "ui/gl/gl_surface_egl.h"
#endif
#if defined(OS_ANDROID)
#include "base/android/android_image_reader_compat.h"
+#include "ui/gl/android/android_surface_control_compat.h"
#endif
#if BUILDFLAG(ENABLE_VULKAN)
@@ -57,7 +59,7 @@ bool CollectGraphicsInfo(GPUInfo* gpu_info,
DCHECK(gpu_info);
TRACE_EVENT0("gpu,startup", "Collect Graphics Info");
base::TimeTicks before_collect_context_graphics_info = base::TimeTicks::Now();
- bool success = CollectContextGraphicsInfo(gpu_info, gpu_preferences);
+ bool success = CollectContextGraphicsInfo(gpu_info);
if (!success)
LOG(ERROR) << "gpu::CollectGraphicsInfo failed.";
@@ -70,23 +72,40 @@ bool CollectGraphicsInfo(GPUInfo* gpu_info,
}
#if defined(OS_WIN)
+OverlaySupport FlagsToOverlaySupport(UINT flags) {
+ if (flags & DXGI_OVERLAY_SUPPORT_FLAG_SCALING)
+ return OverlaySupport::kScaling;
+ if (flags & DXGI_OVERLAY_SUPPORT_FLAG_DIRECT)
+ return OverlaySupport::kDirect;
+ return OverlaySupport::kNone;
+}
+#endif // OS_WIN
+
+void InitializePlatformOverlaySettings(GPUInfo* gpu_info) {
+#if defined(OS_WIN)
// This has to be called after a context is created, active GPU is identified,
// and GPU driver bug workarounds are computed again. Otherwise the workaround
// |disable_direct_composition| may not be correctly applied.
// Also, this has to be called after falling back to SwiftShader decision is
// finalized because this function depends on GL is ANGLE's GLES or not.
-void InitializeDirectCompositionOverlaySupport(GPUInfo* gpu_info) {
if (gl::GetGLImplementation() == gl::kGLImplementationEGLGLES2) {
DCHECK(gpu_info);
gpu_info->direct_composition =
- DirectCompositionSurfaceWin::IsDirectCompositionSupported();
+ gl::DirectCompositionSurfaceWin::IsDirectCompositionSupported();
gpu_info->supports_overlays =
- DirectCompositionSurfaceWin::AreOverlaysSupported();
- gpu_info->overlay_capabilities =
- DirectCompositionSurfaceWin::GetOverlayCapabilities();
+ gl::DirectCompositionSurfaceWin::AreOverlaysSupported();
+ gpu_info->nv12_overlay_support = FlagsToOverlaySupport(
+ gl::DirectCompositionSurfaceWin::GetOverlaySupportFlags(
+ DXGI_FORMAT_NV12));
+ gpu_info->yuy2_overlay_support = FlagsToOverlaySupport(
+ gl::DirectCompositionSurfaceWin::GetOverlaySupportFlags(
+ DXGI_FORMAT_YUY2));
}
+#elif defined(OS_ANDROID)
+ if (gpu_info->gpu.vendor_string == "Qualcomm")
+ gl::SurfaceControl::EnableQualcommUBWC();
+#endif
}
-#endif // defined(OS_WIN)
#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && !defined(IS_CHROMECAST)
bool CanAccessNvidiaDeviceFile() {
@@ -225,6 +244,10 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
.requires_mojo;
params.viz_display_compositor = features::IsVizDisplayCompositorEnabled();
ui::OzonePlatform::InitializeForGPU(params);
+ const std::vector<gfx::BufferFormat> supported_buffer_formats_for_texturing =
+ ui::OzonePlatform::GetInstance()
+ ->GetSurfaceFactoryOzone()
+ ->GetSupportedFormatsForTexturing();
#endif
#if BUILDFLAG(ENABLE_VULKAN)
@@ -303,9 +326,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
}
}
-#if defined(OS_WIN)
- InitializeDirectCompositionOverlaySupport(&gpu_info_);
-#endif
+ InitializePlatformOverlaySettings(&gpu_info_);
#if defined(OS_LINUX)
// Driver may create a compatibility profile context when collect graphics
@@ -387,6 +408,10 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
base::android::AndroidImageReader::DisableSupport();
}
#endif
+#if defined(USE_OZONE)
+ gpu_feature_info_.supported_buffer_formats_for_allocation_and_texturing =
+ std::move(supported_buffer_formats_for_texturing);
+#endif
return true;
}
@@ -426,6 +451,10 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
.requires_mojo;
params.viz_display_compositor = features::IsVizDisplayCompositorEnabled();
ui::OzonePlatform::InitializeForGPU(params);
+ const std::vector<gfx::BufferFormat> supported_buffer_formats_for_texturing =
+ ui::OzonePlatform::GetInstance()
+ ->GetSurfaceFactoryOzone()
+ ->GetSupportedFormatsForTexturing();
ui::OzonePlatform::GetInstance()->AfterSandboxEntry();
#endif
bool needs_more_info = true;
@@ -454,7 +483,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
bool gl_disabled = gl::GetGLImplementation() == gl::kGLImplementationDisabled;
if (!gl_disabled && !use_swiftshader) {
- CollectContextGraphicsInfo(&gpu_info_, gpu_preferences_);
+ CollectContextGraphicsInfo(&gpu_info_);
gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
command_line, nullptr);
use_swiftshader = EnableSwiftShaderIfNeeded(
@@ -485,16 +514,14 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
}
}
-#if defined(OS_WIN)
- InitializeDirectCompositionOverlaySupport(&gpu_info_);
-#endif
+ InitializePlatformOverlaySettings(&gpu_info_);
#if defined(OS_LINUX)
// Driver may create a compatibility profile context when collect graphics
// information on Linux platform. Try to collect graphics information
// based on core profile context after disabling platform extensions.
if (!gl_disabled && !use_swiftshader) {
- CollectContextGraphicsInfo(&gpu_info_, gpu_preferences_);
+ CollectContextGraphicsInfo(&gpu_info_);
gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
command_line, nullptr);
use_swiftshader = EnableSwiftShaderIfNeeded(
@@ -515,6 +542,11 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
AdjustInfoToSwiftShader();
}
+#if defined(USE_OZONE)
+ gpu_feature_info_.supported_buffer_formats_for_allocation_and_texturing =
+ std::move(supported_buffer_formats_for_texturing);
+#endif
+
UMA_HISTOGRAM_ENUMERATION("GPU.GLImplementation", gl::GetGLImplementation());
}
#endif // OS_ANDROID
@@ -524,7 +556,7 @@ void GpuInit::AdjustInfoToSwiftShader() {
gpu_info_.passthrough_cmd_decoder = false;
gpu_feature_info_for_hardware_gpu_ = gpu_feature_info_;
gpu_feature_info_ = ComputeGpuFeatureInfoForSwiftShader();
- CollectContextGraphicsInfo(&gpu_info_, gpu_preferences_);
+ CollectContextGraphicsInfo(&gpu_info_);
}
scoped_refptr<gl::GLSurface> GpuInit::TakeDefaultOffscreenSurface() {
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc
index 6b5c797846b..46f0560ef73 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory.cc
@@ -4,8 +4,8 @@
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
+#include <memory>
+
#include "build/build_config.h"
#if defined(OS_MACOSX)
@@ -28,15 +28,17 @@ namespace gpu {
// static
std::unique_ptr<GpuMemoryBufferFactory>
-GpuMemoryBufferFactory::CreateNativeType() {
+GpuMemoryBufferFactory::CreateNativeType(
+ viz::VulkanContextProvider* vulkan_context_provider) {
#if defined(OS_MACOSX)
- return base::WrapUnique(new GpuMemoryBufferFactoryIOSurface);
+ return std::make_unique<GpuMemoryBufferFactoryIOSurface>();
#elif defined(OS_ANDROID)
- return base::WrapUnique(new GpuMemoryBufferFactoryAndroidHardwareBuffer);
+ return std::make_unique<GpuMemoryBufferFactoryAndroidHardwareBuffer>();
#elif defined(OS_LINUX) || defined(OS_FUCHSIA)
- return base::WrapUnique(new GpuMemoryBufferFactoryNativePixmap);
+ return std::make_unique<GpuMemoryBufferFactoryNativePixmap>(
+ vulkan_context_provider);
#elif defined(OS_WIN)
- return base::WrapUnique(new GpuMemoryBufferFactoryDXGI);
+ return std::make_unique<GpuMemoryBufferFactoryDXGI>();
#else
return nullptr;
#endif
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory.h
index 1cbd31ecbc7..e603ff83af2 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory.h
@@ -15,6 +15,10 @@
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"
+namespace viz {
+class VulkanContextProvider;
+} // namespace viz
+
namespace gpu {
class ImageFactory;
@@ -25,7 +29,8 @@ class GPU_IPC_SERVICE_EXPORT GpuMemoryBufferFactory {
// Creates a new factory instance for native GPU memory buffers. Returns null
// if native buffers are not supported.
- static std::unique_ptr<GpuMemoryBufferFactory> CreateNativeType();
+ static std::unique_ptr<GpuMemoryBufferFactory> CreateNativeType(
+ viz::VulkanContextProvider* vulkan_context_provider);
// Creates a new GPU memory buffer instance. A valid handle is returned on
// success. It can be called on any thread.
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
index 92467764359..70fb7652a17 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.cc
@@ -4,7 +4,9 @@
#include "gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h"
+#include "components/viz/common/gpu/vulkan_context_provider.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
+#include "gpu/vulkan/vulkan_device_queue.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gfx/client_native_pixmap.h"
#include "ui/gfx/linux/native_pixmap_dmabuf.h"
@@ -20,8 +22,12 @@
namespace gpu {
-GpuMemoryBufferFactoryNativePixmap::GpuMemoryBufferFactoryNativePixmap() =
- default;
+GpuMemoryBufferFactoryNativePixmap::GpuMemoryBufferFactoryNativePixmap()
+ : vulkan_context_provider_(nullptr) {}
+
+GpuMemoryBufferFactoryNativePixmap::GpuMemoryBufferFactoryNativePixmap(
+ viz::VulkanContextProvider* vulkan_context_provider)
+ : vulkan_context_provider_(vulkan_context_provider) {}
GpuMemoryBufferFactoryNativePixmap::~GpuMemoryBufferFactoryNativePixmap() =
default;
@@ -38,7 +44,8 @@ GpuMemoryBufferFactoryNativePixmap::CreateGpuMemoryBuffer(
scoped_refptr<gfx::NativePixmap> pixmap =
ui::OzonePlatform::GetInstance()
->GetSurfaceFactoryOzone()
- ->CreateNativePixmap(surface_handle, size, format, usage);
+ ->CreateNativePixmap(surface_handle, GetVulkanDevice(), size, format,
+ usage);
if (!pixmap.get()) {
DLOG(ERROR) << "Failed to create pixmap " << size.ToString() << ", "
<< gfx::BufferFormatToString(format) << ", usage "
@@ -144,10 +151,10 @@ GpuMemoryBufferFactoryNativePixmap::CreateAnonymousImage(
bool* is_cleared) {
scoped_refptr<gfx::NativePixmap> pixmap;
#if defined(USE_OZONE)
- pixmap =
- ui::OzonePlatform::GetInstance()
- ->GetSurfaceFactoryOzone()
- ->CreateNativePixmap(gpu::kNullSurfaceHandle, size, format, usage);
+ pixmap = ui::OzonePlatform::GetInstance()
+ ->GetSurfaceFactoryOzone()
+ ->CreateNativePixmap(gpu::kNullSurfaceHandle, GetVulkanDevice(),
+ size, format, usage);
#else
NOTIMPLEMENTED();
#endif
@@ -170,4 +177,10 @@ unsigned GpuMemoryBufferFactoryNativePixmap::RequiredTextureType() {
return GL_TEXTURE_2D;
}
+VkDevice GpuMemoryBufferFactoryNativePixmap::GetVulkanDevice() {
+ return vulkan_context_provider_
+ ? vulkan_context_provider_->GetDeviceQueue()->GetVulkanDevice()
+ : VK_NULL_HANDLE;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h
index f9d7730874f..b3692aeded4 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h
@@ -5,6 +5,8 @@
#ifndef GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_NATIVE_PIXMAP_H_
#define GPU_IPC_SERVICE_GPU_MEMORY_BUFFER_FACTORY_NATIVE_PIXMAP_H_
+#include <vulkan/vulkan.h>
+
#include <unordered_map>
#include <utility>
@@ -27,6 +29,8 @@ class GPU_IPC_SERVICE_EXPORT GpuMemoryBufferFactoryNativePixmap
public ImageFactory {
public:
GpuMemoryBufferFactoryNativePixmap();
+ explicit GpuMemoryBufferFactoryNativePixmap(
+ viz::VulkanContextProvider* vulkan_context_provider);
~GpuMemoryBufferFactoryNativePixmap() override;
// Overridden from GpuMemoryBufferFactory:
@@ -61,6 +65,11 @@ class GPU_IPC_SERVICE_EXPORT GpuMemoryBufferFactoryNativePixmap
using NativePixmapMap = std::unordered_map<NativePixmapMapKey,
scoped_refptr<gfx::NativePixmap>,
NativePixmapMapKeyHash>;
+
+ VkDevice GetVulkanDevice();
+
+ scoped_refptr<viz::VulkanContextProvider> vulkan_context_provider_;
+
NativePixmapMap native_pixmaps_;
base::Lock native_pixmaps_lock_;
diff --git a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc
index b1d11027a44..8af3ce42507 100644
--- a/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap_unittest.cc
@@ -3,14 +3,20 @@
// found in the LICENSE file.
#include "gpu/ipc/service/gpu_memory_buffer_factory_native_pixmap.h"
+
+#include "build/build_config.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory_test_template.h"
namespace gpu {
namespace {
+// On Fuchsia NativePixmap depends on Vulkan, which is not initialized in tests.
+// See crbug.com/957700
+#if !defined(OS_FUCHSIA)
INSTANTIATE_TYPED_TEST_SUITE_P(GpuMemoryBufferFactoryNativePixmap,
GpuMemoryBufferFactoryTest,
GpuMemoryBufferFactoryNativePixmap);
+#endif
} // namespace
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index 45288aedda6..abec67d0d5a 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -28,6 +28,7 @@
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "gpu/config/gpu_crash_keys.h"
+#include "gpu/config/gpu_finch_features.h"
#if defined(OS_WIN)
#include <windows.h>
@@ -113,19 +114,7 @@ std::unique_ptr<GpuWatchdogThread> GpuWatchdogThread::Create(
}
void GpuWatchdogThread::CheckArmed() {
- last_reported_progress_timeticks_ = base::TimeTicks::Now();
- // If the watchdog is |awaiting_acknowledge_|, reset this variable to false
- // and post an acknowledge task now. No barrier is needed as
- // |awaiting_acknowledge_| is only ever read from this thread.
- if (base::subtle::NoBarrier_CompareAndSwap(&awaiting_acknowledge_, true,
- false)) {
- // Called on the monitored thread. Responds with OnAcknowledge. Cannot use
- // the method factory. As we stop the task runner before destroying this
- // class, the unretained reference will always outlive the task.
- task_runner()->PostTask(FROM_HERE,
- base::BindOnce(&GpuWatchdogThread::OnAcknowledge,
- base::Unretained(this)));
- }
+ base::subtle::NoBarrier_Store(&awaiting_acknowledge_, false);
}
void GpuWatchdogThread::ReportProgress() {
@@ -345,6 +334,11 @@ void GpuWatchdogThread::OnCheck(bool after_suspend) {
}
void GpuWatchdogThread::OnCheckTimeout() {
+ DeliberatelyTerminateToRecoverFromHang();
+}
+
+// Use the --disable-gpu-watchdog command line switch to disable this.
+void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
// Should not get here while the system is suspended.
DCHECK(!suspension_counter_.HasRefs());
@@ -353,39 +347,15 @@ void GpuWatchdogThread::OnCheckTimeout() {
// when a machine wakes up from sleep or hibernation, which would otherwise
// appear to be a hang.
if (base::Time::Now() > suspension_timeout_) {
- armed_ = false;
- OnCheck(true);
+ OnAcknowledge();
return;
}
if (!base::subtle::NoBarrier_Load(&awaiting_acknowledge_)) {
- // This should be possible only when CheckArmed() has been called but
- // OnAcknowledge() hasn't.
- // In this case the watched thread might need more time to finish posting
- // OnAcknowledge task.
-
- // Continue with the termination after an additional delay.
- task_runner()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(
- &GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang,
- weak_factory_.GetWeakPtr()),
- 0.5 * timeout_);
-
- // Post a task that does nothing on the watched thread to bump its priority
- // and make it more likely to get scheduled.
- watched_task_runner_->PostTask(FROM_HERE, base::DoNothing());
+ OnAcknowledge();
return;
}
- DeliberatelyTerminateToRecoverFromHang();
-}
-
-// Use the --disable-gpu-watchdog command line switch to disable this.
-void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
- // Should not get here while the system is suspended.
- DCHECK(!suspension_counter_.HasRefs());
-
if (alternative_terminate_for_testing_) {
alternative_terminate_for_testing_.Run();
return;
@@ -399,9 +369,8 @@ void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
if (use_thread_cpu_time_ && (time_since_arm < timeout_)) {
task_runner()->PostDelayedTask(
FROM_HERE,
- base::BindOnce(
- &GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang,
- weak_factory_.GetWeakPtr()),
+ base::BindOnce(&GpuWatchdogThread::OnCheckTimeout,
+ weak_factory_.GetWeakPtr()),
timeout_ - time_since_arm);
return;
}
@@ -500,8 +469,6 @@ void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
base::subtle::NoBarrier_Load(&awaiting_acknowledge_);
base::debug::Alias(&awaiting_acknowledge);
- base::TimeTicks before_logging_timeticks = base::TimeTicks::Now();
-
// Don't log the message to stderr in release builds because the buffer
// may be full.
std::string message = base::StringPrintf(
@@ -517,28 +484,17 @@ void GpuWatchdogThread::DeliberatelyTerminateToRecoverFromHang() {
base::debug::Alias(&current_time);
base::debug::Alias(&current_timeticks);
- int64_t since_last_logging =
- (current_timeticks - before_logging_timeticks).InSeconds();
- crash_keys::seconds_since_last_logging.Set(
- base::NumberToString(since_last_logging));
- int64_t since_last_progress_report =
- (current_timeticks - last_reported_progress_timeticks_).InSeconds();
- crash_keys::seconds_since_last_progress_report.Set(
- base::NumberToString(since_last_progress_report));
- int64_t since_last_suspend =
- (current_timeticks - last_suspend_timeticks_).InSeconds();
- crash_keys::seconds_since_last_suspend.Set(
- base::NumberToString(since_last_suspend));
- int64_t since_last_resume =
- (current_timeticks - last_resume_timeticks_).InSeconds();
- crash_keys::seconds_since_last_resume.Set(
- base::NumberToString(since_last_resume));
-
int64_t available_physical_memory =
base::SysInfo::AmountOfAvailablePhysicalMemory() >> 20;
crash_keys::available_physical_memory_in_mb.Set(
base::NumberToString(available_physical_memory));
+ // Check it one last time before crashing.
+ if (!base::subtle::NoBarrier_Load(&awaiting_acknowledge_)) {
+ OnAcknowledge();
+ return;
+ }
+
// Deliberately crash the process to create a crash dump.
*((volatile int*)0) = 0x1337;
@@ -587,12 +543,10 @@ void GpuWatchdogThread::OnAddPowerObserver() {
}
void GpuWatchdogThread::OnSuspend() {
- last_suspend_timeticks_ = base::TimeTicks::Now();
power_suspend_ref_ = suspension_counter_.Take();
}
void GpuWatchdogThread::OnResume() {
- last_resume_timeticks_ = base::TimeTicks::Now();
power_suspend_ref_.reset();
}
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.h b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
index b9388fec81d..0b0155ceeb3 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
@@ -187,10 +187,6 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThread : public base::Thread,
base::Time check_time_;
base::TimeTicks check_timeticks_;
- base::TimeTicks last_reported_progress_timeticks_;
- base::TimeTicks last_suspend_timeticks_;
- base::TimeTicks last_resume_timeticks_;
-
#if defined(USE_X11)
XDisplay* display_;
gfx::AcceleratedWidget window_;
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
index 38994b889bd..32376a4c06f 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.cc
@@ -4,14 +4,21 @@
#include "gpu/ipc/service/image_decode_accelerator_stub.h"
+#include <stddef.h>
+
+#include <new>
#include <utility>
+#include <vector>
#include "base/bind.h"
+#include "base/callback_helpers.h"
#include "base/containers/span.h"
#include "base/feature_list.h"
#include "base/location.h"
#include "base/logging.h"
+#include "base/numerics/checked_math.h"
#include "base/single_thread_task_runner.h"
+#include "build/build_config.h"
#include "gpu/command_buffer/common/constants.h"
#include "gpu/command_buffer/common/context_result.h"
#include "gpu/command_buffer/common/discardable_handle.h"
@@ -19,23 +26,67 @@
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/decoder_context.h"
+#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/service_transfer_cache.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/sync_point_manager.h"
#include "gpu/config/gpu_finch_features.h"
#include "gpu/ipc/common/command_buffer_id.h"
+#include "gpu/ipc/common/surface_handle.h"
#include "gpu/ipc/service/command_buffer_stub.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "ipc/ipc_message.h"
#include "ipc/ipc_message_macros.h"
+#include "third_party/skia/include/core/SkImage.h"
#include "third_party/skia/include/core/SkImageInfo.h"
+#include "third_party/skia/include/core/SkRefCnt.h"
+#include "third_party/skia/include/gpu/GrBackendSurface.h"
+#include "third_party/skia/include/gpu/GrTypes.h"
+#include "third_party/skia/include/gpu/gl/GrGLTypes.h"
+#include "ui/gfx/buffer_types.h"
#include "ui/gfx/color_space.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_image.h"
+
+#if defined(OS_CHROMEOS)
+#include "ui/gfx/linux/native_pixmap_dmabuf.h"
+#include "ui/gl/gl_image_native_pixmap.h"
+#endif
namespace gpu {
class Buffer;
+#if defined(OS_CHROMEOS)
+namespace {
+
+struct CleanUpContext {
+ scoped_refptr<base::SingleThreadTaskRunner> main_task_runner;
+ SharedContextState* shared_context_state = nullptr;
+ scoped_refptr<gl::GLImage> gl_image;
+ GLuint texture = 0;
+};
+
+void CleanUpResource(SkImage::ReleaseContext context) {
+ auto* clean_up_context = static_cast<CleanUpContext*>(context);
+ DCHECK(clean_up_context->main_task_runner->BelongsToCurrentThread());
+ if (clean_up_context->shared_context_state->IsCurrent(
+ nullptr /* surface */)) {
+ DCHECK(!clean_up_context->shared_context_state->context_lost());
+ glDeleteTextures(1u, &clean_up_context->texture);
+ } else {
+ DCHECK(clean_up_context->shared_context_state->context_lost());
+ }
+ // The GLImage is destroyed here (it should be destroyed regardless of whether
+ // the context is lost or current).
+ delete clean_up_context;
+}
+
+} // namespace
+#endif
+
ImageDecodeAcceleratorStub::ImageDecodeAcceleratorStub(
ImageDecodeAcceleratorWorker* worker,
GpuChannel* channel,
@@ -81,6 +132,11 @@ void ImageDecodeAcceleratorStub::Shutdown() {
channel_ = nullptr;
}
+void ImageDecodeAcceleratorStub::SetImageFactoryForTesting(
+ ImageFactory* image_factory) {
+ external_image_factory_for_testing_ = image_factory;
+}
+
ImageDecodeAcceleratorStub::~ImageDecodeAcceleratorStub() {
DCHECK(!channel_);
}
@@ -111,6 +167,14 @@ void ImageDecodeAcceleratorStub::OnScheduleImageDecode(
return;
}
+ // TODO(andrescj): for now, reject requests that need mipmaps until we support
+ // generating mipmap chains.
+ if (decode_params.needs_mips) {
+ DLOG(ERROR) << "Generating mipmaps is not supported";
+ OnError();
+ return;
+ }
+
// Start the actual decode.
worker_->Decode(
std::move(decode_params.encoded_data), decode_params.output_size,
@@ -148,7 +212,7 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
// Gain access to the transfer cache through the GpuChannelManager's
// SharedContextState. We will also use that to get a GrContext that will be
- // used for uploading the image.
+ // used for Skia operations.
ContextResult context_result;
scoped_refptr<SharedContextState> shared_context_state =
channel_->gpu_channel_manager()->GetSharedContextState(&context_result);
@@ -158,6 +222,15 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
return;
}
DCHECK(shared_context_state);
+
+ // TODO(andrescj): in addition to this check, we should not advertise support
+ // for hardware decode acceleration if we're not using GL (until we support
+ // other graphics APIs).
+ if (!shared_context_state->IsGLInitialized()) {
+ DLOG(ERROR) << "GL has not been initialized";
+ OnError();
+ return;
+ }
if (!shared_context_state->gr_context()) {
DLOG(ERROR) << "Could not get the GrContext";
OnError();
@@ -169,6 +242,115 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
return;
}
+ std::vector<sk_sp<SkImage>> plane_sk_images;
+#if defined(OS_CHROMEOS)
+ // Right now, we only support YUV 4:2:0 for the output of the decoder.
+ //
+ // TODO(andrescj): change to gfx::BufferFormat::YUV_420 once
+ // https://crrev.com/c/1573718 lands.
+ DCHECK_EQ(gfx::BufferFormat::YVU_420, completed_decode->buffer_format);
+ DCHECK_EQ(3u, completed_decode->handle.native_pixmap_handle.planes.size());
+
+ // Calculate the dimensions of each of the planes.
+ const gfx::Size y_plane_size = completed_decode->visible_size;
+ base::CheckedNumeric<int> safe_uv_width(y_plane_size.width());
+ base::CheckedNumeric<int> safe_uv_height(y_plane_size.height());
+ safe_uv_width += 1;
+ safe_uv_width /= 2;
+ safe_uv_height += 1;
+ safe_uv_height /= 2;
+ int uv_width;
+ int uv_height;
+ if (!safe_uv_width.AssignIfValid(&uv_width) ||
+ !safe_uv_height.AssignIfValid(&uv_height)) {
+ DLOG(ERROR) << "Could not calculate subsampled dimensions";
+ OnError();
+ return;
+ }
+ gfx::Size uv_plane_size = gfx::Size(uv_width, uv_height);
+
+ // Create a gl::GLImage for each plane and attach it to a texture.
+ plane_sk_images.resize(3u);
+ for (size_t plane = 0u; plane < 3u; plane++) {
+ // |resource_cleaner| will be called to delete textures and GLImages that we
+ // create in this section in case of an early return.
+ CleanUpContext* resource = new CleanUpContext{};
+ resource->main_task_runner = channel_->task_runner();
+ resource->shared_context_state = shared_context_state.get();
+ // The use of base::Unretained() is safe because the |resource| is allocated
+ // using new and is deleted inside CleanUpResource().
+ base::ScopedClosureRunner resource_cleaner(
+ base::BindOnce(&CleanUpResource, base::Unretained(resource)));
+ glGenTextures(1u, &resource->texture);
+ glBindTexture(GL_TEXTURE_EXTERNAL_OES, resource->texture);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_S,
+ GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_WRAP_T,
+ GL_CLAMP_TO_EDGE);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+ glTexParameteri(GL_TEXTURE_EXTERNAL_OES, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
+ gfx::Size plane_size = plane == 0 ? y_plane_size : uv_plane_size;
+
+ // Extract the plane out of |completed_decode->handle| and put it in its own
+ // gfx::GpuMemoryBufferHandle so that we can create an R_8 image for the
+ // plane.
+ gfx::GpuMemoryBufferHandle plane_handle;
+ plane_handle.type = completed_decode->handle.type;
+ plane_handle.native_pixmap_handle.planes.push_back(
+ std::move(completed_decode->handle.native_pixmap_handle.planes[plane]));
+ scoped_refptr<gl::GLImage> plane_image;
+ if (external_image_factory_for_testing_) {
+ plane_image =
+ external_image_factory_for_testing_->CreateImageForGpuMemoryBuffer(
+ std::move(plane_handle), plane_size, gfx::BufferFormat::R_8,
+ -1 /* client_id */, kNullSurfaceHandle);
+ } else {
+ auto plane_pixmap = base::MakeRefCounted<gfx::NativePixmapDmaBuf>(
+ plane_size, gfx::BufferFormat::R_8,
+ std::move(plane_handle.native_pixmap_handle));
+ auto plane_image_native_pixmap =
+ base::MakeRefCounted<gl::GLImageNativePixmap>(plane_size,
+ gfx::BufferFormat::R_8);
+ if (plane_image_native_pixmap->Initialize(plane_pixmap))
+ plane_image = std::move(plane_image_native_pixmap);
+ }
+ if (!plane_image) {
+ DLOG(ERROR) << "Could not create GL image";
+ OnError();
+ return;
+ }
+ resource->gl_image = std::move(plane_image);
+ if (!resource->gl_image->BindTexImage(GL_TEXTURE_EXTERNAL_OES)) {
+ DLOG(ERROR) << "Could not bind GL image to texture";
+ OnError();
+ return;
+ }
+
+ // Create a SkImage using the texture.
+ const GrBackendTexture plane_backend_texture(
+ plane_size.width(), plane_size.height(), GrMipMapped::kNo,
+ GrGLTextureInfo{GL_TEXTURE_EXTERNAL_OES, resource->texture, GL_R8_EXT});
+ plane_sk_images[plane] = SkImage::MakeFromTexture(
+ shared_context_state->gr_context(), plane_backend_texture,
+ kTopLeft_GrSurfaceOrigin, kGray_8_SkColorType, kOpaque_SkAlphaType,
+ nullptr /* colorSpace */, CleanUpResource, resource);
+ if (!plane_sk_images[plane]) {
+ DLOG(ERROR) << "Could not create planar SkImage";
+ OnError();
+ return;
+ }
+ // No need for us to call the resource cleaner. Skia should do that.
+ resource_cleaner.Release().Reset();
+ }
+#else
+ // Right now, we only support Chrome OS because we need to use the
+ // |native_pixmap_handle| member of a GpuMemoryBufferHandle.
+ NOTIMPLEMENTED()
+ << "Image decode acceleration is unsupported for this platform";
+ OnError();
+ return;
+#endif
+
// Insert the cache entry in the transfer cache. Note that this section
// validates several of the IPC parameters: |params.raster_decoder_route_id|,
// |params.transfer_cache_entry_id|, |params.discardable_handle_shm_id|, and
@@ -195,15 +377,16 @@ void ImageDecodeAcceleratorStub::ProcessCompletedDecode(
return;
}
DCHECK(shared_context_state->transfer_cache());
- if (!shared_context_state->transfer_cache()->CreateLockedImageEntry(
- command_buffer->decoder_context()->GetRasterDecoderId(),
- params.transfer_cache_entry_id,
- ServiceDiscardableHandle(std::move(handle_buffer),
- params.discardable_handle_shm_offset,
- params.discardable_handle_shm_id),
- shared_context_state->gr_context(), completed_decode->GetData(),
- completed_decode->GetStride(), completed_decode->GetImageInfo(),
- params.needs_mips, params.target_color_space.ToSkColorSpace())) {
+ if (!shared_context_state->transfer_cache()
+ ->CreateLockedHardwareDecodedImageEntry(
+ command_buffer->decoder_context()->GetRasterDecoderId(),
+ params.transfer_cache_entry_id,
+ ServiceDiscardableHandle(std::move(handle_buffer),
+ params.discardable_handle_shm_offset,
+ params.discardable_handle_shm_id),
+ shared_context_state->gr_context(), std::move(plane_sk_images),
+ completed_decode->buffer_byte_size, params.needs_mips,
+ params.target_color_space.ToSkColorSpace())) {
DLOG(ERROR) << "Could not create and insert the transfer cache entry";
OnError();
return;
@@ -237,14 +420,8 @@ void ImageDecodeAcceleratorStub::OnDecodeCompleted(
return;
}
- // Some sanity checks on the output of the decoder.
- const SkImageInfo image_info = result->GetImageInfo();
- DCHECK_EQ(expected_output_size.width(), image_info.width());
- DCHECK_EQ(expected_output_size.height(), image_info.height());
- DCHECK_NE(0u, image_info.minRowBytes());
- DCHECK_GE(result->GetStride(), image_info.minRowBytes());
- DCHECK_GE(result->GetData().size(),
- image_info.computeByteSize(result->GetStride()));
+ // A sanity check on the output of the decoder.
+ DCHECK(expected_output_size == result->visible_size);
// The decode is ready to be processed: add it to |pending_completed_decodes_|
// so that ProcessCompletedDecode() can pick it up.
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
index 416aad79ff6..41256a3067e 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub.h
@@ -17,6 +17,7 @@
#include "base/thread_annotations.h"
#include "gpu/command_buffer/service/sequence_id.h"
#include "gpu/ipc/common/gpu_messages.h"
+#include "gpu/ipc/service/gpu_ipc_service_export.h"
#include "gpu/ipc/service/image_decode_accelerator_worker.h"
#include "ui/gfx/geometry/size.h"
@@ -30,6 +31,7 @@ class Message;
namespace gpu {
class GpuChannel;
+class ImageFactory;
class SyncPointClientState;
// Processes incoming image decode requests from renderers: it schedules the
@@ -46,7 +48,7 @@ class SyncPointClientState;
// An object of this class is meant to be used in
// both the IO thread (for receiving decode requests) and the main thread (for
// processing completed decodes).
-class ImageDecodeAcceleratorStub
+class GPU_IPC_SERVICE_EXPORT ImageDecodeAcceleratorStub
: public base::RefCountedThreadSafe<ImageDecodeAcceleratorStub> {
public:
// TODO(andrescj): right now, we only accept one worker to be used for JPEG
@@ -63,6 +65,8 @@ class ImageDecodeAcceleratorStub
// used.
void Shutdown();
+ void SetImageFactoryForTesting(ImageFactory* image_factory);
+
private:
friend class base::RefCountedThreadSafe<ImageDecodeAcceleratorStub>;
~ImageDecodeAcceleratorStub();
@@ -102,6 +106,8 @@ class ImageDecodeAcceleratorStub
bool destroying_channel_ GUARDED_BY(lock_) = false;
uint64_t last_release_count_ GUARDED_BY(lock_) = 0;
+ ImageFactory* external_image_factory_for_testing_ = nullptr;
+
scoped_refptr<base::SingleThreadTaskRunner> main_task_runner_;
scoped_refptr<base::SingleThreadTaskRunner> io_task_runner_;
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
index 8253f396348..f0d8eee23fc 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_stub_unittest.cc
@@ -12,12 +12,11 @@
#include "base/atomicops.h"
#include "base/bind.h"
#include "base/containers/queue.h"
-#include "base/containers/span.h"
+#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
-#include "base/numerics/checked_math.h"
#include "base/test/scoped_feature_list.h"
#include "base/test/test_simple_task_runner.h"
#include "cc/paint/image_transfer_cache_entry.h"
@@ -32,6 +31,7 @@
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/context_group.h"
#include "gpu/command_buffer/service/decoder_context.h"
+#include "gpu/command_buffer/service/image_factory.h"
#include "gpu/command_buffer/service/mocks.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/sequence_id.h"
@@ -48,14 +48,18 @@
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager.h"
#include "gpu/ipc/service/gpu_channel_test_common.h"
+#include "gpu/ipc/service/image_decode_accelerator_stub.h"
#include "gpu/ipc/service/image_decode_accelerator_worker.h"
#include "ipc/ipc_message.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/skia/include/core/SkImage.h"
-#include "third_party/skia/include/core/SkImageInfo.h"
#include "third_party/skia/include/core/SkSize.h"
+#include "ui/gfx/buffer_types.h"
#include "ui/gfx/color_space.h"
#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_image_stub.h"
#include "url/gurl.h"
using testing::InSequence;
@@ -75,6 +79,34 @@ scoped_refptr<Buffer> MakeBufferForTesting() {
return MakeMemoryBuffer(sizeof(base::subtle::Atomic32));
}
+// This ImageFactory is defined so that we don't have to generate a real
+// GpuMemoryBuffer with decoded data in these tests.
+class TestImageFactory : public ImageFactory {
+ public:
+ TestImageFactory() = default;
+ ~TestImageFactory() override = default;
+
+ // ImageFactory implementation.
+ scoped_refptr<gl::GLImage> CreateImageForGpuMemoryBuffer(
+ gfx::GpuMemoryBufferHandle handle,
+ const gfx::Size& size,
+ gfx::BufferFormat format,
+ int client_id,
+ SurfaceHandle surface_handle) override {
+ return base::MakeRefCounted<gl::GLImageStub>();
+ }
+ bool SupportsCreateAnonymousImage() const override { return false; }
+ scoped_refptr<gl::GLImage> CreateAnonymousImage(const gfx::Size& size,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage,
+ bool* is_cleared) override {
+ NOTREACHED();
+ return nullptr;
+ }
+ unsigned RequiredTextureType() override { return GL_TEXTURE_EXTERNAL_OES; }
+ bool SupportsFormatRGB() override { return false; }
+};
+
} // namespace
// This mock allows individual tests to decide asynchronously when to finish a
@@ -83,35 +115,9 @@ class MockImageDecodeAcceleratorWorker : public ImageDecodeAcceleratorWorker {
public:
MockImageDecodeAcceleratorWorker() {}
- class DecodeResult : public ImageDecodeAcceleratorWorker::DecodeResult {
- public:
- DecodeResult(std::vector<uint8_t> decoded_image,
- size_t stride,
- const SkImageInfo& image_info)
- : decoded_image_(std::move(decoded_image)),
- stride_(stride),
- image_info_(image_info) {}
-
- ~DecodeResult() override {}
-
- base::span<const uint8_t> GetData() const override {
- return base::make_span<const uint8_t>(decoded_image_.data(),
- decoded_image_.size());
- }
-
- size_t GetStride() const override { return stride_; }
-
- SkImageInfo GetImageInfo() const override { return image_info_; }
-
- private:
- const std::vector<uint8_t> decoded_image_;
- const size_t stride_ = 0;
- const SkImageInfo image_info_;
- };
-
void Decode(std::vector<uint8_t> encoded_data,
const gfx::Size& output_size,
- CompletedDecodeCB decode_cb) {
+ CompletedDecodeCB decode_cb) override {
pending_decodes_.push(PendingDecode{output_size, std::move(decode_cb)});
DoDecode(output_size);
}
@@ -122,17 +128,19 @@ class MockImageDecodeAcceleratorWorker : public ImageDecodeAcceleratorWorker {
PendingDecode next_decode = std::move(pending_decodes_.front());
pending_decodes_.pop();
if (success) {
- base::CheckedNumeric<size_t> row_bytes = 4u;
- row_bytes *= next_decode.output_size.width();
- base::CheckedNumeric<size_t> rgba_bytes = row_bytes;
- rgba_bytes *= next_decode.output_size.height();
- std::vector<uint8_t> rgba_output(rgba_bytes.ValueOrDie(), 0u);
- std::move(next_decode.decode_cb)
- .Run(std::make_unique<DecodeResult>(
- std::move(rgba_output), row_bytes.ValueOrDie(),
- SkImageInfo::Make(next_decode.output_size.width(),
- next_decode.output_size.height(),
- kRGBA_8888_SkColorType, kOpaque_SkAlphaType)));
+ // We give out a dummy GpuMemoryBufferHandle as the result: since we mock
+ // the ImageFactory and the gl::GLImage in these tests, the only
+ // requirement is that the NativePixmapHandle has 3 planes.
+ auto decode_result = std::make_unique<DecodeResult>();
+ decode_result->handle.type = gfx::GpuMemoryBufferType::NATIVE_PIXMAP;
+ for (size_t plane = 0; plane < 3u; plane++) {
+ decode_result->handle.native_pixmap_handle.planes.emplace_back(
+ 0 /* stride */, 0 /* offset */, 0 /* size */, base::ScopedFD());
+ }
+ decode_result->visible_size = next_decode.output_size;
+ decode_result->buffer_format = gfx::BufferFormat::YVU_420;
+ decode_result->buffer_byte_size = 0u;
+ std::move(next_decode.decode_cb).Run(std::move(decode_result));
} else {
std::move(next_decode.decode_cb).Run(nullptr);
}
@@ -195,6 +203,7 @@ class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
void SetUp() override {
GpuChannelTestCommon::SetUp();
+
// TODO(andrescj): get rid of the |feature_list_| when the feature is
// enabled by default.
feature_list_.InitAndEnableFeature(
@@ -213,6 +222,9 @@ class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
GpuChannel* channel = CreateChannel(kChannelId, false /* is_gpu_host */);
ASSERT_TRUE(channel);
+ ASSERT_TRUE(channel->GetImageDecodeAcceleratorStub());
+ channel->GetImageDecodeAcceleratorStub()->SetImageFactoryForTesting(
+ &image_factory_);
// Create a raster command buffer so that the ImageDecodeAcceleratorStub can
// have access to a TransferBufferManager. Note that we mock the
@@ -380,6 +392,11 @@ class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
transfer_cache->GetEntry(ServiceTransferCache::EntryKey(
raster_decoder_id, cc::TransferCacheEntryType::kImage, i + 1)));
ASSERT_TRUE(decode_entry);
+ ASSERT_EQ(3u, decode_entry->plane_images().size());
+ for (size_t plane = 0; plane < 3u; plane++) {
+ ASSERT_TRUE(decode_entry->plane_images()[plane]);
+ EXPECT_TRUE(decode_entry->plane_images()[plane]->isTextureBacked());
+ }
ASSERT_TRUE(decode_entry->image());
EXPECT_EQ(expected_sizes[i].width(),
decode_entry->image()->dimensions().width());
@@ -392,6 +409,7 @@ class ImageDecodeAcceleratorStubTest : public GpuChannelTestCommon {
StrictMock<MockImageDecodeAcceleratorWorker> image_decode_accelerator_worker_;
private:
+ TestImageFactory image_factory_;
base::test::ScopedFeatureList feature_list_;
base::WeakPtrFactory<ImageDecodeAcceleratorStubTest> weak_ptr_factory_;
@@ -673,4 +691,6 @@ TEST_F(ImageDecodeAcceleratorStubTest, WaitForDiscardableHandleRegistration) {
CheckTransferCacheEntries({SkISize::Make(100, 100)});
}
+// TODO(andrescj): test the deletion of transfer cache entries.
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/image_decode_accelerator_worker.h b/chromium/gpu/ipc/service/image_decode_accelerator_worker.h
index 62fd14f20fd..14265c1fdbc 100644
--- a/chromium/gpu/ipc/service/image_decode_accelerator_worker.h
+++ b/chromium/gpu/ipc/service/image_decode_accelerator_worker.h
@@ -13,11 +13,9 @@
#include "base/callback.h"
#include "base/containers/span.h"
-#include "third_party/skia/include/core/SkImageInfo.h"
-
-namespace gfx {
-class Size;
-} // namespace gfx
+#include "ui/gfx/buffer_types.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/gpu_memory_buffer.h"
namespace gpu {
@@ -27,16 +25,12 @@ class ImageDecodeAcceleratorWorker {
public:
virtual ~ImageDecodeAcceleratorWorker() {}
- // Encapsulates the result of a decode request giving implementations the
- // chance to do custom resource management (e.g., some resources may need to
- // be released when the decoded data is no longer needed). Implementations
- // should not assume that destruction happens on a specific thread.
- class DecodeResult {
- public:
- virtual ~DecodeResult() {}
- virtual base::span<const uint8_t> GetData() const = 0;
- virtual size_t GetStride() const = 0;
- virtual SkImageInfo GetImageInfo() const = 0;
+ // Encapsulates the result of a decode request.
+ struct DecodeResult {
+ gfx::GpuMemoryBufferHandle handle;
+ gfx::Size visible_size;
+ gfx::BufferFormat buffer_format;
+ size_t buffer_byte_size;
};
using CompletedDecodeCB =
@@ -44,13 +38,10 @@ class ImageDecodeAcceleratorWorker {
// Enqueue a decode of |encoded_data|. The |decode_cb| is called
// asynchronously when the decode completes passing as parameter DecodeResult
- // containing the decoded image. For a successful decode, implementations must
- // guarantee that:
- //
- // 1) GetImageInfo().width() == |output_size|.width().
- // 2) GetImageInfo().height() == |output_size|.height().
- // 3) GetStride() >= GetImageInfo().minRowBytes().
- // 4) GetData().size() >= GetImageInfo().computeByteSize(stride()).
+ // containing a reference to the decoded image (in the form of a
+ // gfx::GpuMemoryBufferHandle). The |buffer_byte_size| is the size of the
+ // buffer that |handle| refers to. For a successful decode, implementations
+ // must guarantee that |visible_size| == |output_size|.
//
// If the decode fails, |decode_cb| is called asynchronously with nullptr.
// Callbacks should be called in the order that this method is called.
diff --git a/chromium/gpu/ipc/service/image_transport_surface_android.cc b/chromium/gpu/ipc/service/image_transport_surface_android.cc
index 2fcc8f095c1..22ffeb90fbb 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_android.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_android.cc
@@ -29,8 +29,9 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
DCHECK_NE(surface_handle, kNullSurfaceHandle);
// On Android, the surface_handle is the id of the surface in the
// GpuSurfaceTracker/GpuSurfaceLookup
- ANativeWindow* window =
- GpuSurfaceLookup::GetInstance()->AcquireNativeWidget(surface_handle);
+ bool can_be_used_with_surface_control = false;
+ ANativeWindow* window = GpuSurfaceLookup::GetInstance()->AcquireNativeWidget(
+ surface_handle, &can_be_used_with_surface_control);
if (!window) {
LOG(WARNING) << "Failed to acquire native widget.";
return nullptr;
@@ -38,7 +39,8 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
scoped_refptr<gl::GLSurface> surface;
if (delegate &&
- delegate->GetFeatureInfo()->feature_flags().android_surface_control) {
+ delegate->GetFeatureInfo()->feature_flags().android_surface_control &&
+ can_be_used_with_surface_control) {
surface = new gl::GLSurfaceEGLSurfaceControl(
window, base::ThreadTaskRunnerHandle::Get());
} else {
diff --git a/chromium/gpu/ipc/service/image_transport_surface_delegate.h b/chromium/gpu/ipc/service/image_transport_surface_delegate.h
index 76a99b99280..52f9d6f68ca 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_delegate.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_delegate.h
@@ -6,6 +6,7 @@
#define GPU_IPC_SERVICE_IMAGE_TRANSPORT_SURFACE_DELEGATE_H_
#include "base/callback.h"
+#include "components/viz/common/gpu/gpu_vsync_callback.h"
#include "gpu/command_buffer/common/texture_in_use_response.h"
#include "gpu/ipc/common/surface_handle.h"
#include "gpu/ipc/service/gpu_ipc_service_export.h"
@@ -51,6 +52,9 @@ class GPU_IPC_SERVICE_EXPORT ImageTransportSurfaceDelegate {
// Gets route ID for sending / receiving IPC messages.
virtual int32_t GetRouteID() const = 0;
+ // Callback for GPU vsync signal. May be called on a different thread.
+ virtual viz::GpuVSyncCallback GetGpuVSyncCallback() = 0;
+
protected:
virtual ~ImageTransportSurfaceDelegate() = default;
};
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index 8b9e3875e4b..8b72554c48a 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -67,7 +67,6 @@ class ImageTransportSurfaceOverlayMac : public gl::GLSurface,
void ScheduleCALayerInUseQuery(
std::vector<CALayerInUseQuery> queries) override;
bool IsSurfaceless() const override;
- bool SupportsPresentationCallback() override;
// ui::GpuSwitchingObserver implementation.
void OnGpuSwitched() override;
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index 67d26f0c669..cf6b3450c20 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -273,10 +273,6 @@ bool ImageTransportSurfaceOverlayMac::IsSurfaceless() const {
return true;
}
-bool ImageTransportSurfaceOverlayMac::SupportsPresentationCallback() {
- return true;
-}
-
bool ImageTransportSurfaceOverlayMac::Resize(const gfx::Size& pixel_size,
float scale_factor,
ColorSpace color_space,
diff --git a/chromium/gpu/ipc/service/image_transport_surface_win.cc b/chromium/gpu/ipc/service/image_transport_surface_win.cc
index 5306c000412..0b1526c8d7a 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_win.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_win.cc
@@ -7,10 +7,11 @@
#include <memory>
#include "base/win/windows_version.h"
+#include "gpu/command_buffer/service/feature_info.h"
#include "gpu/config/gpu_preferences.h"
-#include "gpu/ipc/service/direct_composition_surface_win.h"
#include "gpu/ipc/service/pass_through_image_transport_surface.h"
#include "ui/gfx/native_widget_types.h"
+#include "ui/gl/direct_composition_surface_win.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface_egl.h"
@@ -34,11 +35,22 @@ scoped_refptr<gl::GLSurface> ImageTransportSurface::CreateNativeSurface(
auto vsync_provider =
std::make_unique<gl::VSyncProviderWin>(surface_handle);
- if (DirectCompositionSurfaceWin::IsDirectCompositionSupported()) {
- surface = base::MakeRefCounted<DirectCompositionSurfaceWin>(
- std::move(vsync_provider), delegate, surface_handle);
- if (!surface->Initialize(gl::GLSurfaceFormat()))
+ if (gl::DirectCompositionSurfaceWin::IsDirectCompositionSupported()) {
+ const auto& workarounds = delegate->GetFeatureInfo()->workarounds();
+ gl::DirectCompositionSurfaceWin::Settings settings;
+ settings.disable_nv12_dynamic_textures =
+ workarounds.disable_nv12_dynamic_textures;
+ settings.disable_larger_than_screen_overlays =
+ workarounds.disable_larger_than_screen_overlays;
+ auto vsync_callback = delegate->GetGpuVSyncCallback();
+ auto dc_surface = base::MakeRefCounted<gl::DirectCompositionSurfaceWin>(
+ std::move(vsync_provider), std::move(vsync_callback), surface_handle,
+ settings);
+ if (!dc_surface->Initialize(gl::GLSurfaceFormat()))
return nullptr;
+ delegate->DidCreateAcceleratedSurfaceChildWindow(surface_handle,
+ dc_surface->window());
+ surface = std::move(dc_surface);
} else {
surface = gl::InitializeGLSurface(
base::MakeRefCounted<gl::NativeViewGLSurfaceEGL>(
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
index 7ecdc0fc1f0..89a2c9c69cf 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
@@ -47,7 +47,6 @@ PassThroughImageTransportSurface::~PassThroughImageTransportSurface() {
}
bool PassThroughImageTransportSurface::Initialize(gl::GLSurfaceFormat format) {
- DCHECK(gl::GLSurfaceAdapter::SupportsPresentationCallback());
// The surface is assumed to have already been initialized.
return true;
}
diff --git a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
index 7fd2cd32b69..d2d4bb303bc 100644
--- a/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/raster_command_buffer_stub.cc
@@ -85,7 +85,7 @@ gpu::ContextResult RasterCommandBufferStub::Initialize(
return ContextResult::kFatalFailure;
}
- if (init_params.attribs.gpu_preference != gl::PreferIntegratedGpu ||
+ if (init_params.attribs.gpu_preference != gl::GpuPreference::kLowPower ||
init_params.attribs.context_type != CONTEXT_TYPE_OPENGLES2 ||
init_params.attribs.bind_generates_resource) {
LOG(ERROR) << "ContextResult::kFatalFailure: Incompatible creation attribs "
diff --git a/chromium/gpu/ipc/service/shared_image_stub.cc b/chromium/gpu/ipc/service/shared_image_stub.cc
index 0ec3e83f6b6..aee47a80983 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.cc
+++ b/chromium/gpu/ipc/service/shared_image_stub.cc
@@ -75,6 +75,10 @@ bool SharedImageStub::OnMessageReceived(const IPC::Message& msg) {
IPC_MESSAGE_HANDLER(GpuChannelMsg_DestroySharedImage, OnDestroySharedImage)
IPC_MESSAGE_HANDLER(GpuChannelMsg_RegisterSharedImageUploadBuffer,
OnRegisterSharedImageUploadBuffer)
+#if defined(OS_WIN)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_CreateSwapChain, OnCreateSwapChain)
+ IPC_MESSAGE_HANDLER(GpuChannelMsg_PresentSwapChain, OnPresentSwapChain)
+#endif // OS_WIN
IPC_MESSAGE_UNHANDLED(handled = false)
IPC_END_MESSAGE_MAP()
return handled;
@@ -254,6 +258,61 @@ void SharedImageStub::OnDestroySharedImage(const Mailbox& mailbox) {
}
}
+#if defined(OS_WIN)
+void SharedImageStub::OnCreateSwapChain(
+ const GpuChannelMsg_CreateSwapChain_Params& params) {
+ TRACE_EVENT0("gpu", "SharedImageStub::OnCreateSwapChain");
+
+ if (!params.front_buffer_mailbox.IsSharedImage() ||
+ !params.back_buffer_mailbox.IsSharedImage()) {
+ DLOG(ERROR) << "SharedImageStub: Trying to access SharedImage with a "
+ "non-SharedImage mailbox.";
+ OnError();
+ return;
+ }
+
+ if (!MakeContextCurrent()) {
+ OnError();
+ return;
+ }
+
+ if (!factory_->CreateSwapChain(
+ params.front_buffer_mailbox, params.back_buffer_mailbox,
+ params.format, params.size, params.color_space, params.usage)) {
+ DLOG(ERROR) << "SharedImageStub: Unable to create swap chain";
+ OnError();
+ return;
+ }
+
+ sync_point_client_state_->ReleaseFenceSync(params.release_id);
+}
+
+void SharedImageStub::OnPresentSwapChain(const Mailbox& mailbox,
+ uint32_t release_id) {
+ TRACE_EVENT0("gpu", "SharedImageStub::OnPresentSwapChain");
+
+ if (!mailbox.IsSharedImage()) {
+ DLOG(ERROR) << "SharedImageStub: Trying to access a SharedImage with a "
+ "non-SharedImage mailbox.";
+ OnError();
+ return;
+ }
+
+ if (!MakeContextCurrent()) {
+ OnError();
+ return;
+ }
+
+ if (!factory_->PresentSwapChain(mailbox)) {
+ DLOG(ERROR) << "SharedImageStub: Unable to present swap chain";
+ OnError();
+ return;
+ }
+
+ sync_point_client_state_->ReleaseFenceSync(release_id);
+}
+#endif // OS_WIN
+
void SharedImageStub::OnRegisterSharedImageUploadBuffer(
base::ReadOnlySharedMemoryRegion shm) {
TRACE_EVENT0("gpu", "SharedImageStub::OnRegisterSharedImageUploadBuffer");
diff --git a/chromium/gpu/ipc/service/shared_image_stub.h b/chromium/gpu/ipc/service/shared_image_stub.h
index 99792469916..088ff0a139b 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.h
+++ b/chromium/gpu/ipc/service/shared_image_stub.h
@@ -7,6 +7,7 @@
#include "base/memory/weak_ptr.h"
#include "base/trace_event/memory_dump_provider.h"
+#include "build/build_config.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/sequence_id.h"
@@ -66,6 +67,11 @@ class GPU_IPC_SERVICE_EXPORT SharedImageStub
void OnUpdateSharedImage(const Mailbox& mailbox, uint32_t release_id);
void OnDestroySharedImage(const Mailbox& mailbox);
void OnRegisterSharedImageUploadBuffer(base::ReadOnlySharedMemoryRegion shm);
+#if defined(OS_WIN)
+ void OnCreateSwapChain(const GpuChannelMsg_CreateSwapChain_Params& params);
+ void OnPresentSwapChain(const Mailbox& mailbox, uint32_t release_id);
+#endif // OS_WIN
+
bool MakeContextCurrent();
ContextResult MakeContextCurrentAndCreateFactory();
void OnError();
diff --git a/chromium/gpu/ipc/service/stream_texture_android.cc b/chromium/gpu/ipc/service/stream_texture_android.cc
index 1b66ba80100..8d6d9f6f05c 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.cc
+++ b/chromium/gpu/ipc/service/stream_texture_android.cc
@@ -223,6 +223,30 @@ void StreamTexture::OnForwardForSurfaceRequest(
surface_owner_.get());
}
+void StreamTexture::OnSetSize(const gfx::Size& size) {
+ size_ = size;
+ if (!owner_stub_ || !surface_owner_.get())
+ return;
+
+ gles2::ContextGroup* context_group =
+ owner_stub_->decoder_context()->GetContextGroup();
+ DCHECK(context_group);
+ TextureManager* texture_manager = context_group->texture_manager();
+ gles2::Texture* texture =
+ texture_manager->GetTextureForServiceId(texture_id_);
+ if (texture) {
+ // SetLevelInfo will reset the image / stream texture image, which may be
+ // the last reference to |this|, so keep a reference around, and make sure
+ // to reset the stream texture image.
+ scoped_refptr<StreamTexture> self(this);
+ texture->SetLevelInfo(GL_TEXTURE_EXTERNAL_OES, 0, GL_RGBA, size.width(),
+ size.height(), 1, 0, GL_RGBA, GL_UNSIGNED_BYTE,
+ gfx::Rect(size));
+ texture->SetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES, 0, this,
+ gles2::Texture::UNBOUND, 0);
+ }
+}
+
StreamTexture::BindOrCopy StreamTexture::ShouldBindOrCopy() {
return COPY;
}
diff --git a/chromium/gpu/ipc/service/stream_texture_android.h b/chromium/gpu/ipc/service/stream_texture_android.h
index 0fbac37eda4..aabd38f910e 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.h
+++ b/chromium/gpu/ipc/service/stream_texture_android.h
@@ -90,7 +90,7 @@ class StreamTexture : public gpu::gles2::GLStreamTextureImage,
// IPC message handlers:
void OnStartListening();
void OnForwardForSurfaceRequest(const base::UnguessableToken& request_token);
- void OnSetSize(const gfx::Size& size) { size_ = size; }
+ void OnSetSize(const gfx::Size& size);
std::unique_ptr<SurfaceOwner> surface_owner_;
diff --git a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
index a8c963d0d88..ad83c5bf8eb 100644
--- a/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
+++ b/chromium/gpu/ipc/service/webgpu_command_buffer_stub.cc
@@ -106,7 +106,8 @@ gpu::ContextResult WebGPUCommandBufferStub::Initialize(
command_buffer_ =
std::make_unique<CommandBufferService>(this, memory_tracker_.get());
std::unique_ptr<webgpu::WebGPUDecoder> decoder(webgpu::WebGPUDecoder::Create(
- this, command_buffer_.get(), manager->outputter()));
+ this, command_buffer_.get(), manager->shared_image_manager(),
+ memory_tracker_.get(), manager->outputter()));
sync_point_client_state_ =
channel_->sync_point_manager()->CreateSyncPointClientState(
diff --git a/chromium/gpu/ipc/webgpu_in_process_context.cc b/chromium/gpu/ipc/webgpu_in_process_context.cc
index b20308b9240..eb52db7a902 100644
--- a/chromium/gpu/ipc/webgpu_in_process_context.cc
+++ b/chromium/gpu/ipc/webgpu_in_process_context.cc
@@ -58,9 +58,8 @@ ContextResult WebGPUInProcessContext::Initialize(
static const scoped_refptr<gl::GLSurface> surface = nullptr;
static constexpr bool is_offscreen = true;
- static constexpr InProcessCommandBuffer* share_group = nullptr;
auto result = command_buffer_->Initialize(
- surface, is_offscreen, kNullSurfaceHandle, attribs, share_group,
+ surface, is_offscreen, kNullSurfaceHandle, attribs,
gpu_memory_buffer_manager, image_factory, gpu_channel_manager_delegate,
client_task_runner_, nullptr, nullptr);
if (result != ContextResult::kSuccess) {
diff --git a/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc b/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc
index 78425e1c8b0..c7864c19794 100644
--- a/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc
+++ b/chromium/gpu/tools/compositor_model_bench/compositor_model_bench.cc
@@ -26,9 +26,9 @@
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/location.h"
-#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
+#include "base/task/single_thread_task_executor.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "gpu/tools/compositor_model_bench/render_model_utils.h"
@@ -116,7 +116,7 @@ class Simulator {
LOG(INFO) << "Running " << sims_remaining_.size() << " simulations.";
- message_loop_.task_runner()->PostTask(
+ single_thread_task_executor_.task_runner()->PostTask(
FROM_HERE,
base::BindOnce(&Simulator::ProcessEvents, weak_factory_.GetWeakPtr()));
run_loop_.Run();
@@ -324,7 +324,7 @@ class Simulator {
current_sim_->Resize(window_width_, window_height_);
}
- base::MessageLoop message_loop_;
+ base::SingleThreadTaskExecutor single_thread_task_executor_;
base::RunLoop run_loop_;
// Simulation task list for this execution
diff --git a/chromium/gpu/vulkan/BUILD.gn b/chromium/gpu/vulkan/BUILD.gn
index 5a440b9dd92..066ae742fa8 100644
--- a/chromium/gpu/vulkan/BUILD.gn
+++ b/chromium/gpu/vulkan/BUILD.gn
@@ -98,6 +98,14 @@ if (enable_vulkan) {
sources = [
"tests/native_window.h",
]
+
+ # Vulkan Swiftshader can only be built on Linux.
+ # TODO(samans): Support more platforms. https://crbug.com/963988
+ if (use_x11) {
+ data_deps = [
+ "//third_party/swiftshader/src/Vulkan:swiftshader_libvulkan",
+ ]
+ }
deps = [
"//ui/gfx",
"//ui/gfx:native_widget_types",
diff --git a/chromium/gpu/vulkan/android/BUILD.gn b/chromium/gpu/vulkan/android/BUILD.gn
index c9c4e594a16..b78a2ac9101 100644
--- a/chromium/gpu/vulkan/android/BUILD.gn
+++ b/chromium/gpu/vulkan/android/BUILD.gn
@@ -24,6 +24,7 @@ component("android") {
defines = [ "IS_VULKAN_ANDROID_IMPL" ]
deps = [
+ "//gpu/ipc/common:common",
"//ui/gfx",
]
diff --git a/chromium/gpu/vulkan/android/vulkan_android_unittests.cc b/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
index 44def3e6812..e9bfdbd3857 100644
--- a/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
+++ b/chromium/gpu/vulkan/android/vulkan_android_unittests.cc
@@ -51,7 +51,7 @@ class VulkanImplementationAndroidTest : public testing::Test {
}
protected:
- std::unique_ptr<VulkanImplementationAndroid> vk_implementation_;
+ std::unique_ptr<VulkanImplementation> vk_implementation_;
scoped_refptr<viz::VulkanInProcessContextProvider> vk_context_provider_;
VkDevice vk_device_;
VkPhysicalDevice vk_phy_device_;
diff --git a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
index 2a7ef88f0bf..e3b427e00a2 100644
--- a/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
+++ b/chromium/gpu/vulkan/android/vulkan_implementation_android.cc
@@ -8,6 +8,7 @@
#include "base/bind_helpers.h"
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "gpu/ipc/common/vulkan_ycbcr_info.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_instance.h"
@@ -15,9 +16,59 @@
#include "gpu/vulkan/vulkan_surface.h"
#include "gpu/vulkan/vulkan_util.h"
#include "ui/gfx/gpu_fence.h"
+#include "ui/gfx/gpu_memory_buffer.h"
namespace gpu {
+namespace {
+bool GetAhbProps(
+ const VkDevice& vk_device,
+ AHardwareBuffer* hardware_buffer,
+ VkAndroidHardwareBufferFormatPropertiesANDROID* ahb_format_props,
+ VkAndroidHardwareBufferPropertiesANDROID* ahb_props) {
+ DCHECK(ahb_format_props);
+ DCHECK(ahb_props);
+
+ // To obtain format properties of an Android hardware buffer, include an
+ // instance of VkAndroidHardwareBufferFormatPropertiesANDROID in the pNext
+ // chain of the VkAndroidHardwareBufferPropertiesANDROID instance passed to
+ // vkGetAndroidHardwareBufferPropertiesANDROID.
+ ahb_format_props->sType =
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
+ ahb_format_props->pNext = nullptr;
+
+ ahb_props->sType =
+ VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
+ ahb_props->pNext = ahb_format_props;
+
+ bool result = vkGetAndroidHardwareBufferPropertiesANDROID(
+ vk_device, hardware_buffer, ahb_props);
+ if (result != VK_SUCCESS) {
+ LOG(ERROR)
+ << "GetAhbProps: vkGetAndroidHardwareBufferPropertiesANDROID failed : "
+ << result;
+ return false;
+ }
+ return true;
+}
+
+void PopulateYcbcrInfo(
+ const VkAndroidHardwareBufferFormatPropertiesANDROID& ahb_format_props,
+ VulkanYCbCrInfo* ycbcr_info) {
+ DCHECK(ycbcr_info);
+
+ ycbcr_info->suggested_ycbcr_model = ahb_format_props.suggestedYcbcrModel;
+ ycbcr_info->suggested_ycbcr_range = ahb_format_props.suggestedYcbcrRange;
+ ycbcr_info->suggested_xchroma_offset =
+ ahb_format_props.suggestedXChromaOffset;
+ ycbcr_info->suggested_ychroma_offset =
+ ahb_format_props.suggestedYChromaOffset;
+ ycbcr_info->external_format = ahb_format_props.externalFormat;
+ ycbcr_info->format_features = ahb_format_props.formatFeatures;
+}
+
+} // namespace
+
VulkanImplementationAndroid::VulkanImplementationAndroid() = default;
VulkanImplementationAndroid::~VulkanImplementationAndroid() = default;
@@ -134,6 +185,25 @@ VulkanImplementationAndroid::GetExternalImageHandleType() {
return VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
}
+bool VulkanImplementationAndroid::CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) {
+ return false;
+}
+
+bool VulkanImplementationAndroid::CreateImageFromGpuMemoryHandle(
+ VkDevice vk_device,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ gfx::Size size,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) {
+ // TODO(sergeyu): Move code from CreateVkImageAndImportAHB() here and remove
+ // CreateVkImageAndImportAHB().
+ NOTIMPLEMENTED();
+ return false;
+}
+
bool VulkanImplementationAndroid::CreateVkImageAndImportAHB(
const VkDevice& vk_device,
const VkPhysicalDevice& vk_physical_device,
@@ -142,33 +212,19 @@ bool VulkanImplementationAndroid::CreateVkImageAndImportAHB(
VkImage* vk_image,
VkImageCreateInfo* vk_image_info,
VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size) {
+ VkDeviceSize* mem_allocation_size,
+ VulkanYCbCrInfo* ycbcr_info) {
DCHECK(ahb_handle.is_valid());
DCHECK(vk_image);
DCHECK(vk_image_info);
DCHECK(vk_device_memory);
DCHECK(mem_allocation_size);
- // To obtain format properties of an Android hardware buffer, include an
- // instance of VkAndroidHardwareBufferFormatPropertiesANDROID in the pNext
- // chain of the VkAndroidHardwareBufferPropertiesANDROID instance passed to
- // vkGetAndroidHardwareBufferPropertiesANDROID.
- VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props;
- ahb_format_props.sType =
- VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_FORMAT_PROPERTIES_ANDROID;
- ahb_format_props.pNext = nullptr;
-
- VkAndroidHardwareBufferPropertiesANDROID ahb_props;
- ahb_props.sType =
- VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID;
- ahb_props.pNext = &ahb_format_props;
-
- bool result = vkGetAndroidHardwareBufferPropertiesANDROID(
- vk_device, ahb_handle.get(), &ahb_props);
- if (result != VK_SUCCESS) {
- LOG(ERROR) << "GetAndroidHardwareBufferProperties failed : " << result;
+ // Get the image format properties of an Android hardware buffer.
+ VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
+ VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
+ if (!GetAhbProps(vk_device, ahb_handle.get(), &ahb_format_props, &ahb_props))
return false;
- }
// To create an image with an external format, include an instance of
// VkExternalFormatANDROID in the pNext chain of VkImageCreateInfo.
@@ -262,7 +318,7 @@ bool VulkanImplementationAndroid::CreateVkImageAndImportAHB(
vk_image_info->initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
// Create Vk Image.
- result = vkCreateImage(vk_device, vk_image_info, nullptr, vk_image);
+ bool result = vkCreateImage(vk_device, vk_image_info, nullptr, vk_image);
if (result != VK_SUCCESS) {
LOG(ERROR) << "vkCreateImage failed : " << result;
return false;
@@ -322,6 +378,24 @@ bool VulkanImplementationAndroid::CreateVkImageAndImportAHB(
}
*mem_allocation_size = mem_alloc_info.allocationSize;
+ if (ycbcr_info)
+ PopulateYcbcrInfo(ahb_format_props, ycbcr_info);
+ return true;
+}
+
+bool VulkanImplementationAndroid::GetSamplerYcbcrConversionInfo(
+ const VkDevice& vk_device,
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ VulkanYCbCrInfo* ycbcr_info) {
+ DCHECK(ycbcr_info);
+
+ // Get the image format properties of an Android hardware buffer.
+ VkAndroidHardwareBufferFormatPropertiesANDROID ahb_format_props = {};
+ VkAndroidHardwareBufferPropertiesANDROID ahb_props = {};
+ if (!GetAhbProps(vk_device, ahb_handle.get(), &ahb_format_props, &ahb_props))
+ return false;
+
+ PopulateYcbcrInfo(ahb_format_props, ycbcr_info);
return true;
}
diff --git a/chromium/gpu/vulkan/android/vulkan_implementation_android.h b/chromium/gpu/vulkan/android/vulkan_implementation_android.h
index 6657900dfc0..f1f316e35e4 100644
--- a/chromium/gpu/vulkan/android/vulkan_implementation_android.h
+++ b/chromium/gpu/vulkan/android/vulkan_implementation_android.h
@@ -41,6 +41,16 @@ class COMPONENT_EXPORT(VULKAN_ANDROID) VulkanImplementationAndroid
SemaphoreHandle GetSemaphoreHandle(VkDevice vk_device,
VkSemaphore vk_semaphore) override;
VkExternalMemoryHandleTypeFlagBits GetExternalImageHandleType() override;
+ bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool CreateImageFromGpuMemoryHandle(
+ VkDevice vk_device,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ gfx::Size size,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) override;
bool CreateVkImageAndImportAHB(
const VkDevice& vk_device,
const VkPhysicalDevice& vk_physical_device,
@@ -49,7 +59,12 @@ class COMPONENT_EXPORT(VULKAN_ANDROID) VulkanImplementationAndroid
VkImage* vk_image,
VkImageCreateInfo* vk_image_info,
VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size) override;
+ VkDeviceSize* mem_allocation_size,
+ VulkanYCbCrInfo* ycbcr_info) override;
+ bool GetSamplerYcbcrConversionInfo(
+ const VkDevice& vk_device,
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ VulkanYCbCrInfo* ycbcr_info) override;
private:
VulkanInstance vulkan_instance_;
diff --git a/chromium/gpu/vulkan/demo/main.cc b/chromium/gpu/vulkan/demo/main.cc
index 5abc4f6cba7..35b66efcb4a 100644
--- a/chromium/gpu/vulkan/demo/main.cc
+++ b/chromium/gpu/vulkan/demo/main.cc
@@ -36,7 +36,7 @@ int main(int argc, char** argv) {
// Build UI thread message loop. This is used by platform
// implementations for event polling & running background tasks.
base::MessageLoopForUI message_loop;
- base::ThreadPool::CreateAndStartWithDefaultParams("VulkanDemo");
+ base::ThreadPoolInstance::CreateAndStartWithDefaultParams("VulkanDemo");
gpu::VulkanDemo vulkan_demo;
vulkan_demo.Initialize();
diff --git a/chromium/gpu/vulkan/demo/vulkan_demo.cc b/chromium/gpu/vulkan/demo/vulkan_demo.cc
index c75f0784f6a..451ae9f5453 100644
--- a/chromium/gpu/vulkan/demo/vulkan_demo.cc
+++ b/chromium/gpu/vulkan/demo/vulkan_demo.cc
@@ -15,6 +15,7 @@
#include "third_party/skia/include/core/SkFont.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/effects/SkGradientShader.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/GrContext.h"
#include "ui/events/platform/platform_event_source.h"
@@ -99,19 +100,16 @@ void VulkanDemo::OnAcceleratedWidgetAvailable(gfx::AcceleratedWidget widget) {
}
void VulkanDemo::CreateSkSurface() {
- auto* swap_chain = vulkan_surface_->GetSwapChain();
- auto index = swap_chain->current_image();
- auto& sk_surface = sk_surfaces_[index];
+ scoped_write_.emplace(vulkan_surface_->GetSwapChain());
+ auto& sk_surface = sk_surfaces_[scoped_write_->image_index()];
if (!sk_surface) {
SkSurfaceProps surface_props =
SkSurfaceProps(0, SkSurfaceProps::kLegacyFontHost_InitType);
- VkImage vk_image = swap_chain->GetCurrentImage();
- VkImageLayout vk_image_layout = swap_chain->GetCurrentImageLayout();
GrVkImageInfo vk_image_info;
- vk_image_info.fImage = vk_image;
+ vk_image_info.fImage = scoped_write_->image();
vk_image_info.fAlloc = {VK_NULL_HANDLE, 0, 0, 0};
- vk_image_info.fImageLayout = vk_image_layout;
+ vk_image_info.fImageLayout = scoped_write_->image_layout();
vk_image_info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
vk_image_info.fFormat = VK_FORMAT_B8G8R8A8_UNORM;
vk_image_info.fLevelCount = 1;
@@ -125,9 +123,13 @@ void VulkanDemo::CreateSkSurface() {
} else {
auto backend = sk_surface->getBackendRenderTarget(
SkSurface::kFlushRead_BackendHandleAccess);
- backend.setVkImageLayout(swap_chain->GetCurrentImageLayout());
+ backend.setVkImageLayout(scoped_write_->image_layout());
}
sk_surface_ = sk_surface;
+ GrBackendSemaphore semaphore;
+ semaphore.initVulkan(scoped_write_->TakeBeginSemaphore());
+ auto result = sk_surface_->wait(1, &semaphore);
+ DCHECK(result);
}
void VulkanDemo::Draw(SkCanvas* canvas, float fraction) {
@@ -182,7 +184,6 @@ void VulkanDemo::Draw(SkCanvas* canvas, float fraction) {
canvas->drawString(message, 0, 0, font, paint);
canvas->restore();
- canvas->flush();
}
void VulkanDemo::RenderFrame() {
@@ -190,13 +191,21 @@ void VulkanDemo::RenderFrame() {
return;
CreateSkSurface();
Draw(sk_surface_->getCanvas(), 0.7);
+ GrBackendSemaphore semaphore;
+ GrFlushInfo flush_info = {
+ .fFlags = kNone_GrFlushFlags,
+ .fNumSemaphores = 1,
+ .fSignalSemaphores = &semaphore,
+ };
+ sk_surface_->flush(SkSurface::BackendSurfaceAccess::kPresent, flush_info);
auto backend = sk_surface_->getBackendRenderTarget(
SkSurface::kFlushRead_BackendHandleAccess);
GrVkImageInfo vk_image_info;
if (!backend.getVkImageInfo(&vk_image_info))
NOTREACHED() << "Failed to get image info";
- vulkan_surface_->GetSwapChain()->SetCurrentImageLayout(
- vk_image_info.fImageLayout);
+ scoped_write_->set_image_layout(vk_image_info.fImageLayout);
+ scoped_write_->SetEndSemaphore(semaphore.vkSemaphore());
+ scoped_write_.reset();
vulkan_surface_->SwapBuffers();
base::ThreadTaskRunnerHandle::Get()->PostTask(
diff --git a/chromium/gpu/vulkan/demo/vulkan_demo.h b/chromium/gpu/vulkan/demo/vulkan_demo.h
index 9e9ae1229ba..577f8fc52b9 100644
--- a/chromium/gpu/vulkan/demo/vulkan_demo.h
+++ b/chromium/gpu/vulkan/demo/vulkan_demo.h
@@ -8,6 +8,8 @@
#include <memory>
#include "base/memory/scoped_refptr.h"
+#include "base/optional.h"
+#include "gpu/vulkan/vulkan_swap_chain.h"
#include "third_party/skia/include/core/SkRefCnt.h"
#include "ui/gfx/geometry/size.h"
#include "ui/platform_window/platform_window_delegate.h"
@@ -59,12 +61,13 @@ class VulkanDemo : public ui::PlatformWindowDelegate {
void Draw(SkCanvas* canvas, float fraction);
void RenderFrame();
- std::unique_ptr<gpu::VulkanImplementation> vulkan_implementation_;
+ std::unique_ptr<VulkanImplementation> vulkan_implementation_;
scoped_refptr<viz::VulkanContextProvider> vulkan_context_provider_;
gfx::AcceleratedWidget accelerated_widget_ = gfx::kNullAcceleratedWidget;
std::unique_ptr<ui::PlatformEventSource> event_source_;
std::unique_ptr<ui::PlatformWindow> window_;
- std::unique_ptr<gpu::VulkanSurface> vulkan_surface_;
+ std::unique_ptr<VulkanSurface> vulkan_surface_;
+ base::Optional<VulkanSwapChain::ScopedWrite> scoped_write_;
sk_sp<SkSurface> sk_surface_;
std::vector<sk_sp<SkSurface>> sk_surfaces_;
float rotation_angle_ = 0;
diff --git a/chromium/gpu/vulkan/generate_bindings.py b/chromium/gpu/vulkan/generate_bindings.py
index 278fd0cb723..09e5a2e7ab0 100755
--- a/chromium/gpu/vulkan/generate_bindings.py
+++ b/chromium/gpu/vulkan/generate_bindings.py
@@ -31,7 +31,9 @@ VULKAN_INSTANCE_FUNCTIONS = [
VULKAN_PHYSICAL_DEVICE_FUNCTIONS = [
{ 'name': 'vkCreateDevice' },
{ 'name': 'vkEnumerateDeviceLayerProperties' },
+{ 'name': 'vkGetPhysicalDeviceMemoryProperties'},
{ 'name': 'vkGetPhysicalDeviceQueueFamilyProperties' },
+{ 'name': 'vkGetPhysicalDeviceProperties' },
# The following functions belong here but are handled specially:
# vkGetPhysicalDeviceSurfaceCapabilitiesKHR
# vkGetPhysicalDeviceSurfaceFormatsKHR
@@ -43,8 +45,10 @@ VULKAN_DEVICE_FUNCTIONS = [
{ 'name': 'vkAllocateCommandBuffers' },
{ 'name': 'vkAllocateDescriptorSets' },
{ 'name': 'vkAllocateMemory' },
+{ 'name': 'vkBindBufferMemory' },
{ 'name': 'vkBindImageMemory' },
{ 'name': 'vkCreateCommandPool' },
+{ 'name': 'vkCreateBuffer' },
{ 'name': 'vkCreateDescriptorPool' },
{ 'name': 'vkCreateDescriptorSetLayout' },
{ 'name': 'vkCreateFence' },
@@ -55,6 +59,7 @@ VULKAN_DEVICE_FUNCTIONS = [
{ 'name': 'vkCreateSampler' },
{ 'name': 'vkCreateSemaphore' },
{ 'name': 'vkCreateShaderModule' },
+{ 'name': 'vkDestroyBuffer' },
{ 'name': 'vkDestroyCommandPool' },
{ 'name': 'vkDestroyDescriptorPool' },
{ 'name': 'vkDestroyDescriptorSetLayout' },
@@ -71,10 +76,13 @@ VULKAN_DEVICE_FUNCTIONS = [
{ 'name': 'vkFreeCommandBuffers' },
{ 'name': 'vkFreeDescriptorSets' },
{ 'name': 'vkFreeMemory' },
+{ 'name': 'vkGetBufferMemoryRequirements' },
{ 'name': 'vkGetDeviceQueue' },
{ 'name': 'vkGetFenceStatus' },
{ 'name': 'vkGetImageMemoryRequirements' },
+{ 'name': 'vkMapMemory' },
{ 'name': 'vkResetFences' },
+{ 'name': 'vkUnmapMemory' },
{ 'name': 'vkUpdateDescriptorSets' },
{ 'name': 'vkWaitForFences' },
]
@@ -109,6 +117,7 @@ VULKAN_QUEUE_FUNCTIONS = [
VULKAN_COMMAND_BUFFER_FUNCTIONS = [
{ 'name': 'vkBeginCommandBuffer' },
{ 'name': 'vkCmdBeginRenderPass' },
+{ 'name': 'vkCmdCopyBufferToImage' },
{ 'name': 'vkCmdEndRenderPass' },
{ 'name': 'vkCmdExecuteCommands' },
{ 'name': 'vkCmdNextSubpass' },
@@ -202,7 +211,12 @@ struct VulkanFunctionPointers {
VULKAN_EXPORT bool BindPhysicalDeviceFunctionPointers(VkInstance vk_instance);
// These functions assume that vkGetDeviceProcAddr has been populated.
- VULKAN_EXPORT bool BindDeviceFunctionPointers(VkDevice vk_device);
+ // |using_swiftshader| allows functions that aren't supported by Swiftshader
+ // to be missing.
+ // TODO(samans): Remove |using_swiftshader| once all the workarounds can be
+ // removed. https://crbug.com/963988
+ VULKAN_EXPORT bool BindDeviceFunctionPointers(VkDevice vk_device,
+ bool using_swiftshader = false);
bool BindSwapchainFunctionPointers(VkDevice vk_device);
base::NativeLibrary vulkan_loader_library_ = nullptr;
@@ -435,16 +449,21 @@ struct VulkanFunctionPointers {
""")
def WriteFunctionPointerInitialization(file, proc_addr_function, parent,
- functions):
+ functions, allow_missing=False):
template = Template(""" ${name}Fn = reinterpret_cast<PFN_${name}>(
$get_proc_addr($parent, "$name"));
- if (!${name}Fn)
+ if (!${name}Fn${check_swiftshader})
return false;
""")
+ if allow_missing:
+ check_swiftshader = " && !using_swiftshader"
+ else:
+ check_swiftshader = ""
for func in functions:
file.write(template.substitute(name=func['name'], get_proc_addr =
- proc_addr_function, parent=parent))
+ proc_addr_function, parent=parent,
+ check_swiftshader=check_swiftshader))
def WriteUnassociatedFunctionPointerInitialization(file, functions):
WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddrFn', 'nullptr',
@@ -454,9 +473,11 @@ def WriteInstanceFunctionPointerInitialization(file, functions):
WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddrFn',
'vk_instance', functions)
-def WriteDeviceFunctionPointerInitialization(file, functions):
+def WriteDeviceFunctionPointerInitialization(file,
+ functions,
+ allow_missing=False):
WriteFunctionPointerInitialization(file, 'vkGetDeviceProcAddrFn', 'vk_device',
- functions)
+ functions, allow_missing)
def GenerateSourceFile(file, unassociated_functions, instance_functions,
physical_device_functions, device_functions,
@@ -530,7 +551,9 @@ bool VulkanFunctionPointers::BindPhysicalDeviceFunctionPointers(
return true;
}
-bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
+bool VulkanFunctionPointers::BindDeviceFunctionPointers(
+ VkDevice vk_device,
+ bool using_swiftshader) {
// Device functions
""")
WriteDeviceFunctionPointerInitialization(file, device_functions)
@@ -554,7 +577,8 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
""")
WriteDeviceFunctionPointerInitialization(file,
- device_functions_linux_or_android)
+ device_functions_linux_or_android,
+ True) # allow_missing
file.write("""\
#endif
@@ -567,7 +591,8 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
""")
WriteDeviceFunctionPointerInitialization(file,
- device_functions_linux)
+ device_functions_linux,
+ True) # allow_missing
file.write("""\
#endif
diff --git a/chromium/gpu/vulkan/init/vulkan_factory.cc b/chromium/gpu/vulkan/init/vulkan_factory.cc
index 4b842bce4cc..51c3dffd7c2 100644
--- a/chromium/gpu/vulkan/init/vulkan_factory.cc
+++ b/chromium/gpu/vulkan/init/vulkan_factory.cc
@@ -26,9 +26,16 @@
namespace gpu {
-std::unique_ptr<VulkanImplementation> CreateVulkanImplementation() {
+std::unique_ptr<VulkanImplementation> CreateVulkanImplementation(
+ bool use_swiftshader) {
+#ifndef USE_X11
+ // TODO(samans): Support Swiftshader on more platforms.
+ // https://crbug.com/963988
+ DCHECK(!use_swiftshader)
+ << "Vulkan Swiftshader is not supported on this platform.";
+#endif
#if defined(USE_X11)
- return std::make_unique<VulkanImplementationX11>();
+ return std::make_unique<VulkanImplementationX11>(use_swiftshader);
#elif defined(OS_ANDROID)
return std::make_unique<VulkanImplementationAndroid>();
#elif defined(USE_OZONE)
diff --git a/chromium/gpu/vulkan/init/vulkan_factory.h b/chromium/gpu/vulkan/init/vulkan_factory.h
index 6aaa4b0338f..1983ddf1441 100644
--- a/chromium/gpu/vulkan/init/vulkan_factory.h
+++ b/chromium/gpu/vulkan/init/vulkan_factory.h
@@ -13,7 +13,8 @@
namespace gpu {
COMPONENT_EXPORT(VULKAN_INIT)
-std::unique_ptr<VulkanImplementation> CreateVulkanImplementation();
+std::unique_ptr<VulkanImplementation> CreateVulkanImplementation(
+ bool use_swiftshader = false);
} // namespace gpu
diff --git a/chromium/gpu/vulkan/semaphore_handle.cc b/chromium/gpu/vulkan/semaphore_handle.cc
index 00ee9660d38..40f2753016b 100644
--- a/chromium/gpu/vulkan/semaphore_handle.cc
+++ b/chromium/gpu/vulkan/semaphore_handle.cc
@@ -38,7 +38,7 @@ SemaphoreHandle SemaphoreHandle::Duplicate() const {
base::ScopedFD(HANDLE_EINTR(dup(handle_.get()))));
#elif defined(OS_WIN)
HANDLE handle_dup;
- if (!::DuplicateHandle(::GetCurrentProcess(), handle_.get(),
+ if (!::DuplicateHandle(::GetCurrentProcess(), handle_.Get(),
::GetCurrentProcess(), &handle_dup, 0, FALSE,
DUPLICATE_SAME_ACCESS)) {
return SemaphoreHandle();
diff --git a/chromium/gpu/vulkan/semaphore_handle.h b/chromium/gpu/vulkan/semaphore_handle.h
index c59ad7a7025..3b1382447ea 100644
--- a/chromium/gpu/vulkan/semaphore_handle.h
+++ b/chromium/gpu/vulkan/semaphore_handle.h
@@ -51,7 +51,13 @@ class VULKAN_EXPORT SemaphoreHandle {
VkExternalSemaphoreHandleTypeFlagBits vk_handle_type() { return type_; }
- bool is_valid() const { return handle_.is_valid(); }
+ bool is_valid() const {
+#if defined(OS_WIN)
+ return handle_.IsValid();
+#else
+ return handle_.is_valid();
+#endif
+ }
// Returns underlying platform-specific handle for the semaphore. is_valid()
// becomes false after this function returns.
diff --git a/chromium/gpu/vulkan/vulkan_command_buffer.cc b/chromium/gpu/vulkan/vulkan_command_buffer.cc
index b76240057a0..ba776e458f6 100644
--- a/chromium/gpu/vulkan/vulkan_command_buffer.cc
+++ b/chromium/gpu/vulkan/vulkan_command_buffer.cc
@@ -11,6 +11,70 @@
namespace gpu {
+namespace {
+
+VkPipelineStageFlags GetPipelineStageFlags(const VkImageLayout layout) {
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_UNDEFINED:
+ return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+ case VK_IMAGE_LAYOUT_GENERAL:
+ return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+ case VK_IMAGE_LAYOUT_PREINITIALIZED:
+ return VK_PIPELINE_STAGE_HOST_BIT;
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return VK_PIPELINE_STAGE_TRANSFER_BIT;
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ return VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
+ VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
+ VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
+ VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
+ VK_PIPELINE_STAGE_VERTEX_SHADER_BIT;
+ case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
+ return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
+ default:
+ NOTREACHED() << "layout=" << layout;
+ }
+ return 0;
+}
+
+VkAccessFlags GetAccessMask(const VkImageLayout layout) {
+ switch (layout) {
+ case VK_IMAGE_LAYOUT_UNDEFINED:
+ return 0;
+ case VK_IMAGE_LAYOUT_GENERAL:
+ DLOG(WARNING) << "VK_IMAGE_LAYOUT_GENERAL is used.";
+ return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+ VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT |
+ VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
+ VK_ACCESS_HOST_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT |
+ VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
+ case VK_IMAGE_LAYOUT_PREINITIALIZED:
+ return VK_ACCESS_HOST_WRITE_BIT;
+ case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
+ return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+ case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
+ return VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
+ case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
+ return VK_ACCESS_TRANSFER_READ_BIT;
+ case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
+ return VK_ACCESS_TRANSFER_WRITE_BIT;
+ case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
+ return 0;
+ default:
+ NOTREACHED() << "layout=" << layout;
+ }
+ return 0;
+}
+
+} // namespace
+
VulkanCommandBuffer::VulkanCommandBuffer(VulkanDeviceQueue* device_queue,
VulkanCommandPool* command_pool,
bool primary)
@@ -31,12 +95,14 @@ bool VulkanCommandBuffer::Initialize() {
VkResult result = VK_SUCCESS;
VkDevice device = device_queue_->GetVulkanDevice();
- VkCommandBufferAllocateInfo command_buffer_info = {};
- command_buffer_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
- command_buffer_info.commandPool = command_pool_->handle();
- command_buffer_info.level = primary_ ? VK_COMMAND_BUFFER_LEVEL_PRIMARY
- : VK_COMMAND_BUFFER_LEVEL_SECONDARY;
- command_buffer_info.commandBufferCount = 1;
+ VkCommandBufferAllocateInfo command_buffer_info = {
+ .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ .pNext = nullptr,
+ .commandPool = command_pool_->handle(),
+ .level = primary_ ? VK_COMMAND_BUFFER_LEVEL_PRIMARY
+ : VK_COMMAND_BUFFER_LEVEL_SECONDARY,
+ .commandBufferCount = 1,
+ };
result =
vkAllocateCommandBuffers(device, &command_buffer_info, &command_buffer_);
@@ -66,19 +132,22 @@ bool VulkanCommandBuffer::Submit(uint32_t num_wait_semaphores,
VkSemaphore* wait_semaphores,
uint32_t num_signal_semaphores,
VkSemaphore* signal_semaphores) {
- VkPipelineStageFlags wait_dst_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
-
DCHECK(primary_);
- VkSubmitInfo submit_info = {};
- submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
- submit_info.commandBufferCount = 1;
- submit_info.pCommandBuffers = &command_buffer_;
- submit_info.waitSemaphoreCount = num_wait_semaphores;
- submit_info.pWaitSemaphores = wait_semaphores;
- submit_info.pWaitDstStageMask = &wait_dst_stage_mask;
- submit_info.signalSemaphoreCount = num_signal_semaphores;
- submit_info.pSignalSemaphores = signal_semaphores;
+ std::vector<VkPipelineStageFlags> wait_dst_stage_mask(
+ num_wait_semaphores, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
+
+ VkSubmitInfo submit_info = {
+ .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ .pNext = nullptr,
+ .waitSemaphoreCount = num_wait_semaphores,
+ .pWaitSemaphores = wait_semaphores,
+ .pWaitDstStageMask = wait_dst_stage_mask.data(),
+ .commandBufferCount = 1,
+ .pCommandBuffers = &command_buffer_,
+ .signalSemaphoreCount = num_signal_semaphores,
+ .pSignalSemaphores = signal_semaphores,
+ };
VkResult result = VK_SUCCESS;
@@ -135,6 +204,50 @@ bool VulkanCommandBuffer::SubmissionFinished() {
return device_queue_->GetFenceHelper()->HasPassed(submission_fence_);
}
+void VulkanCommandBuffer::TransitionImageLayout(VkImage image,
+ VkImageLayout old_layout,
+ VkImageLayout new_layout) {
+ VkImageMemoryBarrier barrier = {
+ .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ .srcAccessMask = GetAccessMask(old_layout),
+ .dstAccessMask = GetAccessMask(new_layout),
+ .oldLayout = old_layout,
+ .newLayout = new_layout,
+ .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
+ .image = image,
+ .subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .subresourceRange.baseMipLevel = 0,
+ .subresourceRange.levelCount = 1,
+ .subresourceRange.baseArrayLayer = 0,
+ .subresourceRange.layerCount = 1,
+ };
+ vkCmdPipelineBarrier(command_buffer_, GetPipelineStageFlags(old_layout),
+ GetPipelineStageFlags(new_layout), 0, 0, nullptr, 0,
+ nullptr, 1, &barrier);
+}
+
+void VulkanCommandBuffer::CopyBufferToImage(VkBuffer buffer,
+ VkImage image,
+ uint32_t buffer_width,
+ uint32_t buffer_height,
+ uint32_t width,
+ uint32_t height) {
+ VkBufferImageCopy region = {
+ .bufferOffset = 0,
+ .bufferRowLength = buffer_width,
+ .bufferImageHeight = buffer_height,
+ .imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
+ .imageSubresource.mipLevel = 0,
+ .imageSubresource.baseArrayLayer = 0,
+ .imageSubresource.layerCount = 1,
+ .imageOffset = {0, 0, 0},
+ .imageExtent = {width, height, 1},
+ };
+ vkCmdCopyBufferToImage(command_buffer_, buffer, image,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+}
+
void VulkanCommandBuffer::PostExecution() {
if (record_type_ == RECORD_TYPE_SINGLE_USE) {
// Clear upon next use.
diff --git a/chromium/gpu/vulkan/vulkan_command_buffer.h b/chromium/gpu/vulkan/vulkan_command_buffer.h
index 07092677fe7..a7c9552df7c 100644
--- a/chromium/gpu/vulkan/vulkan_command_buffer.h
+++ b/chromium/gpu/vulkan/vulkan_command_buffer.h
@@ -45,6 +45,16 @@ class VULKAN_EXPORT VulkanCommandBuffer {
// is finished.
bool SubmissionFinished();
+ void TransitionImageLayout(VkImage image,
+ VkImageLayout old_layout,
+ VkImageLayout new_layout);
+ void CopyBufferToImage(VkBuffer buffer,
+ VkImage image,
+ uint32_t buffer_width,
+ uint32_t buffer_height,
+ uint32_t width,
+ uint32_t height);
+
private:
friend class CommandBufferRecorderBase;
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.cc b/chromium/gpu/vulkan/vulkan_device_queue.cc
index 4c327acffce..bfbc14658f7 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.cc
+++ b/chromium/gpu/vulkan/vulkan_device_queue.cc
@@ -26,7 +26,8 @@ VulkanDeviceQueue::~VulkanDeviceQueue() {
bool VulkanDeviceQueue::Initialize(
uint32_t options,
const std::vector<const char*>& required_extensions,
- const GetPresentationSupportCallback& get_presentation_support) {
+ const GetPresentationSupportCallback& get_presentation_support,
+ bool use_swiftshader) {
DCHECK_EQ(static_cast<VkPhysicalDevice>(VK_NULL_HANDLE), vk_physical_device_);
DCHECK_EQ(static_cast<VkDevice>(VK_NULL_HANDLE), owned_vk_device_);
DCHECK_EQ(static_cast<VkDevice>(VK_NULL_HANDLE), vk_device_);
@@ -88,6 +89,9 @@ bool VulkanDeviceQueue::Initialize(
return false;
vk_physical_device_ = devices[device_index];
+ vkGetPhysicalDeviceProperties(vk_physical_device_,
+ &vk_physical_device_properties_);
+
vk_queue_index_ = queue_index;
float queue_priority = 0.0f;
@@ -148,7 +152,8 @@ bool VulkanDeviceQueue::Initialize(
enabled_extensions_ = gfx::ExtensionSet(std::begin(enabled_extensions),
std::end(enabled_extensions));
- gpu::GetVulkanFunctionPointers()->BindDeviceFunctionPointers(vk_device_);
+ gpu::GetVulkanFunctionPointers()->BindDeviceFunctionPointers(vk_device_,
+ use_swiftshader);
if (gfx::HasExtension(enabled_extensions_, VK_KHR_SWAPCHAIN_EXTENSION_NAME))
gpu::GetVulkanFunctionPointers()->BindSwapchainFunctionPointers(vk_device_);
@@ -182,6 +187,7 @@ bool VulkanDeviceQueue::InitializeForWevbView(
}
void VulkanDeviceQueue::Destroy() {
+ cleanup_helper_->Destroy();
cleanup_helper_.reset();
if (VK_NULL_HANDLE != owned_vk_device_) {
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.h b/chromium/gpu/vulkan/vulkan_device_queue.h
index a5b954290cf..735c23514fa 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.h
+++ b/chromium/gpu/vulkan/vulkan_device_queue.h
@@ -37,7 +37,8 @@ class VULKAN_EXPORT VulkanDeviceQueue {
bool Initialize(
uint32_t options,
const std::vector<const char*>& required_extensions,
- const GetPresentationSupportCallback& get_presentation_support);
+ const GetPresentationSupportCallback& get_presentation_support,
+ bool use_swiftshader);
bool InitializeForWevbView(VkPhysicalDevice vk_physical_device,
VkDevice vk_device,
@@ -57,6 +58,10 @@ class VULKAN_EXPORT VulkanDeviceQueue {
return vk_physical_device_;
}
+ const VkPhysicalDeviceProperties& vk_physical_device_properties() const {
+ return vk_physical_device_properties_;
+ }
+
VkDevice GetVulkanDevice() const {
DCHECK_NE(static_cast<VkDevice>(VK_NULL_HANDLE), vk_device_);
return vk_device_;
@@ -78,6 +83,7 @@ class VULKAN_EXPORT VulkanDeviceQueue {
private:
gfx::ExtensionSet enabled_extensions_;
VkPhysicalDevice vk_physical_device_ = VK_NULL_HANDLE;
+ VkPhysicalDeviceProperties vk_physical_device_properties_;
VkDevice owned_vk_device_ = VK_NULL_HANDLE;
VkDevice vk_device_ = VK_NULL_HANDLE;
VkQueue vk_queue_ = VK_NULL_HANDLE;
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper.cc b/chromium/gpu/vulkan/vulkan_fence_helper.cc
index a4235765c79..1cd0ae46398 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper.cc
+++ b/chromium/gpu/vulkan/vulkan_fence_helper.cc
@@ -19,9 +19,14 @@ VulkanFenceHelper::FenceHandle& VulkanFenceHelper::FenceHandle::operator=(
const FenceHandle& other) = default;
VulkanFenceHelper::VulkanFenceHelper(VulkanDeviceQueue* device_queue)
- : device_queue_(device_queue) {}
+ : device_queue_(device_queue), weak_factory_(this) {}
VulkanFenceHelper::~VulkanFenceHelper() {
+ DCHECK(tasks_pending_fence_.empty());
+ DCHECK(cleanup_tasks_.empty());
+}
+
+void VulkanFenceHelper::Destroy() {
PerformImmediateCleanup();
}
@@ -38,7 +43,7 @@ VkResult VulkanFenceHelper::GetFence(VkFence* fence) {
VulkanFenceHelper::FenceHandle VulkanFenceHelper::EnqueueFence(VkFence fence) {
FenceHandle handle(fence, next_generation_++);
- cleanup_tasks_.emplace(handle, std::move(tasks_pending_fence_));
+ cleanup_tasks_.emplace_back(handle, std::move(tasks_pending_fence_));
tasks_pending_fence_ = std::vector<CleanupTask>();
return handle;
@@ -74,29 +79,43 @@ void VulkanFenceHelper::ProcessCleanupTasks() {
VkDevice device = device_queue_->GetVulkanDevice();
// Iterate over our pending cleanup fences / tasks, advancing
- // |current_generation_| as far as possible. This assumes that fences pass in
- // order, which isn't a hard API guarantee, but should be close enough /
- // efficient enough for the purpose or processing cleanup tasks.
- //
- // Also runs any cleanup tasks for generations that have passed. Create a
- // temporary vector of tasks to run to avoid reentrancy issues.
- std::vector<CleanupTask> tasks_to_run;
- while (!cleanup_tasks_.empty()) {
- TasksForFence& tasks_for_fence = cleanup_tasks_.front();
- VkResult result = vkGetFenceStatus(device, tasks_for_fence.handle.fence_);
+ // |current_generation_| as far as possible.
+ for (const auto& tasks_for_fence : cleanup_tasks_) {
+ // If we're already ahead of this task (callback modified |generation_id_|),
+ // continue.
+ if (tasks_for_fence.generation_id <= current_generation_)
+ continue;
+
+ // Callback based tasks have no actual fence to wait on, keep checking
+ // future fences, as a callback may be delayed.
+ if (tasks_for_fence.UsingCallback())
+ continue;
+
+ VkResult result = vkGetFenceStatus(device, tasks_for_fence.fence);
if (result == VK_NOT_READY)
break;
if (result != VK_SUCCESS) {
PerformImmediateCleanup();
return;
}
- current_generation_ = tasks_for_fence.handle.generation_id_;
- vkDestroyFence(device, tasks_for_fence.handle.fence_, nullptr);
+ current_generation_ = tasks_for_fence.generation_id;
+ }
+ // Runs any cleanup tasks for generations that have passed. Create a temporary
+ // vector of tasks to run to avoid reentrancy issues.
+ std::vector<CleanupTask> tasks_to_run;
+ while (!cleanup_tasks_.empty()) {
+ TasksForFence& tasks_for_fence = cleanup_tasks_.front();
+ if (tasks_for_fence.generation_id > current_generation_)
+ break;
+ if (tasks_for_fence.fence != VK_NULL_HANDLE) {
+ DCHECK_EQ(vkGetFenceStatus(device, tasks_for_fence.fence), VK_SUCCESS);
+ vkDestroyFence(device, tasks_for_fence.fence, nullptr);
+ }
tasks_to_run.insert(tasks_to_run.end(),
std::make_move_iterator(tasks_for_fence.tasks.begin()),
std::make_move_iterator(tasks_for_fence.tasks.end()));
- cleanup_tasks_.pop();
+ cleanup_tasks_.pop_front();
}
for (auto& task : tasks_to_run)
@@ -123,6 +142,32 @@ VulkanFenceHelper::FenceHandle VulkanFenceHelper::GenerateCleanupFence() {
return EnqueueFence(fence);
}
+base::OnceClosure VulkanFenceHelper::CreateExternalCallback() {
+ // No need to do callback tracking if there are no cleanup tasks to run.
+ if (tasks_pending_fence_.empty())
+ return base::OnceClosure();
+
+ // Get a generation ID for this callback and associate existing cleanup
+ // tasks.
+ uint64_t generation_id = next_generation_++;
+ cleanup_tasks_.emplace_back(generation_id, std::move(tasks_pending_fence_));
+ tasks_pending_fence_ = std::vector<CleanupTask>();
+
+ return base::BindOnce(
+ [](base::WeakPtr<VulkanFenceHelper> fence_helper,
+ uint64_t generation_id) {
+ if (!fence_helper)
+ return;
+ // If |current_generation_| is ahead of the callback's
+ // |generation_id|, the callback came late. Ignore it.
+ if (generation_id > fence_helper->current_generation_) {
+ fence_helper->current_generation_ = generation_id;
+ fence_helper->ProcessCleanupTasks();
+ }
+ },
+ weak_factory_.GetWeakPtr(), generation_id);
+}
+
void VulkanFenceHelper::EnqueueSemaphoreCleanupForSubmittedWork(
VkSemaphore semaphore) {
if (semaphore == VK_NULL_HANDLE)
@@ -164,43 +209,71 @@ void VulkanFenceHelper::EnqueueImageCleanupForSubmittedWork(
image, memory));
}
+void VulkanFenceHelper::EnqueueBufferCleanupForSubmittedWork(
+ VkBuffer buffer,
+ VkDeviceMemory memory) {
+ if (buffer == VK_NULL_HANDLE && memory == VK_NULL_HANDLE)
+ return;
+
+ EnqueueCleanupTaskForSubmittedWork(base::BindOnce(
+ [](VkBuffer buffer, VkDeviceMemory memory,
+ VulkanDeviceQueue* device_queue, bool /* is_lost */) {
+ if (buffer != VK_NULL_HANDLE)
+ vkDestroyBuffer(device_queue->GetVulkanDevice(), buffer, nullptr);
+ if (memory != VK_NULL_HANDLE)
+ vkFreeMemory(device_queue->GetVulkanDevice(), memory, nullptr);
+ },
+ buffer, memory));
+}
+
void VulkanFenceHelper::PerformImmediateCleanup() {
- // Rather than caring about fences, just wait for queue idle if possible.
+ if (cleanup_tasks_.empty() && tasks_pending_fence_.empty())
+ return;
+
+ // We want to run all tasks immediately, so just use vkQueueWaitIdle which
+ // ensures that all fences have passed.
+ // Even if exclusively using callbacks, the callbacks use WeakPtr and will
+ // not keep this class alive, so it's important to wait / run all cleanup
+ // immediately.
VkResult result = vkQueueWaitIdle(device_queue_->GetVulkanQueue());
// Wait can only fail for three reasons - device loss, host OOM, device OOM.
// If we hit an OOM, treat this as a crash. There isn't a great way to
// recover from this.
CHECK(result == VK_SUCCESS || result == VK_ERROR_DEVICE_LOST);
bool device_lost = result == VK_ERROR_DEVICE_LOST;
+ if (!device_lost)
+ current_generation_ = next_generation_ - 1;
// Run all cleanup tasks. Create a temporary vector of tasks to run to avoid
// reentrancy issues.
std::vector<CleanupTask> tasks_to_run;
- tasks_to_run.insert(tasks_to_run.end(),
- std::make_move_iterator(tasks_pending_fence_.begin()),
- std::make_move_iterator(tasks_pending_fence_.end()));
- tasks_pending_fence_.clear();
while (!cleanup_tasks_.empty()) {
auto& tasks_for_fence = cleanup_tasks_.front();
- vkDestroyFence(device_queue_->GetVulkanDevice(),
- tasks_for_fence.handle.fence_, nullptr);
+ vkDestroyFence(device_queue_->GetVulkanDevice(), tasks_for_fence.fence,
+ nullptr);
tasks_to_run.insert(tasks_to_run.end(),
std::make_move_iterator(tasks_for_fence.tasks.begin()),
std::make_move_iterator(tasks_for_fence.tasks.end()));
- cleanup_tasks_.pop();
+ cleanup_tasks_.pop_front();
}
+ tasks_to_run.insert(tasks_to_run.end(),
+ std::make_move_iterator(tasks_pending_fence_.begin()),
+ std::make_move_iterator(tasks_pending_fence_.end()));
+ tasks_pending_fence_.clear();
for (auto& task : tasks_to_run)
std::move(task).Run(device_queue_, device_lost);
}
VulkanFenceHelper::TasksForFence::TasksForFence(FenceHandle handle,
std::vector<CleanupTask> tasks)
- : handle(handle), tasks(std::move(tasks)) {}
+ : fence(handle.fence_),
+ generation_id(handle.generation_id_),
+ tasks(std::move(tasks)) {}
+VulkanFenceHelper::TasksForFence::TasksForFence(uint64_t generation_id,
+ std::vector<CleanupTask> tasks)
+ : generation_id(generation_id), tasks(std::move(tasks)) {}
VulkanFenceHelper::TasksForFence::~TasksForFence() = default;
VulkanFenceHelper::TasksForFence::TasksForFence(TasksForFence&& other) =
default;
-VulkanFenceHelper::TasksForFence&
-VulkanFenceHelper::TasksForFence::TasksForFence::operator=(
- TasksForFence&& other) = default;
} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper.h b/chromium/gpu/vulkan/vulkan_fence_helper.h
index a4d5827a42e..7bbef05d1a9 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper.h
+++ b/chromium/gpu/vulkan/vulkan_fence_helper.h
@@ -7,9 +7,11 @@
#include <vulkan/vulkan.h>
+#include "base/bind_helpers.h"
#include "base/callback.h"
-#include "base/containers/queue.h"
+#include "base/containers/circular_deque.h"
#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
#include "gpu/vulkan/vulkan_export.h"
namespace gpu {
@@ -21,6 +23,9 @@ class VULKAN_EXPORT VulkanFenceHelper {
explicit VulkanFenceHelper(VulkanDeviceQueue* device_queue);
~VulkanFenceHelper();
+ // Destroy the fence helper.
+ void Destroy();
+
// Class representing a fence registered with this system. Should be treated
// as an opaque handle.
class FenceHandle {
@@ -47,10 +52,9 @@ class VULKAN_EXPORT VulkanFenceHelper {
// In typical cases, callers will call GetFence to generate/reuse a fence,
// submit this fence, then call EnqueueFence to register it with this system.
//
- // In cases where fences are not being generated by Chrome (or in cases where
- // we can't use this helper, such as Skia), consumers should ensure that
- // GenerateCleanupFence is called once per frame to allow cleanup tasks to be
- // processed.
+ // In cases where fences are not being generated by Chrome, consumers should
+ // ensure that GenerateCleanupFence is called once per frame to allow cleanup
+ // tasks to be processed.
//
// Creates or recycles a fence.
VkResult GetFence(VkFence* fence);
@@ -66,6 +70,15 @@ class VULKAN_EXPORT VulkanFenceHelper {
// TODO(ericrk): We should avoid this in all cases if possible.
FenceHandle GenerateCleanupFence();
+ // Creates a callback that calls pending cleanup tasks. Used in cases where an
+ // external component (Skia) is submitting / waiting on a fence and cannot
+ // share that fence with this class.
+ // Note: It is important that no new cleanup tasks or fences are inserted
+ // between this call and the submission of the fence which will eventually
+ // trigger this callback. Doing so could cause the callbacks associated
+ // with this call to run out of order / incorrectly.
+ base::OnceClosure CreateExternalCallback();
+
// Helper functions which allow clients to wait for or check the statusof a
// fence submitted with EnqueueFence.
//
@@ -83,7 +96,8 @@ class VULKAN_EXPORT VulkanFenceHelper {
using CleanupTask = base::OnceCallback<void(VulkanDeviceQueue* device_queue,
bool device_lost)>;
// Submits a cleanup task for already submitted work. ProcessCleanupTasks
- // must be called periodically to ensure these run.
+ // must be called periodically to ensure these run. Cleanup tasks will be
+ // executed in order they are enqueued.
void EnqueueCleanupTaskForSubmittedWork(CleanupTask task);
// Processes CleanupTasks for which a fence has passed.
void ProcessCleanupTasks();
@@ -93,6 +107,11 @@ class VULKAN_EXPORT VulkanFenceHelper {
std::vector<VkSemaphore> semaphores);
void EnqueueImageCleanupForSubmittedWork(VkImage image,
VkDeviceMemory memory);
+ void EnqueueBufferCleanupForSubmittedWork(VkBuffer buffer,
+ VkDeviceMemory memory);
+ // Helpers for VulkanCommandBuffer, VulkanCommandPool, etc
+ template <typename T>
+ void EnqueueVulkanObjectCleanupForSubmittedWork(std::unique_ptr<T> obj);
private:
void PerformImmediateCleanup();
@@ -104,19 +123,37 @@ class VULKAN_EXPORT VulkanFenceHelper {
uint64_t current_generation_ = 0;
struct TasksForFence {
+ // Constructor when tasks associated with a fence.
TasksForFence(FenceHandle handle, std::vector<CleanupTask> tasks);
+ // Constructor when tasks associated with Skia callback.
+ TasksForFence(uint64_t generation_id, std::vector<CleanupTask> tasks);
~TasksForFence();
TasksForFence(TasksForFence&& other);
TasksForFence& operator=(TasksForFence&& other);
- FenceHandle handle;
+ bool UsingCallback() const { return fence == VK_NULL_HANDLE; }
+
+ const VkFence fence = VK_NULL_HANDLE;
+ const uint64_t generation_id = 0;
+
std::vector<CleanupTask> tasks;
};
- base::queue<TasksForFence> cleanup_tasks_;
+ base::circular_deque<TasksForFence> cleanup_tasks_;
+
+ base::WeakPtrFactory<VulkanFenceHelper> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(VulkanFenceHelper);
};
+template <typename T>
+void VulkanFenceHelper::EnqueueVulkanObjectCleanupForSubmittedWork(
+ std::unique_ptr<T> obj) {
+ EnqueueCleanupTaskForSubmittedWork(
+ base::BindOnce([](std::unique_ptr<T> obj, VulkanDeviceQueue* device_queue,
+ bool device_lost) { obj->Destroy(); },
+ std::move(obj)));
+}
+
} // namespace gpu
#endif // GPU_VULKAN_VULKAN_FENCE_HELPER_H_
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc b/chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc
index 2b68af3873f..8e3d9d1ae79 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc
+++ b/chromium/gpu/vulkan/vulkan_fence_helper_unittest.cc
@@ -93,4 +93,94 @@ TEST_F(VulkanFenceHelperTest, TestMultipleCallbacks) {
fence_helper->Wait(fence_handle, UINT64_MAX);
EXPECT_EQ(10u, cleanups_run);
}
+
+TEST_F(VulkanFenceHelperTest, TestSkiaCallback) {
+ VulkanFenceHelper* fence_helper = GetDeviceQueue()->GetFenceHelper();
+ bool cleanup_run = false;
+ fence_helper->EnqueueCleanupTaskForSubmittedWork(
+ base::BindOnce([](bool* cleanup_run, VulkanDeviceQueue* device_queue,
+ bool is_lost) { *cleanup_run = true; },
+ &cleanup_run));
+ auto cleanup_closure = fence_helper->CreateExternalCallback();
+ EXPECT_FALSE(cleanup_run);
+ std::move(cleanup_closure).Run();
+ EXPECT_TRUE(cleanup_run);
+}
+
+TEST_F(VulkanFenceHelperTest, SkiaCallbackBeforeFences) {
+ VulkanFenceHelper* fence_helper = GetDeviceQueue()->GetFenceHelper();
+ uint32_t cleanups_run = 0;
+ auto increment_cleanups_callback =
+ [](uint32_t expected_index, uint32_t* cleanups_run,
+ VulkanDeviceQueue* device_queue, bool is_lost) {
+ EXPECT_EQ(expected_index, *cleanups_run);
+ *cleanups_run = *cleanups_run + 1;
+ };
+
+ // Enqueue 5 callbacks.
+ for (int i = 0; i < 5; i++) {
+ fence_helper->EnqueueCleanupTaskForSubmittedWork(
+ base::BindOnce(increment_cleanups_callback, i, &cleanups_run));
+ }
+
+ // The first 5 callbacks use a callback to trigger.
+ auto cleanup_closure = fence_helper->CreateExternalCallback();
+
+ // Enqueue 5 more callbacks.
+ for (int i = 5; i < 10; i++) {
+ fence_helper->EnqueueCleanupTaskForSubmittedWork(
+ base::BindOnce(increment_cleanups_callback, i, &cleanups_run));
+ }
+
+ // Generate a cleanup fence for the next 5 callbacks.
+ VulkanFenceHelper::FenceHandle fence_handle =
+ fence_helper->GenerateCleanupFence();
+ EXPECT_TRUE(fence_handle.is_valid());
+
+  // After waiting for the second fence, all callbacks should have run; Skia
+  // callbacks can be delayed, so we check future fences as well.
+ EXPECT_TRUE(fence_helper->Wait(fence_handle, UINT64_MAX));
+ EXPECT_EQ(10u, cleanups_run);
+
+ // Running the callback now should be a no-op.
+ std::move(cleanup_closure).Run();
+ EXPECT_EQ(10u, cleanups_run);
+}
+
+TEST_F(VulkanFenceHelperTest, SkiaCallbackAfterFences) {
+ VulkanFenceHelper* fence_helper = GetDeviceQueue()->GetFenceHelper();
+ uint32_t cleanups_run = 0;
+ auto increment_cleanups_callback =
+ [](uint32_t expected_index, uint32_t* cleanups_run,
+ VulkanDeviceQueue* device_queue, bool is_lost) {
+ EXPECT_EQ(expected_index, *cleanups_run);
+ *cleanups_run = *cleanups_run + 1;
+ };
+
+ // Enqueue 5 callbacks.
+ for (int i = 0; i < 5; i++) {
+ fence_helper->EnqueueCleanupTaskForSubmittedWork(
+ base::BindOnce(increment_cleanups_callback, i, &cleanups_run));
+ }
+
+ // The first 5 callbacks use a fence to trigger.
+ VulkanFenceHelper::FenceHandle fence_handle =
+ fence_helper->GenerateCleanupFence();
+ EXPECT_TRUE(fence_handle.is_valid());
+
+ // Enqueue 5 more callbacks.
+ for (int i = 5; i < 10; i++) {
+ fence_helper->EnqueueCleanupTaskForSubmittedWork(
+ base::BindOnce(increment_cleanups_callback, i, &cleanups_run));
+ }
+
+ // The next 5 callbacks use a callback to trigger.
+ auto cleanup_closure = fence_helper->CreateExternalCallback();
+
+  // Call the cleanup closure; all callbacks should run, since the closure was
+  // created after both the fence-tracked and callback-tracked tasks.
+ std::move(cleanup_closure).Run();
+ EXPECT_EQ(10u, cleanups_run);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.cc b/chromium/gpu/vulkan/vulkan_function_pointers.cc
index ba264c7c951..ff707b3c31c 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.cc
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.cc
@@ -94,6 +94,13 @@ bool VulkanFunctionPointers::BindPhysicalDeviceFunctionPointers(
if (!vkEnumerateDeviceLayerPropertiesFn)
return false;
+ vkGetPhysicalDeviceMemoryPropertiesFn =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceMemoryProperties>(
+ vkGetInstanceProcAddrFn(vk_instance,
+ "vkGetPhysicalDeviceMemoryProperties"));
+ if (!vkGetPhysicalDeviceMemoryPropertiesFn)
+ return false;
+
vkGetPhysicalDeviceQueueFamilyPropertiesFn =
reinterpret_cast<PFN_vkGetPhysicalDeviceQueueFamilyProperties>(
vkGetInstanceProcAddrFn(vk_instance,
@@ -101,10 +108,19 @@ bool VulkanFunctionPointers::BindPhysicalDeviceFunctionPointers(
if (!vkGetPhysicalDeviceQueueFamilyPropertiesFn)
return false;
+ vkGetPhysicalDevicePropertiesFn =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
+ vkGetInstanceProcAddrFn(vk_instance,
+ "vkGetPhysicalDeviceProperties"));
+ if (!vkGetPhysicalDevicePropertiesFn)
+ return false;
+
return true;
}
-bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
+bool VulkanFunctionPointers::BindDeviceFunctionPointers(
+ VkDevice vk_device,
+ bool using_swiftshader) {
// Device functions
vkAllocateCommandBuffersFn = reinterpret_cast<PFN_vkAllocateCommandBuffers>(
vkGetDeviceProcAddrFn(vk_device, "vkAllocateCommandBuffers"));
@@ -121,6 +137,11 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkAllocateMemoryFn)
return false;
+ vkBindBufferMemoryFn = reinterpret_cast<PFN_vkBindBufferMemory>(
+ vkGetDeviceProcAddrFn(vk_device, "vkBindBufferMemory"));
+ if (!vkBindBufferMemoryFn)
+ return false;
+
vkBindImageMemoryFn = reinterpret_cast<PFN_vkBindImageMemory>(
vkGetDeviceProcAddrFn(vk_device, "vkBindImageMemory"));
if (!vkBindImageMemoryFn)
@@ -131,6 +152,11 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkCreateCommandPoolFn)
return false;
+ vkCreateBufferFn = reinterpret_cast<PFN_vkCreateBuffer>(
+ vkGetDeviceProcAddrFn(vk_device, "vkCreateBuffer"));
+ if (!vkCreateBufferFn)
+ return false;
+
vkCreateDescriptorPoolFn = reinterpret_cast<PFN_vkCreateDescriptorPool>(
vkGetDeviceProcAddrFn(vk_device, "vkCreateDescriptorPool"));
if (!vkCreateDescriptorPoolFn)
@@ -182,6 +208,11 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkCreateShaderModuleFn)
return false;
+ vkDestroyBufferFn = reinterpret_cast<PFN_vkDestroyBuffer>(
+ vkGetDeviceProcAddrFn(vk_device, "vkDestroyBuffer"));
+ if (!vkDestroyBufferFn)
+ return false;
+
vkDestroyCommandPoolFn = reinterpret_cast<PFN_vkDestroyCommandPool>(
vkGetDeviceProcAddrFn(vk_device, "vkDestroyCommandPool"));
if (!vkDestroyCommandPoolFn)
@@ -263,6 +294,12 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkFreeMemoryFn)
return false;
+ vkGetBufferMemoryRequirementsFn =
+ reinterpret_cast<PFN_vkGetBufferMemoryRequirements>(
+ vkGetDeviceProcAddrFn(vk_device, "vkGetBufferMemoryRequirements"));
+ if (!vkGetBufferMemoryRequirementsFn)
+ return false;
+
vkGetDeviceQueueFn = reinterpret_cast<PFN_vkGetDeviceQueue>(
vkGetDeviceProcAddrFn(vk_device, "vkGetDeviceQueue"));
if (!vkGetDeviceQueueFn)
@@ -279,11 +316,21 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkGetImageMemoryRequirementsFn)
return false;
+ vkMapMemoryFn = reinterpret_cast<PFN_vkMapMemory>(
+ vkGetDeviceProcAddrFn(vk_device, "vkMapMemory"));
+ if (!vkMapMemoryFn)
+ return false;
+
vkResetFencesFn = reinterpret_cast<PFN_vkResetFences>(
vkGetDeviceProcAddrFn(vk_device, "vkResetFences"));
if (!vkResetFencesFn)
return false;
+ vkUnmapMemoryFn = reinterpret_cast<PFN_vkUnmapMemory>(
+ vkGetDeviceProcAddrFn(vk_device, "vkUnmapMemory"));
+ if (!vkUnmapMemoryFn)
+ return false;
+
vkUpdateDescriptorSetsFn = reinterpret_cast<PFN_vkUpdateDescriptorSets>(
vkGetDeviceProcAddrFn(vk_device, "vkUpdateDescriptorSets"));
if (!vkUpdateDescriptorSetsFn)
@@ -309,12 +356,12 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
vkGetSemaphoreFdKHRFn = reinterpret_cast<PFN_vkGetSemaphoreFdKHR>(
vkGetDeviceProcAddrFn(vk_device, "vkGetSemaphoreFdKHR"));
- if (!vkGetSemaphoreFdKHRFn)
+ if (!vkGetSemaphoreFdKHRFn && !using_swiftshader)
return false;
vkImportSemaphoreFdKHRFn = reinterpret_cast<PFN_vkImportSemaphoreFdKHR>(
vkGetDeviceProcAddrFn(vk_device, "vkImportSemaphoreFdKHR"));
- if (!vkImportSemaphoreFdKHRFn)
+ if (!vkImportSemaphoreFdKHRFn && !using_swiftshader)
return false;
#endif
@@ -323,7 +370,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
vkGetMemoryFdKHRFn = reinterpret_cast<PFN_vkGetMemoryFdKHR>(
vkGetDeviceProcAddrFn(vk_device, "vkGetMemoryFdKHR"));
- if (!vkGetMemoryFdKHRFn)
+ if (!vkGetMemoryFdKHRFn && !using_swiftshader)
return false;
#endif
@@ -394,6 +441,11 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(VkDevice vk_device) {
if (!vkCmdBeginRenderPassFn)
return false;
+ vkCmdCopyBufferToImageFn = reinterpret_cast<PFN_vkCmdCopyBufferToImage>(
+ vkGetDeviceProcAddrFn(vk_device, "vkCmdCopyBufferToImage"));
+ if (!vkCmdCopyBufferToImageFn)
+ return false;
+
vkCmdEndRenderPassFn = reinterpret_cast<PFN_vkCmdEndRenderPass>(
vkGetDeviceProcAddrFn(vk_device, "vkCmdEndRenderPass"));
if (!vkCmdEndRenderPassFn)
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.h b/chromium/gpu/vulkan/vulkan_function_pointers.h
index 678984820e2..2ca160f4994 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.h
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.h
@@ -47,7 +47,12 @@ struct VulkanFunctionPointers {
VULKAN_EXPORT bool BindPhysicalDeviceFunctionPointers(VkInstance vk_instance);
// These functions assume that vkGetDeviceProcAddr has been populated.
- VULKAN_EXPORT bool BindDeviceFunctionPointers(VkDevice vk_device);
+ // |using_swiftshader| allows functions that aren't supported by Swiftshader
+ // to be missing.
+ // TODO(samans): Remove |using_swiftshader| once all the workarounds can be
+ // removed. https://crbug.com/963988
+ VULKAN_EXPORT bool BindDeviceFunctionPointers(VkDevice vk_device,
+ bool using_swiftshader = false);
bool BindSwapchainFunctionPointers(VkDevice vk_device);
base::NativeLibrary vulkan_loader_library_ = nullptr;
@@ -73,8 +78,11 @@ struct VulkanFunctionPointers {
PFN_vkCreateDevice vkCreateDeviceFn = nullptr;
PFN_vkEnumerateDeviceLayerProperties vkEnumerateDeviceLayerPropertiesFn =
nullptr;
+ PFN_vkGetPhysicalDeviceMemoryProperties
+ vkGetPhysicalDeviceMemoryPropertiesFn = nullptr;
PFN_vkGetPhysicalDeviceQueueFamilyProperties
vkGetPhysicalDeviceQueueFamilyPropertiesFn = nullptr;
+ PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDevicePropertiesFn = nullptr;
PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR
vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn = nullptr;
PFN_vkGetPhysicalDeviceSurfaceFormatsKHR
@@ -89,8 +97,10 @@ struct VulkanFunctionPointers {
PFN_vkAllocateCommandBuffers vkAllocateCommandBuffersFn = nullptr;
PFN_vkAllocateDescriptorSets vkAllocateDescriptorSetsFn = nullptr;
PFN_vkAllocateMemory vkAllocateMemoryFn = nullptr;
+ PFN_vkBindBufferMemory vkBindBufferMemoryFn = nullptr;
PFN_vkBindImageMemory vkBindImageMemoryFn = nullptr;
PFN_vkCreateCommandPool vkCreateCommandPoolFn = nullptr;
+ PFN_vkCreateBuffer vkCreateBufferFn = nullptr;
PFN_vkCreateDescriptorPool vkCreateDescriptorPoolFn = nullptr;
PFN_vkCreateDescriptorSetLayout vkCreateDescriptorSetLayoutFn = nullptr;
PFN_vkCreateFence vkCreateFenceFn = nullptr;
@@ -101,6 +111,7 @@ struct VulkanFunctionPointers {
PFN_vkCreateSampler vkCreateSamplerFn = nullptr;
PFN_vkCreateSemaphore vkCreateSemaphoreFn = nullptr;
PFN_vkCreateShaderModule vkCreateShaderModuleFn = nullptr;
+ PFN_vkDestroyBuffer vkDestroyBufferFn = nullptr;
PFN_vkDestroyCommandPool vkDestroyCommandPoolFn = nullptr;
PFN_vkDestroyDescriptorPool vkDestroyDescriptorPoolFn = nullptr;
PFN_vkDestroyDescriptorSetLayout vkDestroyDescriptorSetLayoutFn = nullptr;
@@ -117,10 +128,13 @@ struct VulkanFunctionPointers {
PFN_vkFreeCommandBuffers vkFreeCommandBuffersFn = nullptr;
PFN_vkFreeDescriptorSets vkFreeDescriptorSetsFn = nullptr;
PFN_vkFreeMemory vkFreeMemoryFn = nullptr;
+ PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirementsFn = nullptr;
PFN_vkGetDeviceQueue vkGetDeviceQueueFn = nullptr;
PFN_vkGetFenceStatus vkGetFenceStatusFn = nullptr;
PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirementsFn = nullptr;
+ PFN_vkMapMemory vkMapMemoryFn = nullptr;
PFN_vkResetFences vkResetFencesFn = nullptr;
+ PFN_vkUnmapMemory vkUnmapMemoryFn = nullptr;
PFN_vkUpdateDescriptorSets vkUpdateDescriptorSetsFn = nullptr;
PFN_vkWaitForFences vkWaitForFencesFn = nullptr;
@@ -164,6 +178,7 @@ struct VulkanFunctionPointers {
// Command Buffer functions
PFN_vkBeginCommandBuffer vkBeginCommandBufferFn = nullptr;
PFN_vkCmdBeginRenderPass vkCmdBeginRenderPassFn = nullptr;
+ PFN_vkCmdCopyBufferToImage vkCmdCopyBufferToImageFn = nullptr;
PFN_vkCmdEndRenderPass vkCmdEndRenderPassFn = nullptr;
PFN_vkCmdExecuteCommands vkCmdExecuteCommandsFn = nullptr;
PFN_vkCmdNextSubpass vkCmdNextSubpassFn = nullptr;
@@ -207,8 +222,12 @@ struct VulkanFunctionPointers {
#define vkCreateDevice gpu::GetVulkanFunctionPointers()->vkCreateDeviceFn
#define vkEnumerateDeviceLayerProperties \
gpu::GetVulkanFunctionPointers()->vkEnumerateDeviceLayerPropertiesFn
+#define vkGetPhysicalDeviceMemoryProperties \
+ gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceMemoryPropertiesFn
#define vkGetPhysicalDeviceQueueFamilyProperties \
gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceQueueFamilyPropertiesFn
+#define vkGetPhysicalDeviceProperties \
+ gpu::GetVulkanFunctionPointers()->vkGetPhysicalDevicePropertiesFn
#define vkGetPhysicalDeviceSurfaceCapabilitiesKHR \
gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn
#define vkGetPhysicalDeviceSurfaceFormatsKHR \
@@ -227,9 +246,12 @@ struct VulkanFunctionPointers {
#define vkAllocateDescriptorSets \
gpu::GetVulkanFunctionPointers()->vkAllocateDescriptorSetsFn
#define vkAllocateMemory gpu::GetVulkanFunctionPointers()->vkAllocateMemoryFn
+#define vkBindBufferMemory \
+ gpu::GetVulkanFunctionPointers()->vkBindBufferMemoryFn
#define vkBindImageMemory gpu::GetVulkanFunctionPointers()->vkBindImageMemoryFn
#define vkCreateCommandPool \
gpu::GetVulkanFunctionPointers()->vkCreateCommandPoolFn
+#define vkCreateBuffer gpu::GetVulkanFunctionPointers()->vkCreateBufferFn
#define vkCreateDescriptorPool \
gpu::GetVulkanFunctionPointers()->vkCreateDescriptorPoolFn
#define vkCreateDescriptorSetLayout \
@@ -245,6 +267,7 @@ struct VulkanFunctionPointers {
#define vkCreateSemaphore gpu::GetVulkanFunctionPointers()->vkCreateSemaphoreFn
#define vkCreateShaderModule \
gpu::GetVulkanFunctionPointers()->vkCreateShaderModuleFn
+#define vkDestroyBuffer gpu::GetVulkanFunctionPointers()->vkDestroyBufferFn
#define vkDestroyCommandPool \
gpu::GetVulkanFunctionPointers()->vkDestroyCommandPoolFn
#define vkDestroyDescriptorPool \
@@ -271,11 +294,15 @@ struct VulkanFunctionPointers {
#define vkFreeDescriptorSets \
gpu::GetVulkanFunctionPointers()->vkFreeDescriptorSetsFn
#define vkFreeMemory gpu::GetVulkanFunctionPointers()->vkFreeMemoryFn
+#define vkGetBufferMemoryRequirements \
+ gpu::GetVulkanFunctionPointers()->vkGetBufferMemoryRequirementsFn
#define vkGetDeviceQueue gpu::GetVulkanFunctionPointers()->vkGetDeviceQueueFn
#define vkGetFenceStatus gpu::GetVulkanFunctionPointers()->vkGetFenceStatusFn
#define vkGetImageMemoryRequirements \
gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirementsFn
+#define vkMapMemory gpu::GetVulkanFunctionPointers()->vkMapMemoryFn
#define vkResetFences gpu::GetVulkanFunctionPointers()->vkResetFencesFn
+#define vkUnmapMemory gpu::GetVulkanFunctionPointers()->vkUnmapMemoryFn
#define vkUpdateDescriptorSets \
gpu::GetVulkanFunctionPointers()->vkUpdateDescriptorSetsFn
#define vkWaitForFences gpu::GetVulkanFunctionPointers()->vkWaitForFencesFn
@@ -321,6 +348,8 @@ struct VulkanFunctionPointers {
gpu::GetVulkanFunctionPointers()->vkBeginCommandBufferFn
#define vkCmdBeginRenderPass \
gpu::GetVulkanFunctionPointers()->vkCmdBeginRenderPassFn
+#define vkCmdCopyBufferToImage \
+ gpu::GetVulkanFunctionPointers()->vkCmdCopyBufferToImageFn
#define vkCmdEndRenderPass \
gpu::GetVulkanFunctionPointers()->vkCmdEndRenderPassFn
#define vkCmdExecuteCommands \
diff --git a/chromium/gpu/vulkan/vulkan_implementation.cc b/chromium/gpu/vulkan/vulkan_implementation.cc
index f3fa76f9ac9..0d12f5eb1e9 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.cc
+++ b/chromium/gpu/vulkan/vulkan_implementation.cc
@@ -11,7 +11,8 @@
namespace gpu {
-VulkanImplementation::VulkanImplementation() {}
+VulkanImplementation::VulkanImplementation(bool use_swiftshader)
+ : use_swiftshader_(use_swiftshader) {}
VulkanImplementation::~VulkanImplementation() {}
@@ -26,7 +27,8 @@ std::unique_ptr<VulkanDeviceQueue> CreateVulkanDeviceQueue(
std::vector<const char*> required_extensions =
vulkan_implementation->GetRequiredDeviceExtensions();
if (!device_queue->Initialize(option, std::move(required_extensions),
- callback)) {
+ callback,
+ vulkan_implementation->use_swiftshader())) {
device_queue->Destroy();
return nullptr;
}
diff --git a/chromium/gpu/vulkan/vulkan_implementation.h b/chromium/gpu/vulkan/vulkan_implementation.h
index 4bd584aa86f..198bbe3e31a 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.h
+++ b/chromium/gpu/vulkan/vulkan_implementation.h
@@ -14,6 +14,9 @@
#include "build/build_config.h"
#include "gpu/vulkan/semaphore_handle.h"
#include "gpu/vulkan/vulkan_export.h"
+#include "ui/gfx/buffer_types.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gfx/gpu_memory_buffer.h"
#include "ui/gfx/native_widget_types.h"
#if defined(OS_ANDROID)
@@ -23,20 +26,21 @@
namespace gfx {
class GpuFence;
-}
+struct GpuMemoryBufferHandle;
+} // namespace gfx
namespace gpu {
-
class VulkanDeviceQueue;
class VulkanSurface;
class VulkanInstance;
+struct VulkanYCbCrInfo;
// Base class which provides functions for creating vulkan objects for different
// platforms that use platform-specific extensions (e.g. for creation of
// VkSurfaceKHR objects). It also provides helper/utility functions.
class VULKAN_EXPORT VulkanImplementation {
public:
- VulkanImplementation();
+ explicit VulkanImplementation(bool use_swiftshader = false);
virtual ~VulkanImplementation();
@@ -85,9 +89,29 @@ class VULKAN_EXPORT VulkanImplementation {
// external images and memory.
virtual VkExternalMemoryHandleTypeFlagBits GetExternalImageHandleType() = 0;
+ // Returns true if the GpuMemoryBuffer of the specified type can be imported
+ // into VkImage using CreateImageFromGpuMemoryHandle().
+ virtual bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) = 0;
+
+ // Creates a VkImage from a GpuMemoryBuffer. If successful it initializes
+ // |vk_image|, |vk_image_info|, |vk_device_memory| and |mem_allocation_size|.
+ // Implementation must verify that the specified |size| fits in the size
+ // specified when |gmb_handle| was allocated.
+ virtual bool CreateImageFromGpuMemoryHandle(
+ VkDevice vk_device,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ gfx::Size size,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) = 0;
+
#if defined(OS_ANDROID)
// Create a VkImage, import Android AHardwareBuffer object created outside of
// the Vulkan device into Vulkan memory object and bind it to the VkImage.
+ // TODO(sergeyu): Remove this method and use
+ // CreateVkImageFromGpuMemoryHandle() instead.
virtual bool CreateVkImageAndImportAHB(
const VkDevice& vk_device,
const VkPhysicalDevice& vk_physical_device,
@@ -96,10 +120,20 @@ class VULKAN_EXPORT VulkanImplementation {
VkImage* vk_image,
VkImageCreateInfo* vk_image_info,
VkDeviceMemory* vk_device_memory,
- VkDeviceSize* mem_allocation_size) = 0;
+ VkDeviceSize* mem_allocation_size,
+ VulkanYCbCrInfo* ycbcr_info = nullptr) = 0;
+
+ // Get the sampler ycbcr conversion information from the AHB.
+ virtual bool GetSamplerYcbcrConversionInfo(
+ const VkDevice& vk_device,
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ VulkanYCbCrInfo* ycbcr_info) = 0;
#endif
+ bool use_swiftshader() const { return use_swiftshader_; }
+
private:
+ const bool use_swiftshader_;
DISALLOW_COPY_AND_ASSIGN(VulkanImplementation);
};
diff --git a/chromium/gpu/vulkan/vulkan_instance.cc b/chromium/gpu/vulkan/vulkan_instance.cc
index 2831776e081..696229e8bcd 100644
--- a/chromium/gpu/vulkan/vulkan_instance.cc
+++ b/chromium/gpu/vulkan/vulkan_instance.cc
@@ -47,7 +47,8 @@ VulkanInstance::~VulkanInstance() {
bool VulkanInstance::Initialize(
const std::vector<const char*>& required_extensions,
- const std::vector<const char*>& required_layers) {
+ const std::vector<const char*>& required_layers,
+ bool using_swiftshader) {
DCHECK(!vk_instance_);
VulkanFunctionPointers* vulkan_function_pointers =
@@ -231,7 +232,9 @@ bool VulkanInstance::Initialize(
reinterpret_cast<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>(
vkGetInstanceProcAddr(
vk_instance_, "vkGetPhysicalDeviceXlibPresentationSupportKHR"));
- if (!vkGetPhysicalDeviceXlibPresentationSupportKHR)
+ // TODO(samans): Remove |using_swiftshader| once Swiftshader supports this
+ // method. https://crbug.com/swiftshader/129
+ if (!vkGetPhysicalDeviceXlibPresentationSupportKHR && !using_swiftshader)
return false;
#endif
diff --git a/chromium/gpu/vulkan/vulkan_instance.h b/chromium/gpu/vulkan/vulkan_instance.h
index 2762d97b59e..17ab9e2992c 100644
--- a/chromium/gpu/vulkan/vulkan_instance.h
+++ b/chromium/gpu/vulkan/vulkan_instance.h
@@ -26,8 +26,12 @@ class VULKAN_EXPORT VulkanInstance {
// The extensions in |required_extensions| and the layers in |required_layers|
// will be enabled in the created instance. See the "Extended Functionality"
// section of vulkan specification for more information.
+ // TODO(samans): Remove |using_swiftshader| once Vulkan Swiftshader is more
+ // developed and the workarounds that were added can be deleted.
+ // https://crbug.com/963988
bool Initialize(const std::vector<const char*>& required_extensions,
- const std::vector<const char*>& required_layers);
+ const std::vector<const char*>& required_layers,
+ bool using_swiftshader = false);
// VkApplicationInfo.apiVersion value used to initialize the instance.
uint32_t api_version() const { return api_version_; }
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.cc b/chromium/gpu/vulkan/vulkan_swap_chain.cc
index 84c9d388159..ecbf1a58c9a 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.cc
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.cc
@@ -4,6 +4,7 @@
#include "gpu/vulkan/vulkan_swap_chain.h"
+#include "base/bind.h"
#include "gpu/vulkan/vulkan_command_buffer.h"
#include "gpu/vulkan/vulkan_command_pool.h"
#include "gpu/vulkan/vulkan_device_queue.h"
@@ -13,91 +14,17 @@ namespace gpu {
namespace {
-VkPipelineStageFlags GetPipelineStageFlags(const VkImageLayout layout) {
- switch (layout) {
- case VK_IMAGE_LAYOUT_UNDEFINED:
- return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
- case VK_IMAGE_LAYOUT_GENERAL:
- return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
- case VK_IMAGE_LAYOUT_PREINITIALIZED:
- return VK_PIPELINE_STAGE_HOST_BIT;
- case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
- case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
- return VK_PIPELINE_STAGE_TRANSFER_BIT;
- case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
- return VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
- case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
- return VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
- VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
- VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
- VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
- case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
- return VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
- default:
- NOTREACHED() << "layout=" << layout;
- }
- return 0;
-}
-
-VkAccessFlags GetAccessMask(const VkImageLayout layout) {
- switch (layout) {
- case VK_IMAGE_LAYOUT_UNDEFINED:
- return 0;
- case VK_IMAGE_LAYOUT_GENERAL:
- DLOG(WARNING) << "VK_IMAGE_LAYOUT_GENERAL is used.";
- return VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
- VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_TRANSFER_READ_BIT |
- VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_HOST_WRITE_BIT |
- VK_ACCESS_HOST_READ_BIT;
- case VK_IMAGE_LAYOUT_PREINITIALIZED:
- return VK_ACCESS_HOST_WRITE_BIT;
- case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
- return VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
- VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
- case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
- return VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
- case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
- return VK_ACCESS_TRANSFER_READ_BIT;
- case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
- return VK_ACCESS_TRANSFER_WRITE_BIT;
- case VK_IMAGE_LAYOUT_PRESENT_SRC_KHR:
- return 0;
- default:
- NOTREACHED() << "layout=" << layout;
- }
- return 0;
-}
-
-void CmdSetImageLayout(VulkanCommandBuffer* command_buffer,
- VkImage image,
- VkImageLayout layout,
- VkImageLayout old_layout) {
- DCHECK_NE(layout, old_layout);
- VkImageMemoryBarrier image_memory_barrier = {
- .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
- .pNext = nullptr,
- .srcAccessMask = GetAccessMask(old_layout),
- .dstAccessMask = GetAccessMask(layout),
- .oldLayout = old_layout,
- .newLayout = layout,
- .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
- .image = image,
- .subresourceRange =
- {
- .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
- .baseMipLevel = 0,
- .levelCount = 1,
- .baseArrayLayer = 0,
- .layerCount = 1,
- },
- };
-
- ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
- vkCmdPipelineBarrier(recorder.handle(), GetPipelineStageFlags(old_layout),
- GetPipelineStageFlags(layout), 0, 0, nullptr, 0, nullptr,
- 1, &image_memory_barrier);
+VkSemaphore CreateSemaphore(VkDevice vk_device) {
+ // Generic semaphore creation structure.
+ VkSemaphoreCreateInfo semaphore_create_info = {
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO};
+
+ VkSemaphore vk_semaphore;
+ auto result = vkCreateSemaphore(vk_device, &semaphore_create_info, nullptr,
+ &vk_semaphore);
+ DLOG_IF(FATAL, VK_SUCCESS != result)
+ << "vkCreateSemaphore() failed: " << result;
+ return vk_semaphore;
}
} // namespace
@@ -107,7 +34,6 @@ VulkanSwapChain::VulkanSwapChain() {}
VulkanSwapChain::~VulkanSwapChain() {
DCHECK(images_.empty());
DCHECK_EQ(static_cast<VkSwapchainKHR>(VK_NULL_HANDLE), swap_chain_);
- DCHECK_EQ(static_cast<VkSemaphore>(VK_NULL_HANDLE), next_present_semaphore_);
}
bool VulkanSwapChain::Initialize(
@@ -118,43 +44,56 @@ bool VulkanSwapChain::Initialize(
std::unique_ptr<VulkanSwapChain> old_swap_chain) {
DCHECK(device_queue);
device_queue_ = device_queue;
+ device_queue_->GetFenceHelper()->ProcessCleanupTasks();
return InitializeSwapChain(surface, surface_caps, surface_format,
std::move(old_swap_chain)) &&
InitializeSwapImages(surface_caps, surface_format);
}
void VulkanSwapChain::Destroy() {
+ DCHECK(!is_writing_);
DestroySwapImages();
DestroySwapChain();
}
gfx::SwapResult VulkanSwapChain::SwapBuffers() {
- VkResult result = VK_SUCCESS;
+ DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
+ VkResult result = VK_SUCCESS;
VkDevice device = device_queue_->GetVulkanDevice();
VkQueue queue = device_queue_->GetVulkanQueue();
+ auto* fence_helper = device_queue_->GetFenceHelper();
auto& current_image_data = images_[current_image_];
-
- current_image_data->post_raster_command_buffer->Clear();
- CmdSetImageLayout(current_image_data->post_raster_command_buffer.get(),
- current_image_data->image,
- VK_IMAGE_LAYOUT_PRESENT_SRC_KHR /* layout */,
- current_image_data->layout /* old_layout */);
- current_image_data->layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
-
- // Submit our post_raster_command_buffer for the current buffer. It sets the
- // image layout for presenting.
- if (!current_image_data->post_raster_command_buffer->Submit(
- 0, nullptr, 1, &current_image_data->render_semaphore)) {
- return gfx::SwapResult::SWAP_FAILED;
+ if (current_image_data.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
+ {
+ current_image_data.command_buffer->Clear();
+ ScopedSingleUseCommandBufferRecorder recorder(
+ *current_image_data.command_buffer);
+ current_image_data.command_buffer->TransitionImageLayout(
+ current_image_data.image, current_image_data.layout,
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
+ }
+ current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+
+ VkSemaphore vk_semaphore = CreateSemaphore(device);
+ // Submit our command_buffer for the current buffer. It sets the image
+ // layout for presenting.
+ if (!current_image_data.command_buffer->Submit(1, &end_write_semaphore_, 1,
+ &vk_semaphore)) {
+ vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+ return gfx::SwapResult::SWAP_FAILED;
+ }
+ current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
+ fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(end_write_semaphore_);
+ end_write_semaphore_ = vk_semaphore;
}
// Queue the present.
VkPresentInfoKHR present_info = {};
present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present_info.waitSemaphoreCount = 1;
- present_info.pWaitSemaphores = &current_image_data->render_semaphore;
+ present_info.pWaitSemaphores = &end_write_semaphore_;
present_info.swapchainCount = 1;
present_info.pSwapchains = &swap_chain_;
present_info.pImageIndices = &current_image_;
@@ -163,38 +102,23 @@ gfx::SwapResult VulkanSwapChain::SwapBuffers() {
if (VK_SUCCESS != result) {
return gfx::SwapResult::SWAP_FAILED;
}
+ fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(end_write_semaphore_);
+ end_write_semaphore_ = VK_NULL_HANDLE;
+ VkSemaphore vk_semaphore = CreateSemaphore(device);
uint32_t next_image = 0;
// Acquire then next image.
- result = vkAcquireNextImageKHR(device, swap_chain_, UINT64_MAX,
- next_present_semaphore_, VK_NULL_HANDLE,
- &next_image);
+ result = vkAcquireNextImageKHR(device, swap_chain_, UINT64_MAX, vk_semaphore,
+ VK_NULL_HANDLE, &next_image);
if (VK_SUCCESS != result) {
+ vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
DLOG(ERROR) << "vkAcquireNextImageKHR() failed: " << result;
return gfx::SwapResult::SWAP_FAILED;
}
- auto& next_image_data = images_[next_image];
- // Swap in the "next_present_semaphore" into the newly acquired image. The
- // old "present_semaphore" for the image becomes the place holder for the next
- // present semaphore for the next image.
- std::swap(next_image_data->present_semaphore, next_present_semaphore_);
-
- // Submit our pre_raster_command_buffer for the next buffer. It sets the image
- // layout for rastering.
- next_image_data->pre_raster_command_buffer->Clear();
- CmdSetImageLayout(next_image_data->pre_raster_command_buffer.get(),
- next_image_data->image,
- VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL /* layout */,
- next_image_data->layout /* old_layout */);
- next_image_data->layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
-
- if (!next_image_data->pre_raster_command_buffer->Submit(
- 1, &next_image_data->present_semaphore, 0, nullptr)) {
- return gfx::SwapResult::SWAP_FAILED;
- }
-
current_image_ = next_image;
+ DCHECK(begin_write_semaphore_ == VK_NULL_HANDLE);
+ begin_write_semaphore_ = vk_semaphore;
return gfx::SwapResult::SWAP_ACK;
}
@@ -229,13 +153,11 @@ bool VulkanSwapChain::InitializeSwapChain(
&new_swap_chain);
if (VK_SUCCESS != result) {
DLOG(ERROR) << "vkCreateSwapchainKHR() failed: " << result;
- return false;
+ result = vkCreateSwapchainKHR(device, &swap_chain_create_info, nullptr,
+ &new_swap_chain);
}
if (old_swap_chain) {
- result = vkQueueWaitIdle(device_queue_->GetVulkanQueue());
- DLOG_IF(ERROR, VK_SUCCESS != result)
- << "vkQueueWaitIdle failed: " << result;
old_swap_chain->Destroy();
old_swap_chain = nullptr;
}
@@ -248,12 +170,18 @@ bool VulkanSwapChain::InitializeSwapChain(
}
void VulkanSwapChain::DestroySwapChain() {
- VkDevice device = device_queue_->GetVulkanDevice();
-
- if (swap_chain_ != VK_NULL_HANDLE) {
- vkDestroySwapchainKHR(device, swap_chain_, nullptr);
- swap_chain_ = VK_NULL_HANDLE;
- }
+ if (swap_chain_ == VK_NULL_HANDLE)
+ return;
+
+ device_queue_->GetFenceHelper()->EnqueueCleanupTaskForSubmittedWork(
+ base::BindOnce(
+ [](VkSwapchainKHR swapchain, VulkanDeviceQueue* device_queue,
+ bool /* is_lost */) {
+ VkDevice device = device_queue->GetVulkanDevice();
+ vkDestroySwapchainKHR(device, swapchain, nullptr /* pAllocator */);
+ },
+ swap_chain_));
+ swap_chain_ = VK_NULL_HANDLE;
}
bool VulkanSwapChain::InitializeSwapImages(
@@ -277,108 +205,120 @@ bool VulkanSwapChain::InitializeSwapImages(
return false;
}
- // Generic semaphore creation structure.
- VkSemaphoreCreateInfo semaphore_create_info = {};
- semaphore_create_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
-
command_pool_ = device_queue_->CreateCommandPool();
if (!command_pool_)
return false;
images_.resize(image_count);
for (uint32_t i = 0; i < image_count; ++i) {
- images_[i].reset(new ImageData);
- std::unique_ptr<ImageData>& image_data = images_[i];
- image_data->image = images[i];
-
- // Setup semaphores.
- result = vkCreateSemaphore(device, &semaphore_create_info, nullptr,
- &image_data->render_semaphore);
- if (VK_SUCCESS != result) {
- DLOG(ERROR) << "vkCreateSemaphore(render) failed: " << result;
- return false;
- }
-
- result = vkCreateSemaphore(device, &semaphore_create_info, nullptr,
- &image_data->present_semaphore);
- if (VK_SUCCESS != result) {
- DLOG(ERROR) << "vkCreateSemaphore(present) failed: " << result;
- return false;
- }
-
+ auto& image_data = images_[i];
+ image_data.image = images[i];
// Initialize the command buffer for this buffer data.
- image_data->pre_raster_command_buffer =
- command_pool_->CreatePrimaryCommandBuffer();
- image_data->post_raster_command_buffer =
- command_pool_->CreatePrimaryCommandBuffer();
- }
-
- result = vkCreateSemaphore(device, &semaphore_create_info, nullptr,
- &next_present_semaphore_);
- if (VK_SUCCESS != result) {
- DLOG(ERROR) << "vkCreateSemaphore(next_present) failed: " << result;
- return false;
+ image_data.command_buffer = command_pool_->CreatePrimaryCommandBuffer();
}
+ VkSemaphore vk_semaphore = CreateSemaphore(device);
// Acquire the initial buffer.
- result = vkAcquireNextImageKHR(device, swap_chain_, UINT64_MAX,
- next_present_semaphore_, VK_NULL_HANDLE,
- &current_image_);
+ result = vkAcquireNextImageKHR(device, swap_chain_, UINT64_MAX, vk_semaphore,
+ VK_NULL_HANDLE, &current_image_);
if (VK_SUCCESS != result) {
DLOG(ERROR) << "vkAcquireNextImageKHR() failed: " << result;
return false;
}
-
- std::swap(images_[current_image_]->present_semaphore,
- next_present_semaphore_);
-
+ begin_write_semaphore_ = vk_semaphore;
return true;
}
void VulkanSwapChain::DestroySwapImages() {
- VkDevice device = device_queue_->GetVulkanDevice();
+ auto* fence_helper = device_queue_->GetFenceHelper();
+ fence_helper->EnqueueCleanupTaskForSubmittedWork(base::BindOnce(
+ [](VkSemaphore begin_semaphore, VkSemaphore end_semaphore,
+ std::vector<ImageData> images,
+ std::unique_ptr<VulkanCommandPool> command_pool,
+ VulkanDeviceQueue* device_queue, bool /* is_lost */) {
+ auto* vk_device = device_queue->GetVulkanDevice();
+ if (begin_semaphore)
+ vkDestroySemaphore(vk_device, begin_semaphore,
+ nullptr /* pAllocator */);
+ if (end_semaphore)
+ vkDestroySemaphore(vk_device, end_semaphore,
+ nullptr /* pAllocator */);
+ for (auto& image_data : images) {
+ if (!image_data.command_buffer)
+ continue;
+ image_data.command_buffer->Destroy();
+ image_data.command_buffer = nullptr;
+ }
+ command_pool->Destroy();
+ },
+ begin_write_semaphore_, end_write_semaphore_, std::move(images_),
+ std::move(command_pool_)));
+ begin_write_semaphore_ = VK_NULL_HANDLE;
+ end_write_semaphore_ = VK_NULL_HANDLE;
+ images_.clear();
+}
- if (VK_NULL_HANDLE != next_present_semaphore_) {
- vkDestroySemaphore(device, next_present_semaphore_, nullptr);
- next_present_semaphore_ = VK_NULL_HANDLE;
- }
+void VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
+ uint32_t* image_index,
+ VkImageLayout* image_layout,
+ VkSemaphore* semaphore) {
+ DCHECK(image);
+ DCHECK(image_index);
+ DCHECK(image_layout);
+ DCHECK(semaphore);
+ DCHECK(!is_writing_);
+ DCHECK(begin_write_semaphore_ != VK_NULL_HANDLE);
+ DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
- for (const std::unique_ptr<ImageData>& image_data : images_) {
- if (image_data->post_raster_command_buffer) {
- // Make sure command buffer is done processing.
- image_data->pre_raster_command_buffer->Wait(UINT64_MAX);
- image_data->pre_raster_command_buffer->Destroy();
- image_data->pre_raster_command_buffer.reset();
-
- // Make sure command buffer is done processing.
- image_data->post_raster_command_buffer->Wait(UINT64_MAX);
- image_data->post_raster_command_buffer->Destroy();
- image_data->post_raster_command_buffer.reset();
- }
+ auto& current_image_data = images_[current_image_];
+ *image = current_image_data.image;
+ *image_index = current_image_;
+ *image_layout = current_image_data.layout;
+ *semaphore = begin_write_semaphore_;
+ begin_write_semaphore_ = VK_NULL_HANDLE;
+ is_writing_ = true;
+}
- // Destroy Semaphores.
- if (VK_NULL_HANDLE != image_data->present_semaphore) {
- vkDestroySemaphore(device, image_data->present_semaphore, nullptr);
- image_data->present_semaphore = VK_NULL_HANDLE;
- }
+void VulkanSwapChain::EndWriteCurrentImage(VkImageLayout image_layout,
+ VkSemaphore semaphore) {
+ DCHECK(is_writing_);
+ DCHECK(begin_write_semaphore_ == VK_NULL_HANDLE);
+ DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
- if (VK_NULL_HANDLE != image_data->render_semaphore) {
- vkDestroySemaphore(device, image_data->render_semaphore, nullptr);
- image_data->render_semaphore = VK_NULL_HANDLE;
- }
+ auto& current_image_data = images_[current_image_];
+ current_image_data.layout = image_layout;
+ end_write_semaphore_ = semaphore;
+ is_writing_ = false;
+}
- image_data->image = VK_NULL_HANDLE;
- }
- images_.clear();
+VulkanSwapChain::ScopedWrite::ScopedWrite(VulkanSwapChain* swap_chain)
+ : swap_chain_(swap_chain) {
+ swap_chain_->BeginWriteCurrentImage(&image_, &image_index_, &image_layout_,
+ &begin_semaphore_);
+}
- if (command_pool_) {
- command_pool_->Destroy();
- command_pool_.reset();
- }
+VulkanSwapChain::ScopedWrite::~ScopedWrite() {
+ DCHECK(begin_semaphore_ == VK_NULL_HANDLE);
+ swap_chain_->EndWriteCurrentImage(image_layout_, end_semaphore_);
}
-VulkanSwapChain::ImageData::ImageData() {}
+VkSemaphore VulkanSwapChain::ScopedWrite::TakeBeginSemaphore() {
+ DCHECK(begin_semaphore_ != VK_NULL_HANDLE);
+ VkSemaphore semaphore = begin_semaphore_;
+ begin_semaphore_ = VK_NULL_HANDLE;
+ return semaphore;
+}
+
+void VulkanSwapChain::ScopedWrite::SetEndSemaphore(VkSemaphore semaphore) {
+ DCHECK(end_semaphore_ == VK_NULL_HANDLE);
+ DCHECK(semaphore != VK_NULL_HANDLE);
+ end_semaphore_ = semaphore;
+}
-VulkanSwapChain::ImageData::~ImageData() {}
+VulkanSwapChain::ImageData::ImageData() = default;
+VulkanSwapChain::ImageData::ImageData(ImageData&& other) = default;
+VulkanSwapChain::ImageData::~ImageData() = default;
+VulkanSwapChain::ImageData& VulkanSwapChain::ImageData::operator=(
+ ImageData&& other) = default;
} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.h b/chromium/gpu/vulkan/vulkan_swap_chain.h
index 1823af7c9bd..73037337f09 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.h
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.h
@@ -22,6 +22,35 @@ class VulkanDeviceQueue;
class VULKAN_EXPORT VulkanSwapChain {
public:
+ class VULKAN_EXPORT ScopedWrite {
+ public:
+ explicit ScopedWrite(VulkanSwapChain* swap_chain);
+ ~ScopedWrite();
+
+ VkImage image() const { return image_; }
+ uint32_t image_index() const { return image_index_; }
+ VkImageLayout image_layout() const { return image_layout_; }
+ void set_image_layout(VkImageLayout layout) { image_layout_ = layout; }
+
+ // Take the begin write semaphore. The ownership of the semaphore will be
+ // transferred to the caller.
+ VkSemaphore TakeBeginSemaphore();
+
+ // Set the end write semaphore. The ownership of the semaphore will be
+ // transferred to ScopedWrite.
+ void SetEndSemaphore(VkSemaphore);
+
+ private:
+ VulkanSwapChain* const swap_chain_;
+ VkImage image_ = VK_NULL_HANDLE;
+ uint32_t image_index_ = 0;
+ VkImageLayout image_layout_ = VK_IMAGE_LAYOUT_UNDEFINED;
+ VkSemaphore begin_semaphore_ = VK_NULL_HANDLE;
+ VkSemaphore end_semaphore_ = VK_NULL_HANDLE;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedWrite);
+ };
+
VulkanSwapChain();
~VulkanSwapChain();
@@ -31,38 +60,12 @@ class VULKAN_EXPORT VulkanSwapChain {
const VkSurfaceFormatKHR& surface_format,
std::unique_ptr<VulkanSwapChain> old_swap_chain);
void Destroy();
-
gfx::SwapResult SwapBuffers();
uint32_t num_images() const { return static_cast<uint32_t>(images_.size()); }
uint32_t current_image() const { return current_image_; }
const gfx::Size& size() const { return size_; }
- VulkanCommandBuffer* GetCurrentCommandBuffer() const {
- DCHECK_LT(current_image_, images_.size());
- return images_[current_image_]->pre_raster_command_buffer.get();
- }
-
- VkImage GetImage(uint32_t index) const {
- DCHECK_LT(index, images_.size());
- return images_[index]->image;
- }
-
- VkImage GetCurrentImage() const {
- DCHECK_LT(current_image_, images_.size());
- return images_[current_image_]->image;
- }
-
- VkImageLayout GetCurrentImageLayout() const {
- DCHECK_LT(current_image_, images_.size());
- return images_[current_image_]->layout;
- }
-
- void SetCurrentImageLayout(VkImageLayout layout) {
- DCHECK_LT(current_image_, images_.size());
- images_[current_image_]->layout = layout;
- }
-
private:
bool InitializeSwapChain(VkSurfaceKHR surface,
const VkSurfaceCapabilitiesKHR& surface_caps,
@@ -73,6 +76,11 @@ class VULKAN_EXPORT VulkanSwapChain {
bool InitializeSwapImages(const VkSurfaceCapabilitiesKHR& surface_caps,
const VkSurfaceFormatKHR& surface_format);
void DestroySwapImages();
+ void BeginWriteCurrentImage(VkImage* image,
+ uint32_t* image_index,
+ VkImageLayout* layout,
+ VkSemaphore* semaphore);
+ void EndWriteCurrentImage(VkImageLayout layout, VkSemaphore semaphore);
VulkanDeviceQueue* device_queue_;
VkSwapchainKHR swap_chain_ = VK_NULL_HANDLE;
@@ -83,20 +91,22 @@ class VULKAN_EXPORT VulkanSwapChain {
struct ImageData {
ImageData();
+ ImageData(ImageData&& other);
~ImageData();
+ ImageData& operator=(ImageData&& other);
+
VkImage image = VK_NULL_HANDLE;
VkImageLayout layout = VK_IMAGE_LAYOUT_UNDEFINED;
- std::unique_ptr<VulkanCommandBuffer> pre_raster_command_buffer;
- std::unique_ptr<VulkanCommandBuffer> post_raster_command_buffer;
-
- VkSemaphore render_semaphore = VK_NULL_HANDLE;
- VkSemaphore present_semaphore = VK_NULL_HANDLE;
+ std::unique_ptr<VulkanCommandBuffer> command_buffer;
};
- std::vector<std::unique_ptr<ImageData>> images_;
+ std::vector<ImageData> images_;
uint32_t current_image_ = 0;
+ bool is_writing_ = false;
+ VkSemaphore begin_write_semaphore_ = VK_NULL_HANDLE;
+ VkSemaphore end_write_semaphore_ = VK_NULL_HANDLE;
- VkSemaphore next_present_semaphore_ = VK_NULL_HANDLE;
+ DISALLOW_COPY_AND_ASSIGN(VulkanSwapChain);
};
} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_util.cc b/chromium/gpu/vulkan/vulkan_util.cc
index 645377c89e8..797d78e557c 100644
--- a/chromium/gpu/vulkan/vulkan_util.cc
+++ b/chromium/gpu/vulkan/vulkan_util.cc
@@ -8,20 +8,27 @@
namespace gpu {
-bool SubmitSignalVkSemaphore(VkQueue vk_queue,
- VkSemaphore vk_semaphore,
- VkFence vk_fence) {
+bool SubmitSignalVkSemaphores(VkQueue vk_queue,
+ const base::span<VkSemaphore>& vk_semaphores,
+ VkFence vk_fence) {
// Structure specifying a queue submit operation.
VkSubmitInfo submit_info = {VK_STRUCTURE_TYPE_SUBMIT_INFO};
- submit_info.signalSemaphoreCount = 1;
- submit_info.pSignalSemaphores = &vk_semaphore;
+ submit_info.signalSemaphoreCount = vk_semaphores.size();
+ submit_info.pSignalSemaphores = vk_semaphores.data();
const unsigned int submit_count = 1;
return vkQueueSubmit(vk_queue, submit_count, &submit_info, vk_fence) ==
VK_SUCCESS;
}
+bool SubmitSignalVkSemaphore(VkQueue vk_queue,
+ VkSemaphore vk_semaphore,
+ VkFence vk_fence) {
+ return SubmitSignalVkSemaphores(
+ vk_queue, base::span<VkSemaphore>(&vk_semaphore, 1), vk_fence);
+}
+
bool SubmitWaitVkSemaphores(VkQueue vk_queue,
- const std::vector<VkSemaphore>& vk_semaphores,
+ const base::span<VkSemaphore>& vk_semaphores,
VkFence vk_fence) {
DCHECK(!vk_semaphores.empty());
// Structure specifying a queue submit operation.
@@ -36,7 +43,8 @@ bool SubmitWaitVkSemaphores(VkQueue vk_queue,
bool SubmitWaitVkSemaphore(VkQueue vk_queue,
VkSemaphore vk_semaphore,
VkFence vk_fence) {
- return SubmitWaitVkSemaphores(vk_queue, {vk_semaphore}, vk_fence);
+ return SubmitWaitVkSemaphores(
+ vk_queue, base::span<VkSemaphore>(&vk_semaphore, 1), vk_fence);
}
VkSemaphore CreateExternalVkSemaphore(
diff --git a/chromium/gpu/vulkan/vulkan_util.h b/chromium/gpu/vulkan/vulkan_util.h
index 91bd763726d..cd85653bcf8 100644
--- a/chromium/gpu/vulkan/vulkan_util.h
+++ b/chromium/gpu/vulkan/vulkan_util.h
@@ -12,10 +12,19 @@
#include <memory>
#include <vector>
+#include "base/containers/span.h"
#include "gpu/vulkan/vulkan_export.h"
namespace gpu {
+// Submits semaphores to be signaled to the vulkan queue. Semaphores are
+// signaled once this submission is executed. vk_fence is an optional handle
+// to fence to be signaled once this submission completes execution.
+VULKAN_EXPORT bool SubmitSignalVkSemaphores(
+ VkQueue vk_queue,
+ const base::span<VkSemaphore>& vk_semaphore,
+ VkFence vk_fence = VK_NULL_HANDLE);
+
// Submits a semaphore to be signaled to the vulkan queue. Semaphore is
// signaled once this submission is executed. vk_fence is an optional handle
// to fence to be signaled once this submission completes execution.
@@ -28,7 +37,7 @@ VULKAN_EXPORT bool SubmitSignalVkSemaphore(VkQueue vk_queue,
// handle to fence to be signaled once this submission completes execution.
VULKAN_EXPORT bool SubmitWaitVkSemaphores(
VkQueue vk_queue,
- const std::vector<VkSemaphore>& vk_semaphores,
+ const base::span<VkSemaphore>& vk_semaphores,
VkFence vk_fence = VK_NULL_HANDLE);
// Submits a semaphore to be waited upon to the vulkan queue. Semaphore is
diff --git a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
index fc14a805137..61b5a443fb3 100644
--- a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
+++ b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.cc
@@ -12,6 +12,7 @@
#include "gpu/vulkan/vulkan_instance.h"
#include "gpu/vulkan/vulkan_surface.h"
#include "ui/gfx/gpu_fence.h"
+#include "ui/gfx/gpu_memory_buffer.h"
namespace gpu {
@@ -129,4 +130,21 @@ VulkanImplementationWin32::GetExternalImageHandleType() {
return VK_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_TEXTURE_BIT;
}
+bool VulkanImplementationWin32::CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) {
+ return false;
+}
+
+bool VulkanImplementationWin32::CreateImageFromGpuMemoryHandle(
+ VkDevice vk_device,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ gfx::Size size,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h
index d49134b574b..bbce467d4eb 100644
--- a/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h
+++ b/chromium/gpu/vulkan/win32/vulkan_implementation_win32.h
@@ -39,6 +39,16 @@ class COMPONENT_EXPORT(VULKAN_WIN32) VulkanImplementationWin32
SemaphoreHandle GetSemaphoreHandle(VkDevice vk_device,
VkSemaphore vk_semaphore) override;
VkExternalMemoryHandleTypeFlagBits GetExternalImageHandleType() override;
+ bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool CreateImageFromGpuMemoryHandle(
+ VkDevice vk_device,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ gfx::Size size,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) override;
private:
VulkanInstance vulkan_instance_;
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
index 69ef970a57d..54fb816cdb7 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
@@ -4,10 +4,12 @@
#include "gpu/vulkan/x/vulkan_implementation_x11.h"
+#include "base/base_paths.h"
#include "base/bind_helpers.h"
#include "base/files/file_path.h"
#include "base/logging.h"
#include "base/optional.h"
+#include "base/path_service.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_instance.h"
#include "gpu/vulkan/vulkan_posix_util.h"
@@ -15,6 +17,7 @@
#include "gpu/vulkan/vulkan_util.h"
#include "gpu/vulkan/x/vulkan_surface_x11.h"
#include "ui/gfx/gpu_fence.h"
+#include "ui/gfx/gpu_memory_buffer.h"
namespace gpu {
@@ -32,7 +35,8 @@ class ScopedUnsetDisplay {
} // namespace
-VulkanImplementationX11::VulkanImplementationX11() {
+VulkanImplementationX11::VulkanImplementationX11(bool use_swiftshader)
+ : VulkanImplementation(use_swiftshader) {
gfx::GetXDisplay();
}
@@ -57,13 +61,22 @@ bool VulkanImplementationX11::InitializeVulkanInstance(bool using_surface) {
VulkanFunctionPointers* vulkan_function_pointers =
gpu::GetVulkanFunctionPointers();
+ base::FilePath path;
+ if (use_swiftshader()) {
+ if (!base::PathService::Get(base::DIR_MODULE, &path))
+ return false;
+ path = path.Append("swiftshader/libvulkan.so");
+ } else {
+ path = base::FilePath("libvulkan.so.1");
+ }
+
base::NativeLibraryLoadError native_library_load_error;
- vulkan_function_pointers->vulkan_loader_library_ = base::LoadNativeLibrary(
- base::FilePath("libvulkan.so.1"), &native_library_load_error);
+ vulkan_function_pointers->vulkan_loader_library_ =
+ base::LoadNativeLibrary(path, &native_library_load_error);
if (!vulkan_function_pointers->vulkan_loader_library_)
return false;
- if (!vulkan_instance_.Initialize(required_extensions, {}))
+ if (!vulkan_instance_.Initialize(required_extensions, {}, use_swiftshader()))
return false;
return true;
}
@@ -83,6 +96,10 @@ bool VulkanImplementationX11::GetPhysicalDevicePresentationSupport(
VkPhysicalDevice device,
const std::vector<VkQueueFamilyProperties>& queue_family_properties,
uint32_t queue_family_index) {
+ // TODO(samans): Don't early out once Swiftshader supports this method.
+ // https://crbug.com/swiftshader/129
+ if (use_swiftshader())
+ return true;
XDisplay* display = gfx::GetXDisplay();
return vkGetPhysicalDeviceXlibPresentationSupportKHR(
device, queue_family_index, display,
@@ -91,9 +108,13 @@ bool VulkanImplementationX11::GetPhysicalDevicePresentationSupport(
std::vector<const char*>
VulkanImplementationX11::GetRequiredDeviceExtensions() {
- std::vector<const char*> extensions = {
- VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME,
- VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME};
+ std::vector<const char*> extensions;
+ // TODO(samans): Add these extensions once Swiftshader supports them.
+ // https://crbug.com/963988
+ if (!use_swiftshader()) {
+ extensions.push_back(VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME);
+ extensions.push_back(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME);
+ }
if (using_surface_)
extensions.push_back(VK_KHR_SWAPCHAIN_EXTENSION_NAME);
return extensions;
@@ -135,4 +156,21 @@ VulkanImplementationX11::GetExternalImageHandleType() {
return VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT;
}
+bool VulkanImplementationX11::CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) {
+ return false;
+}
+
+bool VulkanImplementationX11::CreateImageFromGpuMemoryHandle(
+ VkDevice vk_device,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ gfx::Size size,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
} // namespace gpu
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.h b/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
index 0d3116f0cba..c9345f7dc55 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.h
@@ -17,7 +17,7 @@ namespace gpu {
class COMPONENT_EXPORT(VULKAN_X11) VulkanImplementationX11
: public VulkanImplementation {
public:
- VulkanImplementationX11();
+ explicit VulkanImplementationX11(bool use_swiftshader = false);
~VulkanImplementationX11() override;
// VulkanImplementation:
@@ -40,6 +40,16 @@ class COMPONENT_EXPORT(VULKAN_X11) VulkanImplementationX11
SemaphoreHandle GetSemaphoreHandle(VkDevice vk_device,
VkSemaphore vk_semaphore) override;
VkExternalMemoryHandleTypeFlagBits GetExternalImageHandleType() override;
+ bool CanImportGpuMemoryBuffer(
+ gfx::GpuMemoryBufferType memory_buffer_type) override;
+ bool CreateImageFromGpuMemoryHandle(
+ VkDevice vk_device,
+ gfx::GpuMemoryBufferHandle gmb_handle,
+ gfx::Size size,
+ VkImage* vk_image,
+ VkImageCreateInfo* vk_image_info,
+ VkDeviceMemory* vk_device_memory,
+ VkDeviceSize* mem_allocation_size) override;
private:
bool using_surface_ = true;