author     Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-12 14:27:29 +0200
committer  Allan Sandfeld Jensen <allan.jensen@qt.io>  2020-10-13 09:35:20 +0000
commit     c30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
tree       e992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/gpu
parent     7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
download   qtwebengine-chromium-c30a6232df03e1efbd9f3b226777b07e087a1122.tar.gz
BASELINE: Update Chromium to 85.0.4183.140
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057
Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/gpu')
-rw-r--r-- chromium/gpu/BUILD.gn | 5
-rw-r--r-- chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt | 7
-rw-r--r-- chromium/gpu/GLES2/gl2chromium_autogen.h | 9
-rw-r--r-- chromium/gpu/GLES2/gl2extchromium.h | 1
-rw-r--r-- chromium/gpu/PRESUBMIT.py | 9
-rw-r--r-- chromium/gpu/command_buffer/PRESUBMIT.py | 12
-rwxr-xr-x chromium/gpu/command_buffer/build_gles2_cmd_buffer.py | 81
-rw-r--r-- chromium/gpu/command_buffer/client/client_font_manager.cc | 2
-rw-r--r-- chromium/gpu/command_buffer/client/cmd_buffer_helper.h | 2
-rw-r--r-- chromium/gpu/command_buffer/client/fenced_allocator.h | 2
-rw-r--r-- chromium/gpu/command_buffer/client/fenced_allocator_test.cc | 34
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h | 76
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h | 80
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation.cc | 61
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation.h | 1
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation_autogen.h | 28
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h | 86
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h | 95
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_interface_autogen.h | 19
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h | 19
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h | 27
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h | 19
-rw-r--r-- chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h | 59
-rw-r--r-- chromium/gpu/command_buffer/client/implementation_base.cc | 1
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation.cc | 13
-rw-r--r-- chromium/gpu/command_buffer/client/raster_implementation_gles.cc | 7
-rw-r--r-- chromium/gpu/command_buffer/client/ring_buffer.h | 1
-rw-r--r-- chromium/gpu/command_buffer/client/shared_image_interface.cc | 3
-rw-r--r-- chromium/gpu/command_buffer/client/shared_image_interface.h | 13
-rw-r--r-- chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc | 5
-rw-r--r-- chromium/gpu/command_buffer/common/BUILD.gn | 21
-rw-r--r-- chromium/gpu/command_buffer/common/cmd_buffer_common.h | 2
-rw-r--r-- chromium/gpu/command_buffer/common/discardable_handle.cc | 1
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_format.h | 2
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h | 418
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h | 127
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h | 507
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_utils.cc | 3
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_utils.h | 2
-rw-r--r-- chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h | 13
-rw-r--r-- chromium/gpu/command_buffer/common/mailbox.h | 4
-rw-r--r-- chromium/gpu/command_buffer/common/raster_cmd_format.h | 1
-rw-r--r-- chromium/gpu/command_buffer/common/swap_buffers_complete_params.h | 8
-rw-r--r-- chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt | 11
-rw-r--r-- chromium/gpu/command_buffer/service/BUILD.gn | 3
-rw-r--r-- chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc | 8
-rw-r--r-- chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc | 134
-rw-r--r-- chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h | 53
-rw-r--r-- chromium/gpu/command_buffer/service/buffer_manager.h | 2
-rw-r--r-- chromium/gpu/command_buffer/service/client_service_map.h | 2
-rw-r--r-- chromium/gpu/command_buffer/service/context_group.cc | 1
-rw-r--r-- chromium/gpu/command_buffer/service/context_state.h | 3
-rw-r--r-- chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h | 45
-rw-r--r-- chromium/gpu/command_buffer/service/external_vk_image_backing.cc | 279
-rw-r--r-- chromium/gpu/command_buffer/service/external_vk_image_backing.h | 24
-rw-r--r-- chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc | 35
-rw-r--r-- chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc | 4
-rw-r--r-- chromium/gpu/command_buffer/service/feature_info.cc | 57
-rw-r--r-- chromium/gpu/command_buffer/service/feature_info.h | 3
-rw-r--r-- chromium/gpu/command_buffer/service/gl_context_virtual.cc | 8
-rw-r--r-- chromium/gpu/command_buffer/service/gl_stream_texture_image.h | 19
-rw-r--r-- chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h | 1
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc | 138
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h | 51
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc | 138
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder.h | 2
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h | 172
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc | 27
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h | 11
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h | 23
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc | 93
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc | 192
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc | 15
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h | 53
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc | 20
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h | 62
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc | 6
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h | 9
-rw-r--r-- chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h | 15
-rw-r--r-- chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc | 8
-rw-r--r-- chromium/gpu/command_buffer/service/image_reader_gl_owner.cc | 125
-rw-r--r-- chromium/gpu/command_buffer/service/image_reader_gl_owner.h | 9
-rw-r--r-- chromium/gpu/command_buffer/service/memory_tracking.h | 2
-rw-r--r-- chromium/gpu/command_buffer/service/mock_texture_owner.h | 9
-rw-r--r-- chromium/gpu/command_buffer/service/mocks.h | 1
-rw-r--r-- chromium/gpu/command_buffer/service/program_manager.h | 2
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder.cc | 151
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc | 6
-rw-r--r-- chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc | 31
-rw-r--r-- chromium/gpu/command_buffer/service/sampler_manager.h | 1
-rw-r--r-- chromium/gpu/command_buffer/service/scheduler.cc | 1
-rw-r--r-- chromium/gpu/command_buffer/service/service_font_manager.cc | 1
-rw-r--r-- chromium/gpu/command_buffer/service/service_utils.cc | 22
-rw-r--r-- chromium/gpu/command_buffer/service/service_utils.h | 6
-rw-r--r-- chromium/gpu/command_buffer/service/shader_manager.h | 2
-rw-r--r-- chromium/gpu/command_buffer/service/shared_context_state.cc | 131
-rw-r--r-- chromium/gpu/command_buffer/service/shared_context_state.h | 29
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc | 11
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc | 124
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc | 1313
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h | 18
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h | 296
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc | 16
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h | 22
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm | 257
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_factory.cc | 72
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_factory.h | 11
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_manager.cc | 47
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_representation.cc | 16
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc | 8
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc | 3
-rw-r--r-- chromium/gpu/command_buffer/service/shared_image_video.cc | 24
-rw-r--r-- chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc | 127
-rw-r--r-- chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h | 48
-rw-r--r-- chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc | 21
-rw-r--r-- chromium/gpu/command_buffer/service/surface_texture_gl_owner.h | 4
-rw-r--r-- chromium/gpu/command_buffer/service/sync_point_manager.h | 2
-rw-r--r-- chromium/gpu/command_buffer/service/test_shared_image_backing.cc | 8
-rw-r--r-- chromium/gpu/command_buffer/service/texture_manager.h | 156
-rw-r--r-- chromium/gpu/command_buffer/service/texture_owner.h | 8
-rw-r--r-- chromium/gpu/command_buffer/service/vertex_array_manager.h | 1
-rw-r--r-- chromium/gpu/command_buffer/service/vertex_attrib_manager.h | 2
-rw-r--r-- chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc | 5
-rw-r--r-- chromium/gpu/command_buffer/service/wrapped_sk_image.cc | 122
-rw-r--r-- chromium/gpu/config/gpu_blocklist_unittest.cc | 6
-rw-r--r-- chromium/gpu/config/gpu_control_list.cc | 65
-rw-r--r-- chromium/gpu/config/gpu_control_list.h | 13
-rw-r--r-- chromium/gpu/config/gpu_control_list_entry_unittest.cc | 34
-rw-r--r-- chromium/gpu/config/gpu_control_list_format.txt | 78
-rw-r--r-- chromium/gpu/config/gpu_control_list_testing.json | 41
-rw-r--r-- chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h | 171
-rw-r--r-- chromium/gpu/config/gpu_control_list_testing_autogen.cc | 524
-rw-r--r-- chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h | 2
-rw-r--r-- chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h | 91
-rw-r--r-- chromium/gpu/config/gpu_control_list_version_unittest.cc | 48
-rw-r--r-- chromium/gpu/config/gpu_driver_bug_list.json | 71
-rw-r--r-- chromium/gpu/config/gpu_finch_features.cc | 6
-rw-r--r-- chromium/gpu/config/gpu_finch_features.h | 2
-rw-r--r-- chromium/gpu/config/gpu_info.cc | 23
-rw-r--r-- chromium/gpu/config/gpu_info.h | 46
-rw-r--r-- chromium/gpu/config/gpu_info_collector.cc | 9
-rw-r--r-- chromium/gpu/config/gpu_info_collector.h | 10
-rw-r--r-- chromium/gpu/config/gpu_info_collector_win.cc | 204
-rw-r--r-- chromium/gpu/config/gpu_info_unittest.cc | 15
-rw-r--r-- chromium/gpu/config/gpu_lists_version.h | 2
-rw-r--r-- chromium/gpu/config/gpu_preferences_unittest.cc | 85
-rw-r--r-- chromium/gpu/config/gpu_switches.cc | 2
-rw-r--r-- chromium/gpu/config/gpu_util.cc | 40
-rw-r--r-- chromium/gpu/config/gpu_util.h | 14
-rw-r--r-- chromium/gpu/config/gpu_workaround_list.txt | 5
-rwxr-xr-x chromium/gpu/config/process_json.py | 113
-rw-r--r-- chromium/gpu/config/software_rendering_list.json | 63
-rwxr-xr-x chromium/gpu/gles2_conform_support/generate_gles2_conform_tests.py | 21
-rwxr-xr-x chromium/gpu/gles2_conform_support/generate_gles2_embedded_data.py | 10
-rw-r--r-- chromium/gpu/ipc/client/client_shared_image_interface.cc | 17
-rw-r--r-- chromium/gpu/ipc/client/client_shared_image_interface.h | 6
-rw-r--r-- chromium/gpu/ipc/client/shared_image_interface_proxy.cc | 36
-rw-r--r-- chromium/gpu/ipc/client/shared_image_interface_proxy.h | 6
-rw-r--r-- chromium/gpu/ipc/common/BUILD.gn | 29
-rw-r--r-- chromium/gpu/ipc/common/PRESUBMIT.py | 4
-rw-r--r-- chromium/gpu/ipc/common/gpu_info.mojom | 19
-rw-r--r-- chromium/gpu/ipc/common/gpu_info_mojom_traits.cc | 25
-rw-r--r-- chromium/gpu/ipc/common/gpu_info_mojom_traits.h | 47
-rw-r--r-- chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc | 1
-rw-r--r-- chromium/gpu/ipc/common/gpu_messages.h | 6
-rw-r--r-- chromium/gpu/ipc/common/gpu_param_traits_macros.h | 5
-rw-r--r-- chromium/gpu/ipc/common/luid.mojom | 12
-rw-r--r-- chromium/gpu/ipc/common/luid_mojom_traits.h | 27
-rw-r--r-- chromium/gpu/ipc/in_process_gpu_thread_holder.cc | 10
-rw-r--r-- chromium/gpu/ipc/scheduler_sequence.h | 2
-rw-r--r-- chromium/gpu/ipc/service/BUILD.gn | 2
-rw-r--r-- chromium/gpu/ipc/service/context_url.cc | 7
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager.cc | 45
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager.h | 4
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_manager_delegate.h | 3
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_test_common.cc | 11
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_test_common.h | 7
-rw-r--r-- chromium/gpu/ipc/service/gpu_channel_unittest.cc | 2
-rw-r--r-- chromium/gpu/ipc/service/gpu_init.cc | 270
-rw-r--r-- chromium/gpu/ipc/service/gpu_init.h | 3
-rw-r--r-- chromium/gpu/ipc/service/gpu_memory_ablation_experiment.cc | 227
-rw-r--r-- chromium/gpu/ipc/service/gpu_memory_ablation_experiment.h | 133
-rw-r--r-- chromium/gpu/ipc/service/gpu_watchdog_thread.cc | 71
-rw-r--r-- chromium/gpu/ipc/service/gpu_watchdog_thread.h | 13
-rw-r--r-- chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc | 4
-rw-r--r-- chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h | 7
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_linux.cc | 1
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h | 23
-rw-r--r-- chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm | 93
-rw-r--r-- chromium/gpu/ipc/service/pass_through_image_transport_surface.cc | 13
-rw-r--r-- chromium/gpu/ipc/service/pass_through_image_transport_surface.h | 3
-rw-r--r-- chromium/gpu/ipc/service/shared_image_stub.cc | 23
-rw-r--r-- chromium/gpu/ipc/service/shared_image_stub.h | 6
-rw-r--r-- chromium/gpu/ipc/service/stream_texture_android.cc | 19
-rw-r--r-- chromium/gpu/ipc/service/stream_texture_android.h | 1
-rw-r--r-- chromium/gpu/ipc/shared_image_interface_in_process.cc | 40
-rw-r--r-- chromium/gpu/ipc/shared_image_interface_in_process.h | 9
-rwxr-xr-x chromium/gpu/khronos_glcts_support/generate_khronos_glcts_tests.py | 5
-rw-r--r-- chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc | 1
-rw-r--r-- chromium/gpu/skia_bindings/grcontext_for_webgpu_interface.cc | 1
-rw-r--r-- chromium/gpu/vulkan/BUILD.gn | 2
-rw-r--r-- chromium/gpu/vulkan/PRESUBMIT.py | 4
-rw-r--r-- chromium/gpu/vulkan/demo/vulkan_demo.cc | 1
-rwxr-xr-x chromium/gpu/vulkan/generate_bindings.py | 177
-rw-r--r-- chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc | 54
-rw-r--r-- chromium/gpu/vulkan/vma_wrapper.cc | 63
-rw-r--r-- chromium/gpu/vulkan/vma_wrapper.h | 16
-rw-r--r-- chromium/gpu/vulkan/vulkan_command_buffer.cc | 3
-rw-r--r-- chromium/gpu/vulkan/vulkan_command_buffer.h | 2
-rw-r--r-- chromium/gpu/vulkan/vulkan_cxx.h | 42
-rw-r--r-- chromium/gpu/vulkan/vulkan_cxx_unittest.cc | 90
-rw-r--r-- chromium/gpu/vulkan/vulkan_device_queue.cc | 10
-rw-r--r-- chromium/gpu/vulkan/vulkan_device_queue.h | 2
-rw-r--r-- chromium/gpu/vulkan/vulkan_fence_helper.cc | 32
-rw-r--r-- chromium/gpu/vulkan/vulkan_fence_helper.h | 2
-rw-r--r-- chromium/gpu/vulkan/vulkan_function_pointers.cc | 575
-rw-r--r-- chromium/gpu/vulkan/vulkan_function_pointers.h | 610
-rw-r--r-- chromium/gpu/vulkan/vulkan_image.cc | 9
-rw-r--r-- chromium/gpu/vulkan/vulkan_image.h | 3
-rw-r--r-- chromium/gpu/vulkan/vulkan_image_android.cc | 3
-rw-r--r-- chromium/gpu/vulkan/vulkan_image_fuchsia.cc | 1
-rw-r--r-- chromium/gpu/vulkan/vulkan_image_linux.cc | 1
-rw-r--r-- chromium/gpu/vulkan/vulkan_image_unittest.cc | 15
-rw-r--r-- chromium/gpu/vulkan/vulkan_image_win.cc | 1
-rw-r--r-- chromium/gpu/vulkan/vulkan_implementation.h | 4
-rw-r--r-- chromium/gpu/vulkan/vulkan_instance.cc | 19
-rw-r--r-- chromium/gpu/vulkan/vulkan_instance.h | 2
-rw-r--r-- chromium/gpu/vulkan/vulkan_surface.cc | 27
-rw-r--r-- chromium/gpu/vulkan/vulkan_surface.h | 7
-rw-r--r-- chromium/gpu/vulkan/vulkan_swap_chain.cc | 418
-rw-r--r-- chromium/gpu/vulkan/vulkan_swap_chain.h | 105
-rw-r--r-- chromium/gpu/vulkan/vulkan_util.cc | 47
-rw-r--r-- chromium/gpu/vulkan/vulkan_util.h | 7
-rw-r--r-- chromium/gpu/vulkan/x/vulkan_implementation_x11.cc | 5
-rw-r--r-- chromium/gpu/vulkan/x/vulkan_surface_x11.cc | 53
-rw-r--r-- chromium/gpu/vulkan/x/vulkan_surface_x11.h | 15
236 files changed, 8282 insertions, 4142 deletions
diff --git a/chromium/gpu/BUILD.gn b/chromium/gpu/BUILD.gn
index fb28891aaeb..ecec5ac2f77 100644
--- a/chromium/gpu/BUILD.gn
+++ b/chromium/gpu/BUILD.gn
@@ -73,6 +73,11 @@ component("raster") {
public_deps = [ "//gpu/command_buffer/client:raster_sources" ]
}
+component("mailbox") {
+ defines = [ "IS_GPU_MAILBOX_IMPL" ]
+ public_deps = [ "//gpu/command_buffer/common:mailbox_sources" ]
+}
+
component("webgpu") {
public_deps = [ "//gpu/command_buffer/client:webgpu_sources" ]
}
diff --git a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt
index d5403074f9a..8494f6ddd6d 100644
--- a/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt
+++ b/chromium/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_shared_image.txt
@@ -97,8 +97,9 @@ New Procedures and Functions
scope.
INVALID_ENUM is generated if <mode> is not one of
- SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM or
- SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM.
+ SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
+ SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM, or
+ SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM.
The command
@@ -116,6 +117,7 @@ New Tokens
Accepted by the <mode> parameter of BeginSharedImageAccessCHROMIUM:
SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM 0x8AF6
SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM 0x8AF7
+ SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM 0x8AF8
Errors
@@ -129,3 +131,4 @@ Revision History
2018-10-15 Created.
2018-10-31 Added Begin/End calls.
+ 2020-06-21 Added mode SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM.
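(The extension text above only defines the new token. As a minimal client-side sketch, not part of this patch: assuming a texture id imported from a shared-image mailbox via glCreateAndTexStorage2DSharedImageCHROMIUM, overlay access through the Direct entry points declared later in this patch might look like the following.)

    /* Hypothetical usage sketch; mailbox is assumed to come from a
       gpu::SharedImageInterface-created shared image. */
    GLuint texture = glCreateAndTexStorage2DSharedImageCHROMIUM(mailbox.name);
    glBeginSharedImageAccessDirectCHROMIUM(
        texture, GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
    /* While access is held, the image can be presented as an overlay
       plane instead of being sampled by GL. */
    glEndSharedImageAccessDirectCHROMIUM(texture);
    glDeleteTextures(1, &texture);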
diff --git a/chromium/gpu/GLES2/gl2chromium_autogen.h b/chromium/gpu/GLES2/gl2chromium_autogen.h
index 2d5d43fc670..ab465a6a4f8 100644
--- a/chromium/gpu/GLES2/gl2chromium_autogen.h
+++ b/chromium/gpu/GLES2/gl2chromium_autogen.h
@@ -96,6 +96,7 @@
#define glGetAttachedShaders GLES2_GET_FUN(GetAttachedShaders)
#define glGetAttribLocation GLES2_GET_FUN(GetAttribLocation)
#define glGetBooleanv GLES2_GET_FUN(GetBooleanv)
+#define glGetBooleani_v GLES2_GET_FUN(GetBooleani_v)
#define glGetBufferParameteri64v GLES2_GET_FUN(GetBufferParameteri64v)
#define glGetBufferParameteriv GLES2_GET_FUN(GetBufferParameteriv)
#define glGetError GLES2_GET_FUN(GetError)
@@ -406,5 +407,13 @@
GLES2_GET_FUN(BeginBatchReadAccessSharedImageCHROMIUM)
#define glEndBatchReadAccessSharedImageCHROMIUM \
GLES2_GET_FUN(EndBatchReadAccessSharedImageCHROMIUM)
+#define glEnableiOES GLES2_GET_FUN(EnableiOES)
+#define glDisableiOES GLES2_GET_FUN(DisableiOES)
+#define glBlendEquationiOES GLES2_GET_FUN(BlendEquationiOES)
+#define glBlendEquationSeparateiOES GLES2_GET_FUN(BlendEquationSeparateiOES)
+#define glBlendFunciOES GLES2_GET_FUN(BlendFunciOES)
+#define glBlendFuncSeparateiOES GLES2_GET_FUN(BlendFuncSeparateiOES)
+#define glColorMaskiOES GLES2_GET_FUN(ColorMaskiOES)
+#define glIsEnablediOES GLES2_GET_FUN(IsEnablediOES)
#endif // GPU_GLES2_GL2CHROMIUM_AUTOGEN_H_
diff --git a/chromium/gpu/GLES2/gl2extchromium.h b/chromium/gpu/GLES2/gl2extchromium.h
index b67d61d70c8..245abee2363 100644
--- a/chromium/gpu/GLES2/gl2extchromium.h
+++ b/chromium/gpu/GLES2/gl2extchromium.h
@@ -933,6 +933,7 @@ typedef void(GL_APIENTRYP PFNGLUNPREMULTIPLYANDDITHERCOPYCHROMIUMPROC)(
#define GL_CHROMIUM_shared_image 1
#define GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM 0x8AF6
#define GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM 0x8AF7
+#define GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM 0x8AF8
#endif /* GL_CHROMIUM_shared_image */
/* GL_CHROMIUM_program_completion_query */
diff --git a/chromium/gpu/PRESUBMIT.py b/chromium/gpu/PRESUBMIT.py
index a6311f47889..be1dc1aaeba 100644
--- a/chromium/gpu/PRESUBMIT.py
+++ b/chromium/gpu/PRESUBMIT.py
@@ -17,14 +17,7 @@ def CommonChecks(input_api, output_api):
sys.path = [
input_api.PresubmitLocalPath()
] + sys.path
- disabled_warnings = [
- 'W0622', # redefined-builtin
- 'R0923', # interface-not-implemented
- ]
- output.extend(input_api.canned_checks.RunPylint(
- input_api,
- output_api,
- disabled_warnings=disabled_warnings))
+ output.extend(input_api.canned_checks.RunPylint(input_api, output_api))
finally:
sys.path = sys_path_backup
diff --git a/chromium/gpu/command_buffer/PRESUBMIT.py b/chromium/gpu/command_buffer/PRESUBMIT.py
index c1f484c8c6b..c3ddb2ccae7 100644
--- a/chromium/gpu/command_buffer/PRESUBMIT.py
+++ b/chromium/gpu/command_buffer/PRESUBMIT.py
@@ -10,8 +10,8 @@ for more details on the presubmit API built into depot_tools.
import os.path
-def _IsGLES2CmdBufferFile(file):
- filename = os.path.basename(file.LocalPath())
+def _IsGLES2CmdBufferFile(affected_file):
+ filename = os.path.basename(affected_file.LocalPath())
if filename in [
'build_cmd_buffer_lib.py', 'build_gles2_cmd_buffer.py',
'gles2_cmd_buffer_functions.txt', 'gl2.h', 'gl2ext.h', 'gl3.h', 'gl31.h',
@@ -24,8 +24,8 @@ def _IsGLES2CmdBufferFile(file):
filename.endswith('_autogen.h'))
-def _IsRasterCmdBufferFile(file):
- filename = os.path.basename(file.LocalPath())
+def _IsRasterCmdBufferFile(affected_file):
+ filename = os.path.basename(affected_file.LocalPath())
if filename in [
'build_cmd_buffer_lib.py', 'build_raster_cmd_buffer.py',
'raster_cmd_buffer_functions.txt'
@@ -35,8 +35,8 @@ def _IsRasterCmdBufferFile(file):
return filename.startswith('raster') and filename.endswith('_autogen.h')
-def _IsWebGPUCmdBufferFile(file):
- filename = os.path.basename(file.LocalPath())
+def _IsWebGPUCmdBufferFile(affected_file):
+ filename = os.path.basename(affected_file.LocalPath())
if filename in [
'build_cmd_buffer_lib.py', 'build_webgpu_cmd_buffer.py',
'webgpu_cmd_buffer_functions.txt'
diff --git a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
index 653016c3ae2..75380476f35 100755
--- a/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
+++ b/chromium/gpu/command_buffer/build_gles2_cmd_buffer.py
@@ -308,6 +308,13 @@ _NAMED_TYPE_INFO = {
'GL_UNPACK_SKIP_IMAGES',
'GL_UNPACK_SKIP_PIXELS',
'GL_UNPACK_SKIP_ROWS',
+ 'GL_BLEND_EQUATION_RGB',
+ 'GL_BLEND_EQUATION_ALPHA',
+ 'GL_BLEND_SRC_RGB',
+ 'GL_BLEND_SRC_ALPHA',
+ 'GL_BLEND_DST_RGB',
+ 'GL_BLEND_DST_ALPHA',
+ 'GL_COLOR_WRITEMASK',
# GL_VERTEX_ARRAY_BINDING is the same as GL_VERTEX_ARRAY_BINDING_OES
# 'GL_VERTEX_ARRAY_BINDING',
],
@@ -324,6 +331,13 @@ _NAMED_TYPE_INFO = {
'GL_UNIFORM_BUFFER_BINDING',
'GL_UNIFORM_BUFFER_SIZE',
'GL_UNIFORM_BUFFER_START',
+ 'GL_BLEND_EQUATION_RGB',
+ 'GL_BLEND_EQUATION_ALPHA',
+ 'GL_BLEND_SRC_RGB',
+ 'GL_BLEND_SRC_ALPHA',
+ 'GL_BLEND_DST_RGB',
+ 'GL_BLEND_DST_ALPHA',
+ 'GL_COLOR_WRITEMASK',
],
'invalid': [
'GL_FOG_HINT',
@@ -1578,6 +1592,7 @@ _NAMED_TYPE_INFO = {
'type': 'GLenum',
'is_complete': True,
'valid': [
+ 'GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM',
'GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM',
'GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM',
],
@@ -1828,6 +1843,11 @@ _FUNCTION_INFO = {
'no_gl': True,
'expectation': False,
},
+ 'ColorMaskiOES': {
+ 'extension_flag': 'oes_draw_buffers_indexed',
+ 'unit_test': False,
+ 'extension': 'OES_draw_buffers_indexed',
+ },
'ContextVisibilityHintCHROMIUM': {
'decoder_func': 'DoContextVisibilityHintCHROMIUM',
'extension': 'CHROMIUM_context_visibility_hint',
@@ -1954,6 +1974,15 @@ _FUNCTION_INFO = {
'0': 'GL_FUNC_SUBTRACT'
},
},
+ 'BlendEquationiOES': {
+ 'extension_flag': 'oes_draw_buffers_indexed',
+ 'unit_test': False,
+ 'extension': 'OES_draw_buffers_indexed',
+ 'valid_args': {
+ '1': 'GL_FUNC_SUBTRACT',
+ '2': 'GL_FUNC_SUBTRACT'
+ },
+ },
'BlendEquationSeparate': {
'type': 'StateSet',
'state': 'BlendEquation',
@@ -1961,14 +1990,33 @@ _FUNCTION_INFO = {
'0': 'GL_FUNC_SUBTRACT'
},
},
+ 'BlendEquationSeparateiOES': {
+ 'extension_flag': 'oes_draw_buffers_indexed',
+ 'unit_test': False,
+ 'extension': 'OES_draw_buffers_indexed',
+ 'valid_args': {
+ '1': 'GL_FUNC_SUBTRACT',
+ '2': 'GL_FUNC_SUBTRACT'
+ },
+ },
'BlendFunc': {
'type': 'StateSetRGBAlpha',
'state': 'BlendFunc',
},
+ 'BlendFunciOES': {
+ 'extension_flag': 'oes_draw_buffers_indexed',
+ 'unit_test': False,
+ 'extension': 'OES_draw_buffers_indexed',
+ },
'BlendFuncSeparate': {
'type': 'StateSet',
'state': 'BlendFunc',
},
+ 'BlendFuncSeparateiOES': {
+ 'extension_flag': 'oes_draw_buffers_indexed',
+ 'unit_test': False,
+ 'extension': 'OES_draw_buffers_indexed',
+ },
'BlendBarrierKHR': {
'gl_test_func': 'glBlendBarrierKHR',
'extension': 'KHR_blend_equation_advanced',
@@ -2079,6 +2127,13 @@ _FUNCTION_INFO = {
'impl_func': False,
'client_test': False,
},
+ 'DisableiOES': {
+ 'extension_flag': 'oes_draw_buffers_indexed',
+ 'extension': 'OES_draw_buffers_indexed',
+ 'decoder_func': 'DoDisableiOES',
+ 'impl_func': False,
+ 'unit_test': False,
+ },
'DisableVertexAttribArray': {
'decoder_func': 'DoDisableVertexAttribArray',
'impl_func': False,
@@ -2139,6 +2194,13 @@ _FUNCTION_INFO = {
'impl_func': False,
'client_test': False,
},
+ 'EnableiOES': {
+ 'extension_flag': 'oes_draw_buffers_indexed',
+ 'extension': 'OES_draw_buffers_indexed',
+ 'decoder_func': 'DoEnableiOES',
+ 'impl_func': False,
+ 'unit_test': False,
+ },
'EnableVertexAttribArray': {
'decoder_func': 'DoEnableVertexAttribArray',
'impl_func': False,
@@ -2325,6 +2387,15 @@ _FUNCTION_INFO = {
'decoder_func': 'DoGetBooleanv',
'gl_test_func': 'glGetIntegerv',
},
+ 'GetBooleani_v': {
+ 'type': 'GETn',
+ 'result': ['SizedResult<GLboolean>'],
+ 'decoder_func': 'DoGetBooleani_v',
+ 'shadowed': True,
+ 'client_test': False,
+ 'unit_test': False,
+ 'es3': True
+ },
'GetBufferParameteri64v': {
'type': 'GETn',
'result': ['SizedResult<GLint64>'],
@@ -2721,6 +2792,16 @@ _FUNCTION_INFO = {
'impl_func': False,
'expectation': False,
},
+ 'IsEnablediOES': {
+ 'extension_flag': 'oes_draw_buffers_indexed',
+ 'unit_test': False,
+ 'extension': 'OES_draw_buffers_indexed',
+ 'type': 'Is',
+ 'decoder_func': 'DoIsEnablediOES',
+ 'client_test': False,
+ 'impl_func': False,
+ 'expectation': False,
+ },
'IsFramebuffer': {
'type': 'Is',
'decoder_func': 'DoIsFramebuffer',
diff --git a/chromium/gpu/command_buffer/client/client_font_manager.cc b/chromium/gpu/command_buffer/client/client_font_manager.cc
index 6dc6f269cdb..8a11104ee88 100644
--- a/chromium/gpu/command_buffer/client/client_font_manager.cc
+++ b/chromium/gpu/command_buffer/client/client_font_manager.cc
@@ -4,6 +4,8 @@
#include "gpu/command_buffer/client/client_font_manager.h"
+#include "base/logging.h"
+
namespace gpu {
namespace raster {
diff --git a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
index eca26c60e79..7111a460147 100644
--- a/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
+++ b/chromium/gpu/command_buffer/client/cmd_buffer_helper.h
@@ -11,7 +11,7 @@
#include <stdint.h>
#include <string.h>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator.h b/chromium/gpu/command_buffer/client/fenced_allocator.h
index 7b238dd8249..d3299c11768 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator.h
+++ b/chromium/gpu/command_buffer/client/fenced_allocator.h
@@ -13,7 +13,7 @@
#include <vector>
#include "base/bind.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "gpu/gpu_export.h"
diff --git a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
index ed09dd197f6..f68bf95f8b3 100644
--- a/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
+++ b/chromium/gpu/command_buffer/client/fenced_allocator_test.cc
@@ -119,7 +119,7 @@ TEST_F(FencedAllocatorTest, TestOutOfMemory) {
const unsigned int kSize = 16;
const unsigned int kAllocCount = kBufferSize / kSize;
- CHECK(kAllocCount * kSize == kBufferSize);
+ CHECK_EQ(kAllocCount * kSize, kBufferSize);
// Allocate several buffers to fill in the memory.
FencedAllocator::Offset offsets[kAllocCount];
@@ -161,7 +161,7 @@ TEST_F(FencedAllocatorTest, TestFreePendingToken) {
const unsigned int kSize = 16;
const unsigned int kAllocCount = kBufferSize / kSize;
- CHECK(kAllocCount * kSize == kBufferSize);
+ CHECK_EQ(kAllocCount * kSize, kBufferSize);
// Allocate several buffers to fill in the memory.
FencedAllocator::Offset offsets[kAllocCount];
@@ -209,7 +209,7 @@ TEST_F(FencedAllocatorTest, FreeUnused) {
const unsigned int kSize = 16;
const unsigned int kAllocCount = kBufferSize / kSize;
- CHECK(kAllocCount * kSize == kBufferSize);
+ CHECK_EQ(kAllocCount * kSize, kBufferSize);
// Allocate several buffers to fill in the memory.
FencedAllocator::Offset offsets[kAllocCount];
@@ -406,7 +406,7 @@ TEST_F(FencedAllocatorWrapperTest, TestBasic) {
allocator_->CheckConsistency();
const unsigned int kSize = 16;
- void *pointer = allocator_->Alloc(kSize);
+ void* pointer = allocator_->Alloc(kSize);
ASSERT_TRUE(pointer);
EXPECT_LE(buffer_.get(), static_cast<char *>(pointer));
EXPECT_GE(kBufferSize, static_cast<char *>(pointer) - buffer_.get() + kSize);
@@ -415,14 +415,14 @@ TEST_F(FencedAllocatorWrapperTest, TestBasic) {
allocator_->Free(pointer);
EXPECT_TRUE(allocator_->CheckConsistency());
- char *pointer_char = allocator_->AllocTyped<char>(kSize);
+ char* pointer_char = allocator_->AllocTyped<char>(kSize);
ASSERT_TRUE(pointer_char);
EXPECT_LE(buffer_.get(), pointer_char);
EXPECT_GE(buffer_.get() + kBufferSize, pointer_char + kSize);
allocator_->Free(pointer_char);
EXPECT_TRUE(allocator_->CheckConsistency());
- unsigned int *pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
+ unsigned int* pointer_uint = allocator_->AllocTyped<unsigned int>(kSize);
ASSERT_TRUE(pointer_uint);
EXPECT_LE(buffer_.get(), reinterpret_cast<char *>(pointer_uint));
EXPECT_GE(buffer_.get() + kBufferSize,
@@ -439,7 +439,7 @@ TEST_F(FencedAllocatorWrapperTest, TestBasic) {
TEST_F(FencedAllocatorWrapperTest, TestAllocZero) {
allocator_->CheckConsistency();
- void *pointer = allocator_->Alloc(0);
+ void* pointer = allocator_->Alloc(0);
ASSERT_FALSE(pointer);
EXPECT_TRUE(allocator_->CheckConsistency());
}
@@ -449,15 +449,15 @@ TEST_F(FencedAllocatorWrapperTest, TestAlignment) {
allocator_->CheckConsistency();
const unsigned int kSize1 = 75;
- void *pointer1 = allocator_->Alloc(kSize1);
+ void* pointer1 = allocator_->Alloc(kSize1);
ASSERT_TRUE(pointer1);
- EXPECT_EQ(reinterpret_cast<intptr_t>(pointer1) & (kAllocAlignment - 1), 0);
+ EXPECT_TRUE(base::IsAligned(pointer1, kAllocAlignment));
EXPECT_TRUE(allocator_->CheckConsistency());
const unsigned int kSize2 = 43;
- void *pointer2 = allocator_->Alloc(kSize2);
+ void* pointer2 = allocator_->Alloc(kSize2);
ASSERT_TRUE(pointer2);
- EXPECT_EQ(reinterpret_cast<intptr_t>(pointer2) & (kAllocAlignment - 1), 0);
+ EXPECT_TRUE(base::IsAligned(pointer2, kAllocAlignment));
EXPECT_TRUE(allocator_->CheckConsistency());
allocator_->Free(pointer2);
@@ -473,10 +473,10 @@ TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
const unsigned int kSize = 16;
const unsigned int kAllocCount = kBufferSize / kSize;
- CHECK(kAllocCount * kSize == kBufferSize);
+ CHECK_EQ(kAllocCount * kSize, kBufferSize);
// Allocate several buffers to fill in the memory.
- void *pointers[kAllocCount];
+ void* pointers[kAllocCount];
for (unsigned int i = 0; i < kAllocCount; ++i) {
pointers[i] = allocator_->Alloc(kSize);
EXPECT_TRUE(pointers[i]);
@@ -484,7 +484,7 @@ TEST_F(FencedAllocatorWrapperTest, TestOutOfMemory) {
}
// This allocation should fail.
- void *pointer_failed = allocator_->Alloc(kSize);
+ void* pointer_failed = allocator_->Alloc(kSize);
EXPECT_FALSE(pointer_failed);
EXPECT_TRUE(allocator_->CheckConsistency());
@@ -513,10 +513,10 @@ TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
const unsigned int kSize = 16;
const unsigned int kAllocCount = kBufferSize / kSize;
- CHECK(kAllocCount * kSize == kBufferSize);
+ CHECK_EQ(kAllocCount * kSize, kBufferSize);
// Allocate several buffers to fill in the memory.
- void *pointers[kAllocCount];
+ void* pointers[kAllocCount];
for (unsigned int i = 0; i < kAllocCount; ++i) {
pointers[i] = allocator_->Alloc(kSize);
EXPECT_TRUE(pointers[i]);
@@ -524,7 +524,7 @@ TEST_F(FencedAllocatorWrapperTest, TestFreePendingToken) {
}
// This allocation should fail.
- void *pointer_failed = allocator_->Alloc(kSize);
+ void* pointer_failed = allocator_->Alloc(kSize);
EXPECT_FALSE(pointer_failed);
EXPECT_TRUE(allocator_->CheckConsistency());
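(Side by side, the two alignment checks this file migrates between, recombined from the hunks above as a sketch only; base::IsAligned is assumed to be available from base's aligned-memory helpers at this revision.)

    void* p = allocator_->Alloc(75);
    // Before: manual bitmask test; needs a cast and the power-of-two trick.
    EXPECT_EQ(reinterpret_cast<intptr_t>(p) & (kAllocAlignment - 1), 0);
    // After: the same predicate, but the intent is explicit.
    EXPECT_TRUE(base::IsAligned(p, kAllocAlignment));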
diff --git a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
index 76546b8dd53..dfe2d8512a2 100644
--- a/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_c_lib_autogen.h
@@ -423,6 +423,11 @@ GLint GL_APIENTRY GLES2GetAttribLocation(GLuint program, const char* name) {
void GL_APIENTRY GLES2GetBooleanv(GLenum pname, GLboolean* params) {
gles2::GetGLContext()->GetBooleanv(pname, params);
}
+void GL_APIENTRY GLES2GetBooleani_v(GLenum pname,
+ GLuint index,
+ GLboolean* data) {
+ gles2::GetGLContext()->GetBooleani_v(pname, index, data);
+}
void GL_APIENTRY GLES2GetBufferParameteri64v(GLenum target,
GLenum pname,
GLint64* params) {
@@ -1832,6 +1837,41 @@ void GL_APIENTRY GLES2BeginBatchReadAccessSharedImageCHROMIUM() {
void GL_APIENTRY GLES2EndBatchReadAccessSharedImageCHROMIUM() {
gles2::GetGLContext()->EndBatchReadAccessSharedImageCHROMIUM();
}
+void GL_APIENTRY GLES2EnableiOES(GLenum target, GLuint index) {
+ gles2::GetGLContext()->EnableiOES(target, index);
+}
+void GL_APIENTRY GLES2DisableiOES(GLenum target, GLuint index) {
+ gles2::GetGLContext()->DisableiOES(target, index);
+}
+void GL_APIENTRY GLES2BlendEquationiOES(GLuint buf, GLenum mode) {
+ gles2::GetGLContext()->BlendEquationiOES(buf, mode);
+}
+void GL_APIENTRY GLES2BlendEquationSeparateiOES(GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha) {
+ gles2::GetGLContext()->BlendEquationSeparateiOES(buf, modeRGB, modeAlpha);
+}
+void GL_APIENTRY GLES2BlendFunciOES(GLuint buf, GLenum src, GLenum dst) {
+ gles2::GetGLContext()->BlendFunciOES(buf, src, dst);
+}
+void GL_APIENTRY GLES2BlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ gles2::GetGLContext()->BlendFuncSeparateiOES(buf, srcRGB, dstRGB, srcAlpha,
+ dstAlpha);
+}
+void GL_APIENTRY GLES2ColorMaskiOES(GLuint buf,
+ GLboolean r,
+ GLboolean g,
+ GLboolean b,
+ GLboolean a) {
+ gles2::GetGLContext()->ColorMaskiOES(buf, r, g, b, a);
+}
+GLboolean GL_APIENTRY GLES2IsEnablediOES(GLenum target, GLuint index) {
+ return gles2::GetGLContext()->IsEnablediOES(target, index);
+}
namespace gles2 {
@@ -2169,6 +2209,10 @@ extern const NameToFunc g_gles2_function_table[] = {
reinterpret_cast<GLES2FunctionPointer>(glGetBooleanv),
},
{
+ "glGetBooleani_v",
+ reinterpret_cast<GLES2FunctionPointer>(glGetBooleani_v),
+ },
+ {
"glGetBufferParameteri64v",
reinterpret_cast<GLES2FunctionPointer>(glGetBufferParameteri64v),
},
@@ -3277,6 +3321,38 @@ extern const NameToFunc g_gles2_function_table[] = {
glEndBatchReadAccessSharedImageCHROMIUM),
},
{
+ "glEnableiOES",
+ reinterpret_cast<GLES2FunctionPointer>(glEnableiOES),
+ },
+ {
+ "glDisableiOES",
+ reinterpret_cast<GLES2FunctionPointer>(glDisableiOES),
+ },
+ {
+ "glBlendEquationiOES",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendEquationiOES),
+ },
+ {
+ "glBlendEquationSeparateiOES",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendEquationSeparateiOES),
+ },
+ {
+ "glBlendFunciOES",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendFunciOES),
+ },
+ {
+ "glBlendFuncSeparateiOES",
+ reinterpret_cast<GLES2FunctionPointer>(glBlendFuncSeparateiOES),
+ },
+ {
+ "glColorMaskiOES",
+ reinterpret_cast<GLES2FunctionPointer>(glColorMaskiOES),
+ },
+ {
+ "glIsEnablediOES",
+ reinterpret_cast<GLES2FunctionPointer>(glIsEnablediOES),
+ },
+ {
nullptr,
nullptr,
},
diff --git a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
index 7aa80690359..efe2fe42a7b 100644
--- a/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_cmd_helper_autogen.h
@@ -851,6 +851,16 @@ void GetBooleanv(GLenum pname,
}
}
+void GetBooleani_v(GLenum pname,
+ GLuint index,
+ uint32_t data_shm_id,
+ uint32_t data_shm_offset) {
+ gles2::cmds::GetBooleani_v* c = GetCmdSpace<gles2::cmds::GetBooleani_v>();
+ if (c) {
+ c->Init(pname, index, data_shm_id, data_shm_offset);
+ }
+}
+
void GetBufferParameteri64v(GLenum target,
GLenum pname,
uint32_t params_shm_id,
@@ -3415,4 +3425,74 @@ void EndBatchReadAccessSharedImageCHROMIUM() {
}
}
+void EnableiOES(GLenum target, GLuint index) {
+ gles2::cmds::EnableiOES* c = GetCmdSpace<gles2::cmds::EnableiOES>();
+ if (c) {
+ c->Init(target, index);
+ }
+}
+
+void DisableiOES(GLenum target, GLuint index) {
+ gles2::cmds::DisableiOES* c = GetCmdSpace<gles2::cmds::DisableiOES>();
+ if (c) {
+ c->Init(target, index);
+ }
+}
+
+void BlendEquationiOES(GLuint buf, GLenum mode) {
+ gles2::cmds::BlendEquationiOES* c =
+ GetCmdSpace<gles2::cmds::BlendEquationiOES>();
+ if (c) {
+ c->Init(buf, mode);
+ }
+}
+
+void BlendEquationSeparateiOES(GLuint buf, GLenum modeRGB, GLenum modeAlpha) {
+ gles2::cmds::BlendEquationSeparateiOES* c =
+ GetCmdSpace<gles2::cmds::BlendEquationSeparateiOES>();
+ if (c) {
+ c->Init(buf, modeRGB, modeAlpha);
+ }
+}
+
+void BlendFunciOES(GLuint buf, GLenum src, GLenum dst) {
+ gles2::cmds::BlendFunciOES* c = GetCmdSpace<gles2::cmds::BlendFunciOES>();
+ if (c) {
+ c->Init(buf, src, dst);
+ }
+}
+
+void BlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ gles2::cmds::BlendFuncSeparateiOES* c =
+ GetCmdSpace<gles2::cmds::BlendFuncSeparateiOES>();
+ if (c) {
+ c->Init(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
+ }
+}
+
+void ColorMaskiOES(GLuint buf,
+ GLboolean r,
+ GLboolean g,
+ GLboolean b,
+ GLboolean a) {
+ gles2::cmds::ColorMaskiOES* c = GetCmdSpace<gles2::cmds::ColorMaskiOES>();
+ if (c) {
+ c->Init(buf, r, g, b, a);
+ }
+}
+
+void IsEnablediOES(GLenum target,
+ GLuint index,
+ uint32_t result_shm_id,
+ uint32_t result_shm_offset) {
+ gles2::cmds::IsEnablediOES* c = GetCmdSpace<gles2::cmds::IsEnablediOES>();
+ if (c) {
+ c->Init(target, index, result_shm_id, result_shm_offset);
+ }
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_CMD_HELPER_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.cc b/chromium/gpu/command_buffer/client/gles2_implementation.cc
index c5d65e65595..49d050e7e0d 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.cc
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.cc
@@ -647,6 +647,24 @@ void GLES2Implementation::Disable(GLenum cap) {
CheckGLError();
}
+void GLES2Implementation::DisableiOES(GLenum target, GLuint index) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glDisableiOES("
+ << GLES2Util::GetStringEnum(target) << ", " << index
+ << ")");
+ if (index == 0u && target == GL_BLEND) {
+ bool changed = false;
+ DCHECK(target == GL_BLEND);
+ if (!state_.SetCapabilityState(target, false, &changed) || changed) {
+ helper_->DisableiOES(target, index);
+ }
+ } else {
+ helper_->DisableiOES(target, index);
+ }
+
+ CheckGLError();
+}
+
void GLES2Implementation::Enable(GLenum cap) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnable("
@@ -658,6 +676,24 @@ void GLES2Implementation::Enable(GLenum cap) {
CheckGLError();
}
+void GLES2Implementation::EnableiOES(GLenum target, GLuint index) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glEnableiOES("
+ << GLES2Util::GetStringEnum(target) << ", " << index
+ << ")");
+ if (index == 0u && target == GL_BLEND) {
+ bool changed = false;
+ DCHECK(target == GL_BLEND);
+ if (!state_.SetCapabilityState(target, true, &changed) || changed) {
+ helper_->EnableiOES(target, index);
+ }
+ } else {
+ helper_->EnableiOES(target, index);
+ }
+
+ CheckGLError();
+}
+
GLboolean GLES2Implementation::IsEnabled(GLenum cap) {
GPU_CLIENT_SINGLE_THREAD_CHECK();
GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsEnabled("
@@ -680,6 +716,24 @@ GLboolean GLES2Implementation::IsEnabled(GLenum cap) {
return state;
}
+GLboolean GLES2Implementation::IsEnablediOES(GLenum target, GLuint index) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glIsEnablediOES("
+ << GLES2Util::GetStringCapability(target) << ", " << index
+ << ")");
+ bool state = false;
+ typedef cmds::IsEnabled::Result Result;
+ auto result = GetResultAs<Result>();
+ *result = 0;
+ helper_->IsEnablediOES(target, index, GetResultShmId(), result.offset());
+ WaitForCmd();
+ state = (*result) != 0;
+
+ GPU_CLIENT_LOG("returned " << state);
+ CheckGLError();
+ return state;
+}
+
bool GLES2Implementation::GetHelper(GLenum pname, GLint* params) {
// TODO(zmo): For all the BINDING points, there is a possibility where
// resources are shared among multiple contexts, that the cached points
@@ -1105,6 +1159,13 @@ bool GLES2Implementation::GetBooleanvHelper(GLenum pname, GLboolean* params) {
return true;
}
+bool GLES2Implementation::GetBooleani_vHelper(GLenum pname,
+ GLuint index,
+ GLboolean* data) {
+ // TODO(zmo): Implement client side caching.
+ return false;
+}
+
bool GLES2Implementation::GetFloatvHelper(GLenum pname, GLfloat* params) {
// TODO(gman): Make this handle pnames that return more than 1 value.
switch (pname) {
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation.h b/chromium/gpu/command_buffer/client/gles2_implementation.h
index e0db2688e6b..5b828ea7208 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation.h
@@ -599,6 +599,7 @@ class GLES2_IMPL_EXPORT GLES2Implementation : public GLES2Interface,
bool GetHelper(GLenum pname, GLint* params);
GLuint GetBoundBufferHelper(GLenum target);
bool GetBooleanvHelper(GLenum pname, GLboolean* params);
+ bool GetBooleani_vHelper(GLenum pname, GLuint index, GLboolean* data);
bool GetBufferParameteri64vHelper(
GLenum target, GLenum pname, GLint64* params);
bool GetBufferParameterivHelper(GLenum target, GLenum pname, GLint* params);
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
index 6fb5046d5de..2ce5be504ca 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_autogen.h
@@ -316,6 +316,8 @@ GLint GetAttribLocation(GLuint program, const char* name) override;
void GetBooleanv(GLenum pname, GLboolean* params) override;
+void GetBooleani_v(GLenum pname, GLuint index, GLboolean* data) override;
+
void GetBufferParameteri64v(GLenum target,
GLenum pname,
GLint64* params) override;
@@ -1291,4 +1293,30 @@ void BeginBatchReadAccessSharedImageCHROMIUM() override;
void EndBatchReadAccessSharedImageCHROMIUM() override;
+void EnableiOES(GLenum target, GLuint index) override;
+
+void DisableiOES(GLenum target, GLuint index) override;
+
+void BlendEquationiOES(GLuint buf, GLenum mode) override;
+
+void BlendEquationSeparateiOES(GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha) override;
+
+void BlendFunciOES(GLuint buf, GLenum src, GLenum dst) override;
+
+void BlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) override;
+
+void ColorMaskiOES(GLuint buf,
+ GLboolean r,
+ GLboolean g,
+ GLboolean b,
+ GLboolean a) override;
+
+GLboolean IsEnablediOES(GLenum target, GLuint index) override;
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
index 69cd2bd09f6..6fa311c7a15 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_impl_autogen.h
@@ -889,6 +889,34 @@ void GLES2Implementation::GetBooleanv(GLenum pname, GLboolean* params) {
});
CheckGLError();
}
+void GLES2Implementation::GetBooleani_v(GLenum pname,
+ GLuint index,
+ GLboolean* data) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_VALIDATE_DESTINATION_INITALIZATION(GLboolean, data);
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glGetBooleani_v("
+ << GLES2Util::GetStringIndexedGLState(pname) << ", "
+ << index << ", " << static_cast<const void*>(data) << ")");
+ TRACE_EVENT0("gpu", "GLES2Implementation::GetBooleani_v");
+ if (GetBooleani_vHelper(pname, index, data)) {
+ return;
+ }
+ typedef cmds::GetBooleani_v::Result Result;
+ ScopedResultPtr<Result> result = GetResultAs<Result>();
+ if (!result) {
+ return;
+ }
+ result->SetNumResults(0);
+ helper_->GetBooleani_v(pname, index, GetResultShmId(), result.offset());
+ WaitForCmd();
+ result->CopyResult(data);
+ GPU_CLIENT_LOG_CODE_BLOCK({
+ for (int32_t i = 0; i < result->GetNumResults(); ++i) {
+ GPU_CLIENT_LOG(" " << i << ": " << result->GetData()[i]);
+ }
+ });
+ CheckGLError();
+}
void GLES2Implementation::GetBufferParameteri64v(GLenum target,
GLenum pname,
GLint64* params) {
@@ -3731,4 +3759,62 @@ void GLES2Implementation::EndBatchReadAccessSharedImageCHROMIUM() {
CheckGLError();
}
+void GLES2Implementation::BlendEquationiOES(GLuint buf, GLenum mode) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendEquationiOES(" << buf
+ << ", " << GLES2Util::GetStringEnum(mode) << ")");
+ helper_->BlendEquationiOES(buf, mode);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendEquationSeparateiOES(GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendEquationSeparateiOES("
+ << buf << ", " << GLES2Util::GetStringEnum(modeRGB) << ", "
+ << GLES2Util::GetStringEnum(modeAlpha) << ")");
+ helper_->BlendEquationSeparateiOES(buf, modeRGB, modeAlpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendFunciOES(GLuint buf, GLenum src, GLenum dst) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendFunciOES(" << buf << ", "
+ << GLES2Util::GetStringEnum(src) << ", "
+ << GLES2Util::GetStringEnum(dst) << ")");
+ helper_->BlendFunciOES(buf, src, dst);
+ CheckGLError();
+}
+
+void GLES2Implementation::BlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glBlendFuncSeparateiOES(" << buf
+ << ", " << GLES2Util::GetStringEnum(srcRGB) << ", "
+ << GLES2Util::GetStringEnum(dstRGB) << ", "
+ << GLES2Util::GetStringEnum(srcAlpha) << ", "
+ << GLES2Util::GetStringEnum(dstAlpha) << ")");
+ helper_->BlendFuncSeparateiOES(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
+ CheckGLError();
+}
+
+void GLES2Implementation::ColorMaskiOES(GLuint buf,
+ GLboolean r,
+ GLboolean g,
+ GLboolean b,
+ GLboolean a) {
+ GPU_CLIENT_SINGLE_THREAD_CHECK();
+ GPU_CLIENT_LOG("[" << GetLogPrefix() << "] glColorMaskiOES(" << buf << ", "
+ << GLES2Util::GetStringBool(r) << ", "
+ << GLES2Util::GetStringBool(g) << ", "
+ << GLES2Util::GetStringBool(b) << ", "
+ << GLES2Util::GetStringBool(a) << ")");
+ helper_->ColorMaskiOES(buf, r, g, b, a);
+ CheckGLError();
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_IMPL_AUTOGEN_H_
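(Taken together, the entry points wired up above let a client drive blend state per draw buffer. A hedged usage sketch, not from this patch; it assumes a context exposing OES_draw_buffers_indexed.)

    glEnableiOES(GL_BLEND, 0);                      // blend only on buffer 0
    glBlendEquationiOES(0, GL_FUNC_SUBTRACT);
    glBlendFuncSeparateiOES(0, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA,
                            GL_ONE, GL_ZERO);
    glColorMaskiOES(1, GL_TRUE, GL_TRUE, GL_TRUE,
                    GL_FALSE);                      // no alpha writes on buffer 1
    GLboolean enabled = glIsEnablediOES(GL_BLEND, 0);
    GLboolean blend0 = GL_FALSE;
    glGetBooleani_v(GL_BLEND, 0, &blend0);          // indexed query, also new here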
diff --git a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
index 5205f3e98f7..c7c3b16a382 100644
--- a/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_implementation_unittest_autogen.h
@@ -758,6 +758,24 @@ TEST_F(GLES2ImplementationTest, GetBooleanv) {
EXPECT_EQ(static_cast<ResultType>(1), result);
}
+TEST_F(GLES2ImplementationTest, GetBooleani_v) {
+ struct Cmds {
+ cmds::GetBooleani_v cmd;
+ };
+ typedef cmds::GetBooleani_v::Result::Type ResultType;
+ ResultType result = 0;
+ Cmds expected;
+ ExpectedMemoryInfo result1 =
+ GetExpectedResultMemory(sizeof(uint32_t) + sizeof(ResultType));
+ expected.cmd.Init(123, 2, result1.id, result1.offset);
+ EXPECT_CALL(*command_buffer(), OnFlush())
+ .WillOnce(SetMemory(result1.ptr, SizedResultHelper<ResultType>(1)))
+ .RetiresOnSaturation();
+ gl_->GetBooleani_v(123, 2, &result);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+ EXPECT_EQ(static_cast<ResultType>(1), result);
+}
+
TEST_F(GLES2ImplementationTest, GetBufferParameteri64v) {
struct Cmds {
cmds::GetBufferParameteri64v cmd;
@@ -3124,4 +3142,81 @@ TEST_F(GLES2ImplementationTest, EndBatchReadAccessSharedImageCHROMIUM) {
gl_->EndBatchReadAccessSharedImageCHROMIUM();
EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
}
+
+TEST_F(GLES2ImplementationTest, EnableiOES) {
+ struct Cmds {
+ cmds::EnableiOES cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->EnableiOES(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, DisableiOES) {
+ struct Cmds {
+ cmds::DisableiOES cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2);
+
+ gl_->DisableiOES(1, 2);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendEquationiOES) {
+ struct Cmds {
+ cmds::BlendEquationiOES cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, GL_FUNC_SUBTRACT);
+
+ gl_->BlendEquationiOES(1, GL_FUNC_SUBTRACT);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendEquationSeparateiOES) {
+ struct Cmds {
+ cmds::BlendEquationSeparateiOES cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, GL_FUNC_SUBTRACT, GL_FUNC_SUBTRACT);
+
+ gl_->BlendEquationSeparateiOES(1, GL_FUNC_SUBTRACT, GL_FUNC_SUBTRACT);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendFunciOES) {
+ struct Cmds {
+ cmds::BlendFunciOES cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3);
+
+ gl_->BlendFunciOES(1, 2, 3);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, BlendFuncSeparateiOES) {
+ struct Cmds {
+ cmds::BlendFuncSeparateiOES cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, 2, 3, 4, 5);
+
+ gl_->BlendFuncSeparateiOES(1, 2, 3, 4, 5);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
+
+TEST_F(GLES2ImplementationTest, ColorMaskiOES) {
+ struct Cmds {
+ cmds::ColorMaskiOES cmd;
+ };
+ Cmds expected;
+ expected.cmd.Init(1, true, true, true, true);
+
+ gl_->ColorMaskiOES(1, true, true, true, true);
+ EXPECT_EQ(0, memcmp(&expected, commands_, sizeof(expected)));
+}
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_IMPLEMENTATION_UNITTEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
index 11954f6a4c4..a6cfcf3b536 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_autogen.h
@@ -235,6 +235,7 @@ virtual void GetAttachedShaders(GLuint program,
GLuint* shaders) = 0;
virtual GLint GetAttribLocation(GLuint program, const char* name) = 0;
virtual void GetBooleanv(GLenum pname, GLboolean* params) = 0;
+virtual void GetBooleani_v(GLenum pname, GLuint index, GLboolean* data) = 0;
virtual void GetBufferParameteri64v(GLenum target,
GLenum pname,
GLint64* params) = 0;
@@ -967,4 +968,22 @@ virtual void BeginSharedImageAccessDirectCHROMIUM(GLuint texture,
virtual void EndSharedImageAccessDirectCHROMIUM(GLuint texture) = 0;
virtual void BeginBatchReadAccessSharedImageCHROMIUM() = 0;
virtual void EndBatchReadAccessSharedImageCHROMIUM() = 0;
+virtual void EnableiOES(GLenum target, GLuint index) = 0;
+virtual void DisableiOES(GLenum target, GLuint index) = 0;
+virtual void BlendEquationiOES(GLuint buf, GLenum mode) = 0;
+virtual void BlendEquationSeparateiOES(GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha) = 0;
+virtual void BlendFunciOES(GLuint buf, GLenum src, GLenum dst) = 0;
+virtual void BlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) = 0;
+virtual void ColorMaskiOES(GLuint buf,
+ GLboolean r,
+ GLboolean g,
+ GLboolean b,
+ GLboolean a) = 0;
+virtual GLboolean IsEnablediOES(GLenum target, GLuint index) = 0;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
index af3f5723eba..0af83a4f52b 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_autogen.h
@@ -232,6 +232,7 @@ void GetAttachedShaders(GLuint program,
GLuint* shaders) override;
GLint GetAttribLocation(GLuint program, const char* name) override;
void GetBooleanv(GLenum pname, GLboolean* params) override;
+void GetBooleani_v(GLenum pname, GLuint index, GLboolean* data) override;
void GetBufferParameteri64v(GLenum target,
GLenum pname,
GLint64* params) override;
@@ -937,4 +938,22 @@ void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
void BeginBatchReadAccessSharedImageCHROMIUM() override;
void EndBatchReadAccessSharedImageCHROMIUM() override;
+void EnableiOES(GLenum target, GLuint index) override;
+void DisableiOES(GLenum target, GLuint index) override;
+void BlendEquationiOES(GLuint buf, GLenum mode) override;
+void BlendEquationSeparateiOES(GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha) override;
+void BlendFunciOES(GLuint buf, GLenum src, GLenum dst) override;
+void BlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) override;
+void ColorMaskiOES(GLuint buf,
+ GLboolean r,
+ GLboolean g,
+ GLboolean b,
+ GLboolean a) override;
+GLboolean IsEnablediOES(GLenum target, GLuint index) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
index 1ba3ccf0850..2c8542ba0f0 100644
--- a/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_interface_stub_impl_autogen.h
@@ -274,6 +274,9 @@ GLint GLES2InterfaceStub::GetAttribLocation(GLuint /* program */,
}
void GLES2InterfaceStub::GetBooleanv(GLenum /* pname */,
GLboolean* /* params */) {}
+void GLES2InterfaceStub::GetBooleani_v(GLenum /* pname */,
+ GLuint /* index */,
+ GLboolean* /* data */) {}
void GLES2InterfaceStub::GetBufferParameteri64v(GLenum /* target */,
GLenum /* pname */,
GLint64* /* params */) {}
@@ -1251,4 +1254,28 @@ void GLES2InterfaceStub::EndSharedImageAccessDirectCHROMIUM(
GLuint /* texture */) {}
void GLES2InterfaceStub::BeginBatchReadAccessSharedImageCHROMIUM() {}
void GLES2InterfaceStub::EndBatchReadAccessSharedImageCHROMIUM() {}
+void GLES2InterfaceStub::EnableiOES(GLenum /* target */, GLuint /* index */) {}
+void GLES2InterfaceStub::DisableiOES(GLenum /* target */, GLuint /* index */) {}
+void GLES2InterfaceStub::BlendEquationiOES(GLuint /* buf */,
+ GLenum /* mode */) {}
+void GLES2InterfaceStub::BlendEquationSeparateiOES(GLuint /* buf */,
+ GLenum /* modeRGB */,
+ GLenum /* modeAlpha */) {}
+void GLES2InterfaceStub::BlendFunciOES(GLuint /* buf */,
+ GLenum /* src */,
+ GLenum /* dst */) {}
+void GLES2InterfaceStub::BlendFuncSeparateiOES(GLuint /* buf */,
+ GLenum /* srcRGB */,
+ GLenum /* dstRGB */,
+ GLenum /* srcAlpha */,
+ GLenum /* dstAlpha */) {}
+void GLES2InterfaceStub::ColorMaskiOES(GLuint /* buf */,
+ GLboolean /* r */,
+ GLboolean /* g */,
+ GLboolean /* b */,
+ GLboolean /* a */) {}
+GLboolean GLES2InterfaceStub::IsEnablediOES(GLenum /* target */,
+ GLuint /* index */) {
+ return 0;
+}
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_INTERFACE_STUB_IMPL_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
index 5091689bd0e..d3d3e45c124 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_autogen.h
@@ -232,6 +232,7 @@ void GetAttachedShaders(GLuint program,
GLuint* shaders) override;
GLint GetAttribLocation(GLuint program, const char* name) override;
void GetBooleanv(GLenum pname, GLboolean* params) override;
+void GetBooleani_v(GLenum pname, GLuint index, GLboolean* data) override;
void GetBufferParameteri64v(GLenum target,
GLenum pname,
GLint64* params) override;
@@ -937,4 +938,22 @@ void BeginSharedImageAccessDirectCHROMIUM(GLuint texture, GLenum mode) override;
void EndSharedImageAccessDirectCHROMIUM(GLuint texture) override;
void BeginBatchReadAccessSharedImageCHROMIUM() override;
void EndBatchReadAccessSharedImageCHROMIUM() override;
+void EnableiOES(GLenum target, GLuint index) override;
+void DisableiOES(GLenum target, GLuint index) override;
+void BlendEquationiOES(GLuint buf, GLenum mode) override;
+void BlendEquationSeparateiOES(GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha) override;
+void BlendFunciOES(GLuint buf, GLenum src, GLenum dst) override;
+void BlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) override;
+void ColorMaskiOES(GLuint buf,
+ GLboolean r,
+ GLboolean g,
+ GLboolean b,
+ GLboolean a) override;
+GLboolean IsEnablediOES(GLenum target, GLuint index) override;
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
index ce0e76e739b..bc4518d66f7 100644
--- a/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
+++ b/chromium/gpu/command_buffer/client/gles2_trace_implementation_impl_autogen.h
@@ -592,6 +592,13 @@ void GLES2TraceImplementation::GetBooleanv(GLenum pname, GLboolean* params) {
gl_->GetBooleanv(pname, params);
}
+void GLES2TraceImplementation::GetBooleani_v(GLenum pname,
+ GLuint index,
+ GLboolean* data) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::GetBooleani_v");
+ gl_->GetBooleani_v(pname, index, data);
+}
+
void GLES2TraceImplementation::GetBufferParameteri64v(GLenum target,
GLenum pname,
GLint64* params) {
@@ -2651,4 +2658,56 @@ void GLES2TraceImplementation::EndBatchReadAccessSharedImageCHROMIUM() {
gl_->EndBatchReadAccessSharedImageCHROMIUM();
}
+void GLES2TraceImplementation::EnableiOES(GLenum target, GLuint index) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::EnableiOES");
+ gl_->EnableiOES(target, index);
+}
+
+void GLES2TraceImplementation::DisableiOES(GLenum target, GLuint index) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::DisableiOES");
+ gl_->DisableiOES(target, index);
+}
+
+void GLES2TraceImplementation::BlendEquationiOES(GLuint buf, GLenum mode) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendEquationiOES");
+ gl_->BlendEquationiOES(buf, mode);
+}
+
+void GLES2TraceImplementation::BlendEquationSeparateiOES(GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendEquationSeparateiOES");
+ gl_->BlendEquationSeparateiOES(buf, modeRGB, modeAlpha);
+}
+
+void GLES2TraceImplementation::BlendFunciOES(GLuint buf,
+ GLenum src,
+ GLenum dst) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendFunciOES");
+ gl_->BlendFunciOES(buf, src, dst);
+}
+
+void GLES2TraceImplementation::BlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::BlendFuncSeparateiOES");
+ gl_->BlendFuncSeparateiOES(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
+}
+
+void GLES2TraceImplementation::ColorMaskiOES(GLuint buf,
+ GLboolean r,
+ GLboolean g,
+ GLboolean b,
+ GLboolean a) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::ColorMaskiOES");
+ gl_->ColorMaskiOES(buf, r, g, b, a);
+}
+
+GLboolean GLES2TraceImplementation::IsEnablediOES(GLenum target, GLuint index) {
+ TRACE_EVENT_BINARY_EFFICIENT0("gpu", "GLES2Trace::IsEnablediOES");
+ return gl_->IsEnablediOES(target, index);
+}
+
#endif // GPU_COMMAND_BUFFER_CLIENT_GLES2_TRACE_IMPLEMENTATION_IMPL_AUTOGEN_H_
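
Each trace method added above follows one mechanical decorator shape: emit a trace event, then forward the call unchanged to the wrapped interface. A standalone toy of that shape, with printf standing in for TRACE_EVENT_BINARY_EFFICIENT0 and hypothetical class names:

  #include <cstdio>

  // Decorator shape used by the tracing layer: record an event, then delegate.
  struct Gl {
    void EnableiOES(unsigned target, unsigned index) {
      std::printf("gl: Enablei(0x%x, %u)\n", target, index);
    }
  };

  struct TracingGl {
    explicit TracingGl(Gl* gl) : gl_(gl) {}
    void EnableiOES(unsigned target, unsigned index) {
      std::printf("trace: GLES2Trace::EnableiOES\n");  // TRACE_EVENT stand-in
      gl_->EnableiOES(target, index);                  // forward unchanged
    }
    Gl* gl_;
  };

  int main() {
    Gl gl;
    TracingGl traced(&gl);
    traced.EnableiOES(0x0BE2 /* GL_BLEND */, 1);
    return 0;
  }
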
diff --git a/chromium/gpu/command_buffer/client/implementation_base.cc b/chromium/gpu/command_buffer/client/implementation_base.cc
index 15ae10c9ad1..a2404fd65a7 100644
--- a/chromium/gpu/command_buffer/client/implementation_base.cc
+++ b/chromium/gpu/command_buffer/client/implementation_base.cc
@@ -7,6 +7,7 @@
#include <algorithm>
#include "base/bind.h"
+#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/trace_event.h"
diff --git a/chromium/gpu/command_buffer/client/raster_implementation.cc b/chromium/gpu/command_buffer/client/raster_implementation.cc
index 92057b8752f..f1b93c14f5f 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation.cc
@@ -991,10 +991,6 @@ void RasterImplementation::GetQueryObjectui64vEXT(GLuint id,
void* RasterImplementation::MapRasterCHROMIUM(uint32_t size,
uint32_t* size_allocated) {
*size_allocated = 0u;
- if (size < 0) {
- SetGLError(GL_INVALID_VALUE, "glMapRasterCHROMIUM", "negative size");
- return nullptr;
- }
if (raster_mapped_buffer_) {
SetGLError(GL_INVALID_OPERATION, "glMapRasterCHROMIUM", "already mapped");
return nullptr;
@@ -1010,10 +1006,6 @@ void* RasterImplementation::MapRasterCHROMIUM(uint32_t size,
}
void* RasterImplementation::MapFontBuffer(uint32_t size) {
- if (size < 0) {
- SetGLError(GL_INVALID_VALUE, "glMapFontBufferCHROMIUM", "negative size");
- return nullptr;
- }
if (font_mapped_buffer_) {
SetGLError(GL_INVALID_OPERATION, "glMapFontBufferCHROMIUM",
"already mapped");
@@ -1036,11 +1028,6 @@ void* RasterImplementation::MapFontBuffer(uint32_t size) {
void RasterImplementation::UnmapRasterCHROMIUM(uint32_t raster_written_size,
uint32_t total_written_size) {
- if (total_written_size < 0) {
- SetGLError(GL_INVALID_VALUE, "glUnmapRasterCHROMIUM",
- "negative written_size");
- return;
- }
if (!raster_mapped_buffer_) {
SetGLError(GL_INVALID_OPERATION, "glUnmapRasterCHROMIUM", "not mapped");
return;
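
The size < 0 and total_written_size < 0 guards removed above were dead code: both parameters are uint32_t, so the comparison is tautologically false and modern compilers warn about it. A minimal standalone illustration (function name is hypothetical):

  #include <cstdint>
  #include <cstdio>

  // With an unsigned parameter, a negative argument wraps at the call
  // boundary, so a "size < 0" guard inside the callee can never fire; GCC and
  // Clang diagnose the comparison as always false.
  void MapBuffer(uint32_t size) {
    std::printf("size seen by callee: %u\n", size);
  }

  int main() {
    MapBuffer(static_cast<uint32_t>(-1));  // wraps to 4294967295
    return 0;
  }
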
diff --git a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
index 163e7c0a5e9..299dd4f9874 100644
--- a/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
+++ b/chromium/gpu/command_buffer/client/raster_implementation_gles.cc
@@ -39,6 +39,8 @@ GLenum SkColorTypeToGLDataFormat(SkColorType color_type) {
return GL_RGBA;
case kBGRA_8888_SkColorType:
return GL_BGRA_EXT;
+ case kGray_8_SkColorType:
+ return GL_LUMINANCE;
default:
DLOG(ERROR) << "Unknown SkColorType " << color_type;
}
@@ -50,6 +52,7 @@ GLenum SkColorTypeToGLDataType(SkColorType color_type) {
switch (color_type) {
case kRGBA_8888_SkColorType:
case kBGRA_8888_SkColorType:
+ case kGray_8_SkColorType:
return GL_UNSIGNED_BYTE;
default:
DLOG(ERROR) << "Unknown SkColorType " << color_type;
@@ -170,17 +173,19 @@ void RasterImplementationGLES::WritePixels(const gpu::Mailbox& dest_mailbox,
GLuint row_bytes,
const SkImageInfo& src_info,
const void* src_pixels) {
- DCHECK_EQ(row_bytes, src_info.minRowBytes());
+ DCHECK_GE(row_bytes, src_info.minRowBytes());
GLuint texture_id = CreateAndConsumeForGpuRaster(dest_mailbox);
BeginSharedImageAccessDirectCHROMIUM(
texture_id, GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+ gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, row_bytes / src_info.bytesPerPixel());
gl_->BindTexture(texture_target, texture_id);
gl_->TexSubImage2D(texture_target, 0, dst_x_offset, dst_y_offset,
src_info.width(), src_info.height(),
SkColorTypeToGLDataFormat(src_info.colorType()),
SkColorTypeToGLDataType(src_info.colorType()), src_pixels);
gl_->BindTexture(texture_target, 0);
+ gl_->PixelStorei(GL_UNPACK_ROW_LENGTH, 0);
EndSharedImageAccessDirectCHROMIUM(texture_id);
DeleteGpuRasterTexture(texture_id);
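
The relaxed DCHECK_GE plus the GL_UNPACK_ROW_LENGTH round-trip lets WritePixels upload from a pitched source whose byte stride exceeds the tightly packed row size. UNPACK_ROW_LENGTH is measured in pixels, not bytes, which is why the stride is divided by bytesPerPixel(). A standalone sketch of that arithmetic (helper name is hypothetical):

  #include <cassert>
  #include <cstdint>

  // Converts a byte stride to the pixel count GL_UNPACK_ROW_LENGTH expects.
  // The stride must cover at least one packed row and be a whole number of
  // pixels of the source format.
  uint32_t RowLengthInPixels(uint32_t row_bytes, uint32_t bytes_per_pixel,
                             uint32_t min_row_bytes) {
    assert(row_bytes >= min_row_bytes);        // mirrors the DCHECK_GE above
    assert(row_bytes % bytes_per_pixel == 0);  // stride is whole pixels
    return row_bytes / bytes_per_pixel;
  }

  int main() {
    // 100-pixel-wide RGBA rows padded to a 512-byte stride -> 128-pixel rows.
    assert(RowLengthInPixels(512, 4, 400) == 128);
    return 0;
  }
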
diff --git a/chromium/gpu/command_buffer/client/ring_buffer.h b/chromium/gpu/command_buffer/client/ring_buffer.h
index f0260979f33..faaef510a9e 100644
--- a/chromium/gpu/command_buffer/client/ring_buffer.h
+++ b/chromium/gpu/command_buffer/client/ring_buffer.h
@@ -10,7 +10,6 @@
#include <stdint.h>
#include "base/containers/circular_deque.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "gpu/gpu_export.h"
diff --git a/chromium/gpu/command_buffer/client/shared_image_interface.cc b/chromium/gpu/command_buffer/client/shared_image_interface.cc
index 8b340c00ca3..1830ef08730 100644
--- a/chromium/gpu/command_buffer/client/shared_image_interface.cc
+++ b/chromium/gpu/command_buffer/client/shared_image_interface.cc
@@ -10,4 +10,7 @@ uint32_t SharedImageInterface::UsageForMailbox(const Mailbox& mailbox) {
return 0u;
}
+void SharedImageInterface::NotifyMailboxAdded(const Mailbox& /*mailbox*/,
+ uint32_t /*usage*/) {}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/client/shared_image_interface.h b/chromium/gpu/command_buffer/client/shared_image_interface.h
index 18369b51acd..ade24e373b9 100644
--- a/chromium/gpu/command_buffer/client/shared_image_interface.h
+++ b/chromium/gpu/command_buffer/client/shared_image_interface.h
@@ -149,7 +149,9 @@ class GPU_EXPORT SharedImageInterface {
// wrapping it in GpuMemoryBufferHandle and then creating GpuMemoryBuffer from
// that handle.
virtual void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token) = 0;
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) = 0;
virtual void ReleaseSysmemBufferCollection(
gfx::SysmemBufferCollectionId id) = 0;
@@ -163,6 +165,11 @@ class GPU_EXPORT SharedImageInterface {
// commands on this interface have executed on the service side.
virtual SyncToken GenVerifiedSyncToken() = 0;
+  // Waits on this SyncToken to be released before executing new commands on
+  // this interface on the service side. This is an async wait: it is queued
+  // with the previous commands and sent to the server on the next Flush().
+ virtual void WaitSyncToken(const gpu::SyncToken& sync_token) = 0;
+
// Flush the SharedImageInterface, issuing any deferred IPCs.
virtual void Flush() = 0;
@@ -181,6 +188,10 @@ class GPU_EXPORT SharedImageInterface {
// Provides the usage flags supported by the given |mailbox|. This must have
// been created using a SharedImageInterface on the same channel.
virtual uint32_t UsageForMailbox(const Mailbox& mailbox);
+
+  // Informs this interface that an existing |mailbox| with |usage| can be
+  // passed to DestroySharedImage().
+ virtual void NotifyMailboxAdded(const Mailbox& mailbox, uint32_t usage);
};
} // namespace gpu
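
WaitSyncToken() above is explicitly an async wait: it only records the dependency, which travels to the service with the commands batched up for the next Flush(). A toy model of that deferred contract; DeferredQueue and its methods are illustrative, not the real SharedImageInterface:

  #include <cstdio>
  #include <functional>
  #include <vector>

  // Toy model of the deferred-wait pattern: WaitSyncToken() records the
  // dependency; nothing reaches the service until Flush().
  class DeferredQueue {
   public:
    void WaitSyncToken(int token) {
      pending_.push_back(
          [token] { std::printf("service waits on token %d\n", token); });
    }
    void Enqueue(const char* cmd) {
      pending_.push_back([cmd] { std::printf("service runs %s\n", cmd); });
    }
    void Flush() {  // queued waits and commands run in order, service side
      for (auto& op : pending_)
        op();
      pending_.clear();
    }

   private:
    std::vector<std::function<void()>> pending_;
  };

  int main() {
    DeferredQueue q;
    q.WaitSyncToken(42);  // async: the caller does not block here
    q.Enqueue("CreateSharedImage");
    q.Flush();  // the wait and the command reach the service together
    return 0;
  }
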
diff --git a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
index 8ca8609db9e..6bd739d0cc6 100644
--- a/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
+++ b/chromium/gpu/command_buffer/client/transfer_buffer_unittest.cc
@@ -12,6 +12,7 @@
#include <memory>
#include "base/compiler_specific.h"
+#include "base/memory/aligned_memory.h"
#include "gpu/command_buffer/client/client_test_helper.h"
#include "gpu/command_buffer/client/cmd_buffer_helper.h"
#include "gpu/command_buffer/common/command_buffer.h"
@@ -218,11 +219,11 @@ TEST_F(TransferBufferTest, TooLargeAllocation) {
TEST_F(TransferBufferTest, MemoryAlignmentAfterZeroAllocation) {
Initialize();
void* ptr = transfer_buffer_->Alloc(0);
- EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u);
+ EXPECT_TRUE(base::IsAligned(ptr, kAlignment));
transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken());
// Check that the pointer is aligned on the following allocation.
ptr = transfer_buffer_->Alloc(4);
- EXPECT_EQ((reinterpret_cast<uintptr_t>(ptr) & (kAlignment - 1)), 0u);
+ EXPECT_TRUE(base::IsAligned(ptr, kAlignment));
transfer_buffer_->FreePendingToken(ptr, helper_->InsertToken());
}
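
base::IsAligned expresses the same power-of-two mask test the assertions previously spelled out by hand. The equivalent predicate in plain C++, as a standalone stand-in for the helper from base/memory/aligned_memory.h:

  #include <cassert>
  #include <cstdint>

  // A pointer is aligned to a power-of-two boundary iff its low bits are
  // zero; this is the predicate the test now writes as base::IsAligned.
  bool IsAlignedTo(const void* ptr, uintptr_t alignment) {
    assert((alignment & (alignment - 1)) == 0);  // power of two only
    return (reinterpret_cast<uintptr_t>(ptr) & (alignment - 1)) == 0;
  }

  int main() {
    alignas(16) char buf[32];
    assert(IsAlignedTo(buf, 16));
    assert(!IsAlignedTo(buf + 1, 16));
    return 0;
  }
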
diff --git a/chromium/gpu/command_buffer/common/BUILD.gn b/chromium/gpu/command_buffer/common/BUILD.gn
index bcf81ea98b9..9696af1c3a3 100644
--- a/chromium/gpu/command_buffer/common/BUILD.gn
+++ b/chromium/gpu/command_buffer/common/BUILD.gn
@@ -34,6 +34,14 @@ group("raster") {
}
}
+group("mailbox") {
+ if (is_component_build) {
+ public_deps = [ "//gpu:mailbox" ]
+ } else {
+ public_deps = [ ":mailbox_sources" ]
+ }
+}
+
group("webgpu") {
public_deps = [ ":webgpu_sources" ]
}
@@ -68,8 +76,6 @@ jumbo_source_set("common_sources") {
"gpu_memory_buffer_support.h",
"id_allocator.cc",
"id_allocator.h",
- "mailbox.cc",
- "mailbox.h",
"mailbox_holder.cc",
"mailbox_holder.h",
"presentation_feedback_utils.cc",
@@ -92,6 +98,7 @@ jumbo_source_set("common_sources") {
configs += [ "//gpu:gpu_implementation" ]
public_deps = [
+ ":mailbox",
"//base/util/type_safety",
"//mojo/public/cpp/system",
"//ui/gfx:memory_buffer",
@@ -105,6 +112,16 @@ jumbo_source_set("common_sources") {
configs += [ "//third_party/khronos:khronos_headers" ]
}
+source_set("mailbox_sources") {
+ visibility = [ "//gpu/*" ]
+ defines = [ "IS_GPU_MAILBOX_IMPL" ]
+ sources = [
+ "mailbox.cc",
+ "mailbox.h",
+ ]
+ deps = [ "//base" ]
+}
+
source_set("gles2_sources") {
visibility = [ "//gpu/*" ]
diff --git a/chromium/gpu/command_buffer/common/cmd_buffer_common.h b/chromium/gpu/command_buffer/common/cmd_buffer_common.h
index ae83e190a06..a4a01c35c3b 100644
--- a/chromium/gpu/command_buffer/common/cmd_buffer_common.h
+++ b/chromium/gpu/command_buffer/common/cmd_buffer_common.h
@@ -10,7 +10,7 @@
#include <stddef.h>
#include <stdint.h>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "gpu/command_buffer/common/bitfield_helpers.h"
#include "gpu/gpu_export.h"
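
This hunk is one instance of a pattern that recurs through the patch (also in gles2_cmd_format.h, gles2_cmd_utils.h, and gles2_cmd_utils.cc below): around this Chromium revision the CHECK/DCHECK and NOTREACHED macros were split out of base/logging.h into base/check.h, base/check_op.h, and base/notreached.h, so files that only assert can drop the heavier include. A standalone analogue, with assert standing in for the DCHECK_EQ family:

  #include <cassert>  // the only dependency an assertion-only header needs

  // Stand-in for a DCHECK_EQ-style guard; no logging machinery required.
  inline void CheckOffset(int offset, int expected) {
    assert(offset == expected);  // DCHECK_EQ(offset, expected) in Chromium
  }

  int main() {
    CheckOffset(8, 8);
    return 0;
  }
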
diff --git a/chromium/gpu/command_buffer/common/discardable_handle.cc b/chromium/gpu/command_buffer/common/discardable_handle.cc
index c32bd8c20ba..8540d91ac28 100644
--- a/chromium/gpu/command_buffer/common/discardable_handle.cc
+++ b/chromium/gpu/command_buffer/common/discardable_handle.cc
@@ -5,6 +5,7 @@
#include "gpu/command_buffer/common/discardable_handle.h"
#include "base/atomicops.h"
+#include "base/logging.h"
#include "gpu/command_buffer/common/buffer.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format.h b/chromium/gpu/command_buffer/common/gles2_cmd_format.h
index 6bcbe51c0f3..ecb2dc4a03d 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format.h
@@ -12,7 +12,7 @@
#include <string.h>
#include "base/atomicops.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/rand_util.h"
#include "base/trace_event/trace_event.h"
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
index 05a48970424..2fc6747211d 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_autogen.h
@@ -4155,6 +4155,61 @@ static_assert(offsetof(GetBooleanv, params_shm_id) == 8,
static_assert(offsetof(GetBooleanv, params_shm_offset) == 12,
"offset of GetBooleanv params_shm_offset should be 12");
+struct GetBooleani_v {
+ typedef GetBooleani_v ValueType;
+ static const CommandId kCmdId = kGetBooleani_v;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef SizedResult<GLboolean> Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _pname,
+ GLuint _index,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset) {
+ SetHeader();
+ pname = _pname;
+ index = _index;
+ data_shm_id = _data_shm_id;
+ data_shm_offset = _data_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _pname,
+ GLuint _index,
+ uint32_t _data_shm_id,
+ uint32_t _data_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_pname, _index, _data_shm_id,
+ _data_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t pname;
+ uint32_t index;
+ uint32_t data_shm_id;
+ uint32_t data_shm_offset;
+};
+
+static_assert(sizeof(GetBooleani_v) == 20,
+ "size of GetBooleani_v should be 20");
+static_assert(offsetof(GetBooleani_v, header) == 0,
+ "offset of GetBooleani_v header should be 0");
+static_assert(offsetof(GetBooleani_v, pname) == 4,
+ "offset of GetBooleani_v pname should be 4");
+static_assert(offsetof(GetBooleani_v, index) == 8,
+ "offset of GetBooleani_v index should be 8");
+static_assert(offsetof(GetBooleani_v, data_shm_id) == 12,
+ "offset of GetBooleani_v data_shm_id should be 12");
+static_assert(offsetof(GetBooleani_v, data_shm_offset) == 16,
+ "offset of GetBooleani_v data_shm_offset should be 16");
+
struct GetBufferParameteri64v {
typedef GetBufferParameteri64v ValueType;
static const CommandId kCmdId = kGetBufferParameteri64v;
@@ -16777,4 +16832,367 @@ static_assert(
offsetof(EndBatchReadAccessSharedImageCHROMIUM, header) == 0,
"offset of EndBatchReadAccessSharedImageCHROMIUM header should be 0");
+struct EnableiOES {
+ typedef EnableiOES ValueType;
+ static const CommandId kCmdId = kEnableiOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLuint _index) {
+ SetHeader();
+ target = _target;
+ index = _index;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLuint _index) {
+ static_cast<ValueType*>(cmd)->Init(_target, _index);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t index;
+};
+
+static_assert(sizeof(EnableiOES) == 12, "size of EnableiOES should be 12");
+static_assert(offsetof(EnableiOES, header) == 0,
+ "offset of EnableiOES header should be 0");
+static_assert(offsetof(EnableiOES, target) == 4,
+ "offset of EnableiOES target should be 4");
+static_assert(offsetof(EnableiOES, index) == 8,
+ "offset of EnableiOES index should be 8");
+
+struct DisableiOES {
+ typedef DisableiOES ValueType;
+ static const CommandId kCmdId = kDisableiOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target, GLuint _index) {
+ SetHeader();
+ target = _target;
+ index = _index;
+ }
+
+ void* Set(void* cmd, GLenum _target, GLuint _index) {
+ static_cast<ValueType*>(cmd)->Init(_target, _index);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t index;
+};
+
+static_assert(sizeof(DisableiOES) == 12, "size of DisableiOES should be 12");
+static_assert(offsetof(DisableiOES, header) == 0,
+ "offset of DisableiOES header should be 0");
+static_assert(offsetof(DisableiOES, target) == 4,
+ "offset of DisableiOES target should be 4");
+static_assert(offsetof(DisableiOES, index) == 8,
+ "offset of DisableiOES index should be 8");
+
+struct BlendEquationiOES {
+ typedef BlendEquationiOES ValueType;
+ static const CommandId kCmdId = kBlendEquationiOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _buf, GLenum _mode) {
+ SetHeader();
+ buf = _buf;
+ mode = _mode;
+ }
+
+ void* Set(void* cmd, GLuint _buf, GLenum _mode) {
+ static_cast<ValueType*>(cmd)->Init(_buf, _mode);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t buf;
+ uint32_t mode;
+};
+
+static_assert(sizeof(BlendEquationiOES) == 12,
+ "size of BlendEquationiOES should be 12");
+static_assert(offsetof(BlendEquationiOES, header) == 0,
+ "offset of BlendEquationiOES header should be 0");
+static_assert(offsetof(BlendEquationiOES, buf) == 4,
+ "offset of BlendEquationiOES buf should be 4");
+static_assert(offsetof(BlendEquationiOES, mode) == 8,
+ "offset of BlendEquationiOES mode should be 8");
+
+struct BlendEquationSeparateiOES {
+ typedef BlendEquationSeparateiOES ValueType;
+ static const CommandId kCmdId = kBlendEquationSeparateiOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _buf, GLenum _modeRGB, GLenum _modeAlpha) {
+ SetHeader();
+ buf = _buf;
+ modeRGB = _modeRGB;
+ modeAlpha = _modeAlpha;
+ }
+
+ void* Set(void* cmd, GLuint _buf, GLenum _modeRGB, GLenum _modeAlpha) {
+ static_cast<ValueType*>(cmd)->Init(_buf, _modeRGB, _modeAlpha);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t buf;
+ uint32_t modeRGB;
+ uint32_t modeAlpha;
+};
+
+static_assert(sizeof(BlendEquationSeparateiOES) == 16,
+ "size of BlendEquationSeparateiOES should be 16");
+static_assert(offsetof(BlendEquationSeparateiOES, header) == 0,
+ "offset of BlendEquationSeparateiOES header should be 0");
+static_assert(offsetof(BlendEquationSeparateiOES, buf) == 4,
+ "offset of BlendEquationSeparateiOES buf should be 4");
+static_assert(offsetof(BlendEquationSeparateiOES, modeRGB) == 8,
+ "offset of BlendEquationSeparateiOES modeRGB should be 8");
+static_assert(offsetof(BlendEquationSeparateiOES, modeAlpha) == 12,
+ "offset of BlendEquationSeparateiOES modeAlpha should be 12");
+
+struct BlendFunciOES {
+ typedef BlendFunciOES ValueType;
+ static const CommandId kCmdId = kBlendFunciOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _buf, GLenum _src, GLenum _dst) {
+ SetHeader();
+ buf = _buf;
+ src = _src;
+ dst = _dst;
+ }
+
+ void* Set(void* cmd, GLuint _buf, GLenum _src, GLenum _dst) {
+ static_cast<ValueType*>(cmd)->Init(_buf, _src, _dst);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t buf;
+ uint32_t src;
+ uint32_t dst;
+};
+
+static_assert(sizeof(BlendFunciOES) == 16,
+ "size of BlendFunciOES should be 16");
+static_assert(offsetof(BlendFunciOES, header) == 0,
+ "offset of BlendFunciOES header should be 0");
+static_assert(offsetof(BlendFunciOES, buf) == 4,
+ "offset of BlendFunciOES buf should be 4");
+static_assert(offsetof(BlendFunciOES, src) == 8,
+ "offset of BlendFunciOES src should be 8");
+static_assert(offsetof(BlendFunciOES, dst) == 12,
+ "offset of BlendFunciOES dst should be 12");
+
+struct BlendFuncSeparateiOES {
+ typedef BlendFuncSeparateiOES ValueType;
+ static const CommandId kCmdId = kBlendFuncSeparateiOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _buf,
+ GLenum _srcRGB,
+ GLenum _dstRGB,
+ GLenum _srcAlpha,
+ GLenum _dstAlpha) {
+ SetHeader();
+ buf = _buf;
+ srcRGB = _srcRGB;
+ dstRGB = _dstRGB;
+ srcAlpha = _srcAlpha;
+ dstAlpha = _dstAlpha;
+ }
+
+ void* Set(void* cmd,
+ GLuint _buf,
+ GLenum _srcRGB,
+ GLenum _dstRGB,
+ GLenum _srcAlpha,
+ GLenum _dstAlpha) {
+ static_cast<ValueType*>(cmd)->Init(_buf, _srcRGB, _dstRGB, _srcAlpha,
+ _dstAlpha);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t buf;
+ uint32_t srcRGB;
+ uint32_t dstRGB;
+ uint32_t srcAlpha;
+ uint32_t dstAlpha;
+};
+
+static_assert(sizeof(BlendFuncSeparateiOES) == 24,
+ "size of BlendFuncSeparateiOES should be 24");
+static_assert(offsetof(BlendFuncSeparateiOES, header) == 0,
+ "offset of BlendFuncSeparateiOES header should be 0");
+static_assert(offsetof(BlendFuncSeparateiOES, buf) == 4,
+ "offset of BlendFuncSeparateiOES buf should be 4");
+static_assert(offsetof(BlendFuncSeparateiOES, srcRGB) == 8,
+ "offset of BlendFuncSeparateiOES srcRGB should be 8");
+static_assert(offsetof(BlendFuncSeparateiOES, dstRGB) == 12,
+ "offset of BlendFuncSeparateiOES dstRGB should be 12");
+static_assert(offsetof(BlendFuncSeparateiOES, srcAlpha) == 16,
+ "offset of BlendFuncSeparateiOES srcAlpha should be 16");
+static_assert(offsetof(BlendFuncSeparateiOES, dstAlpha) == 20,
+ "offset of BlendFuncSeparateiOES dstAlpha should be 20");
+
+struct ColorMaskiOES {
+ typedef ColorMaskiOES ValueType;
+ static const CommandId kCmdId = kColorMaskiOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLuint _buf,
+ GLboolean _r,
+ GLboolean _g,
+ GLboolean _b,
+ GLboolean _a) {
+ SetHeader();
+ buf = _buf;
+ r = _r;
+ g = _g;
+ b = _b;
+ a = _a;
+ }
+
+ void* Set(void* cmd,
+ GLuint _buf,
+ GLboolean _r,
+ GLboolean _g,
+ GLboolean _b,
+ GLboolean _a) {
+ static_cast<ValueType*>(cmd)->Init(_buf, _r, _g, _b, _a);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t buf;
+ uint32_t r;
+ uint32_t g;
+ uint32_t b;
+ uint32_t a;
+};
+
+static_assert(sizeof(ColorMaskiOES) == 24,
+ "size of ColorMaskiOES should be 24");
+static_assert(offsetof(ColorMaskiOES, header) == 0,
+ "offset of ColorMaskiOES header should be 0");
+static_assert(offsetof(ColorMaskiOES, buf) == 4,
+ "offset of ColorMaskiOES buf should be 4");
+static_assert(offsetof(ColorMaskiOES, r) == 8,
+ "offset of ColorMaskiOES r should be 8");
+static_assert(offsetof(ColorMaskiOES, g) == 12,
+ "offset of ColorMaskiOES g should be 12");
+static_assert(offsetof(ColorMaskiOES, b) == 16,
+ "offset of ColorMaskiOES b should be 16");
+static_assert(offsetof(ColorMaskiOES, a) == 20,
+ "offset of ColorMaskiOES a should be 20");
+
+struct IsEnablediOES {
+ typedef IsEnablediOES ValueType;
+ static const CommandId kCmdId = kIsEnablediOES;
+ static const cmd::ArgFlags kArgFlags = cmd::kFixed;
+ static const uint8_t cmd_flags = CMD_FLAG_SET_TRACE_LEVEL(3);
+
+ typedef uint32_t Result;
+
+ static uint32_t ComputeSize() {
+ return static_cast<uint32_t>(sizeof(ValueType)); // NOLINT
+ }
+
+ void SetHeader() { header.SetCmd<ValueType>(); }
+
+ void Init(GLenum _target,
+ GLuint _index,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ SetHeader();
+ target = _target;
+ index = _index;
+ result_shm_id = _result_shm_id;
+ result_shm_offset = _result_shm_offset;
+ }
+
+ void* Set(void* cmd,
+ GLenum _target,
+ GLuint _index,
+ uint32_t _result_shm_id,
+ uint32_t _result_shm_offset) {
+ static_cast<ValueType*>(cmd)->Init(_target, _index, _result_shm_id,
+ _result_shm_offset);
+ return NextCmdAddress<ValueType>(cmd);
+ }
+
+ gpu::CommandHeader header;
+ uint32_t target;
+ uint32_t index;
+ uint32_t result_shm_id;
+ uint32_t result_shm_offset;
+};
+
+static_assert(sizeof(IsEnablediOES) == 20,
+ "size of IsEnablediOES should be 20");
+static_assert(offsetof(IsEnablediOES, header) == 0,
+ "offset of IsEnablediOES header should be 0");
+static_assert(offsetof(IsEnablediOES, target) == 4,
+ "offset of IsEnablediOES target should be 4");
+static_assert(offsetof(IsEnablediOES, index) == 8,
+ "offset of IsEnablediOES index should be 8");
+static_assert(offsetof(IsEnablediOES, result_shm_id) == 12,
+ "offset of IsEnablediOES result_shm_id should be 12");
+static_assert(offsetof(IsEnablediOES, result_shm_offset) == 16,
+ "offset of IsEnablediOES result_shm_offset should be 16");
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_AUTOGEN_H_
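
Every struct added to this file follows the same fixed-size wire layout: Init() fills the argument words behind a command header, Set() placement-initializes the struct at the current write pointer and returns the address one command past it, and the static_asserts pin the exact byte layout the service-side decoder expects. A condensed standalone model of the pattern, with the header and opcode handling simplified:

  #include <cassert>
  #include <cstdint>

  struct Header {
    uint32_t command_and_size;  // the real code packs opcode and size here
  };

  // Minimal model of a fixed-size command such as EnableiOES above.
  struct EnableiCmd {
    void Init(uint32_t _target, uint32_t _index) {
      header.command_and_size = 592u;  // EnableiOES opcode from the id table
      target = _target;
      index = _index;
    }
    void* Set(void* cmd, uint32_t _target, uint32_t _index) {
      static_cast<EnableiCmd*>(cmd)->Init(_target, _index);
      return static_cast<char*>(cmd) + sizeof(EnableiCmd);  // NextCmdAddress
    }
    Header header;
    uint32_t target;
    uint32_t index;
  };
  static_assert(sizeof(EnableiCmd) == 12, "wire size must stay fixed");

  int main() {
    alignas(4) char buffer[64];
    EnableiCmd* c = reinterpret_cast<EnableiCmd*>(buffer);
    void* next = c->Set(c, /*target=*/0x0BE2 /* GL_BLEND */, /*index=*/1);
    assert(next == buffer + sizeof(EnableiCmd));
    return 0;
  }
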
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
index 791783b0664..a07cf3607ae 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_format_test_autogen.h
@@ -1327,6 +1327,21 @@ TEST_F(GLES2FormatTest, GetBooleanv) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, GetBooleani_v) {
+ cmds::GetBooleani_v& cmd = *GetBufferAs<cmds::GetBooleani_v>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12),
+ static_cast<uint32_t>(13), static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::GetBooleani_v::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.pname);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.index);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.data_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.data_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
TEST_F(GLES2FormatTest, GetBufferParameteri64v) {
cmds::GetBufferParameteri64v& cmd =
*GetBufferAs<cmds::GetBufferParameteri64v>();
@@ -5558,4 +5573,116 @@ TEST_F(GLES2FormatTest, EndBatchReadAccessSharedImageCHROMIUM) {
CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
}
+TEST_F(GLES2FormatTest, EnableiOES) {
+ cmds::EnableiOES& cmd = *GetBufferAs<cmds::EnableiOES>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::EnableiOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.index);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, DisableiOES) {
+ cmds::DisableiOES& cmd = *GetBufferAs<cmds::DisableiOES>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::DisableiOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.index);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlendEquationiOES) {
+ cmds::BlendEquationiOES& cmd = *GetBufferAs<cmds::BlendEquationiOES>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLenum>(12));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlendEquationiOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.buf);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.mode);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlendEquationSeparateiOES) {
+ cmds::BlendEquationSeparateiOES& cmd =
+ *GetBufferAs<cmds::BlendEquationSeparateiOES>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
+ static_cast<GLenum>(12), static_cast<GLenum>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlendEquationSeparateiOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.buf);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.modeRGB);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.modeAlpha);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlendFunciOES) {
+ cmds::BlendFunciOES& cmd = *GetBufferAs<cmds::BlendFunciOES>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
+ static_cast<GLenum>(12), static_cast<GLenum>(13));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlendFunciOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.buf);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.src);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.dst);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, BlendFuncSeparateiOES) {
+ cmds::BlendFuncSeparateiOES& cmd =
+ *GetBufferAs<cmds::BlendFuncSeparateiOES>();
+ void* next_cmd = cmd.Set(&cmd, static_cast<GLuint>(11),
+ static_cast<GLenum>(12), static_cast<GLenum>(13),
+ static_cast<GLenum>(14), static_cast<GLenum>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::BlendFuncSeparateiOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.buf);
+ EXPECT_EQ(static_cast<GLenum>(12), cmd.srcRGB);
+ EXPECT_EQ(static_cast<GLenum>(13), cmd.dstRGB);
+ EXPECT_EQ(static_cast<GLenum>(14), cmd.srcAlpha);
+ EXPECT_EQ(static_cast<GLenum>(15), cmd.dstAlpha);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, ColorMaskiOES) {
+ cmds::ColorMaskiOES& cmd = *GetBufferAs<cmds::ColorMaskiOES>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLuint>(11), static_cast<GLboolean>(12),
+ static_cast<GLboolean>(13), static_cast<GLboolean>(14),
+ static_cast<GLboolean>(15));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::ColorMaskiOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLuint>(11), cmd.buf);
+ EXPECT_EQ(static_cast<GLboolean>(12), cmd.r);
+ EXPECT_EQ(static_cast<GLboolean>(13), cmd.g);
+ EXPECT_EQ(static_cast<GLboolean>(14), cmd.b);
+ EXPECT_EQ(static_cast<GLboolean>(15), cmd.a);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
+TEST_F(GLES2FormatTest, IsEnablediOES) {
+ cmds::IsEnablediOES& cmd = *GetBufferAs<cmds::IsEnablediOES>();
+ void* next_cmd =
+ cmd.Set(&cmd, static_cast<GLenum>(11), static_cast<GLuint>(12),
+ static_cast<uint32_t>(13), static_cast<uint32_t>(14));
+ EXPECT_EQ(static_cast<uint32_t>(cmds::IsEnablediOES::kCmdId),
+ cmd.header.command);
+ EXPECT_EQ(sizeof(cmd), cmd.header.size * 4u);
+ EXPECT_EQ(static_cast<GLenum>(11), cmd.target);
+ EXPECT_EQ(static_cast<GLuint>(12), cmd.index);
+ EXPECT_EQ(static_cast<uint32_t>(13), cmd.result_shm_id);
+ EXPECT_EQ(static_cast<uint32_t>(14), cmd.result_shm_offset);
+ CheckBytesWrittenMatchesExpectedSize(next_cmd, sizeof(cmd));
+}
+
#endif // GPU_COMMAND_BUFFER_COMMON_GLES2_CMD_FORMAT_TEST_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
index 5ed67e6e384..5d2242b00d5 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_ids_autogen.h
@@ -98,255 +98,264 @@
OP(GetAttachedShaders) /* 339 */ \
OP(GetAttribLocation) /* 340 */ \
OP(GetBooleanv) /* 341 */ \
- OP(GetBufferParameteri64v) /* 342 */ \
- OP(GetBufferParameteriv) /* 343 */ \
- OP(GetError) /* 344 */ \
- OP(GetFloatv) /* 345 */ \
- OP(GetFragDataLocation) /* 346 */ \
- OP(GetFramebufferAttachmentParameteriv) /* 347 */ \
- OP(GetInteger64v) /* 348 */ \
- OP(GetIntegeri_v) /* 349 */ \
- OP(GetInteger64i_v) /* 350 */ \
- OP(GetIntegerv) /* 351 */ \
- OP(GetInternalformativ) /* 352 */ \
- OP(GetProgramiv) /* 353 */ \
- OP(GetProgramInfoLog) /* 354 */ \
- OP(GetRenderbufferParameteriv) /* 355 */ \
- OP(GetSamplerParameterfv) /* 356 */ \
- OP(GetSamplerParameteriv) /* 357 */ \
- OP(GetShaderiv) /* 358 */ \
- OP(GetShaderInfoLog) /* 359 */ \
- OP(GetShaderPrecisionFormat) /* 360 */ \
- OP(GetShaderSource) /* 361 */ \
- OP(GetString) /* 362 */ \
- OP(GetSynciv) /* 363 */ \
- OP(GetTexParameterfv) /* 364 */ \
- OP(GetTexParameteriv) /* 365 */ \
- OP(GetTransformFeedbackVarying) /* 366 */ \
- OP(GetUniformBlockIndex) /* 367 */ \
- OP(GetUniformfv) /* 368 */ \
- OP(GetUniformiv) /* 369 */ \
- OP(GetUniformuiv) /* 370 */ \
- OP(GetUniformIndices) /* 371 */ \
- OP(GetUniformLocation) /* 372 */ \
- OP(GetVertexAttribfv) /* 373 */ \
- OP(GetVertexAttribiv) /* 374 */ \
- OP(GetVertexAttribIiv) /* 375 */ \
- OP(GetVertexAttribIuiv) /* 376 */ \
- OP(GetVertexAttribPointerv) /* 377 */ \
- OP(Hint) /* 378 */ \
- OP(InvalidateFramebufferImmediate) /* 379 */ \
- OP(InvalidateSubFramebufferImmediate) /* 380 */ \
- OP(IsBuffer) /* 381 */ \
- OP(IsEnabled) /* 382 */ \
- OP(IsFramebuffer) /* 383 */ \
- OP(IsProgram) /* 384 */ \
- OP(IsRenderbuffer) /* 385 */ \
- OP(IsSampler) /* 386 */ \
- OP(IsShader) /* 387 */ \
- OP(IsSync) /* 388 */ \
- OP(IsTexture) /* 389 */ \
- OP(IsTransformFeedback) /* 390 */ \
- OP(LineWidth) /* 391 */ \
- OP(LinkProgram) /* 392 */ \
- OP(PauseTransformFeedback) /* 393 */ \
- OP(PixelStorei) /* 394 */ \
- OP(PolygonOffset) /* 395 */ \
- OP(ReadBuffer) /* 396 */ \
- OP(ReadPixels) /* 397 */ \
- OP(ReleaseShaderCompiler) /* 398 */ \
- OP(RenderbufferStorage) /* 399 */ \
- OP(ResumeTransformFeedback) /* 400 */ \
- OP(SampleCoverage) /* 401 */ \
- OP(SamplerParameterf) /* 402 */ \
- OP(SamplerParameterfvImmediate) /* 403 */ \
- OP(SamplerParameteri) /* 404 */ \
- OP(SamplerParameterivImmediate) /* 405 */ \
- OP(Scissor) /* 406 */ \
- OP(ShaderBinary) /* 407 */ \
- OP(ShaderSourceBucket) /* 408 */ \
- OP(MultiDrawBeginCHROMIUM) /* 409 */ \
- OP(MultiDrawEndCHROMIUM) /* 410 */ \
- OP(MultiDrawArraysCHROMIUM) /* 411 */ \
- OP(MultiDrawArraysInstancedCHROMIUM) /* 412 */ \
- OP(MultiDrawArraysInstancedBaseInstanceCHROMIUM) /* 413 */ \
- OP(MultiDrawElementsCHROMIUM) /* 414 */ \
- OP(MultiDrawElementsInstancedCHROMIUM) /* 415 */ \
- OP(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM) /* 416 */ \
- OP(StencilFunc) /* 417 */ \
- OP(StencilFuncSeparate) /* 418 */ \
- OP(StencilMask) /* 419 */ \
- OP(StencilMaskSeparate) /* 420 */ \
- OP(StencilOp) /* 421 */ \
- OP(StencilOpSeparate) /* 422 */ \
- OP(TexImage2D) /* 423 */ \
- OP(TexImage3D) /* 424 */ \
- OP(TexParameterf) /* 425 */ \
- OP(TexParameterfvImmediate) /* 426 */ \
- OP(TexParameteri) /* 427 */ \
- OP(TexParameterivImmediate) /* 428 */ \
- OP(TexStorage3D) /* 429 */ \
- OP(TexSubImage2D) /* 430 */ \
- OP(TexSubImage3D) /* 431 */ \
- OP(TransformFeedbackVaryingsBucket) /* 432 */ \
- OP(Uniform1f) /* 433 */ \
- OP(Uniform1fvImmediate) /* 434 */ \
- OP(Uniform1i) /* 435 */ \
- OP(Uniform1ivImmediate) /* 436 */ \
- OP(Uniform1ui) /* 437 */ \
- OP(Uniform1uivImmediate) /* 438 */ \
- OP(Uniform2f) /* 439 */ \
- OP(Uniform2fvImmediate) /* 440 */ \
- OP(Uniform2i) /* 441 */ \
- OP(Uniform2ivImmediate) /* 442 */ \
- OP(Uniform2ui) /* 443 */ \
- OP(Uniform2uivImmediate) /* 444 */ \
- OP(Uniform3f) /* 445 */ \
- OP(Uniform3fvImmediate) /* 446 */ \
- OP(Uniform3i) /* 447 */ \
- OP(Uniform3ivImmediate) /* 448 */ \
- OP(Uniform3ui) /* 449 */ \
- OP(Uniform3uivImmediate) /* 450 */ \
- OP(Uniform4f) /* 451 */ \
- OP(Uniform4fvImmediate) /* 452 */ \
- OP(Uniform4i) /* 453 */ \
- OP(Uniform4ivImmediate) /* 454 */ \
- OP(Uniform4ui) /* 455 */ \
- OP(Uniform4uivImmediate) /* 456 */ \
- OP(UniformBlockBinding) /* 457 */ \
- OP(UniformMatrix2fvImmediate) /* 458 */ \
- OP(UniformMatrix2x3fvImmediate) /* 459 */ \
- OP(UniformMatrix2x4fvImmediate) /* 460 */ \
- OP(UniformMatrix3fvImmediate) /* 461 */ \
- OP(UniformMatrix3x2fvImmediate) /* 462 */ \
- OP(UniformMatrix3x4fvImmediate) /* 463 */ \
- OP(UniformMatrix4fvImmediate) /* 464 */ \
- OP(UniformMatrix4x2fvImmediate) /* 465 */ \
- OP(UniformMatrix4x3fvImmediate) /* 466 */ \
- OP(UseProgram) /* 467 */ \
- OP(ValidateProgram) /* 468 */ \
- OP(VertexAttrib1f) /* 469 */ \
- OP(VertexAttrib1fvImmediate) /* 470 */ \
- OP(VertexAttrib2f) /* 471 */ \
- OP(VertexAttrib2fvImmediate) /* 472 */ \
- OP(VertexAttrib3f) /* 473 */ \
- OP(VertexAttrib3fvImmediate) /* 474 */ \
- OP(VertexAttrib4f) /* 475 */ \
- OP(VertexAttrib4fvImmediate) /* 476 */ \
- OP(VertexAttribI4i) /* 477 */ \
- OP(VertexAttribI4ivImmediate) /* 478 */ \
- OP(VertexAttribI4ui) /* 479 */ \
- OP(VertexAttribI4uivImmediate) /* 480 */ \
- OP(VertexAttribIPointer) /* 481 */ \
- OP(VertexAttribPointer) /* 482 */ \
- OP(Viewport) /* 483 */ \
- OP(WaitSync) /* 484 */ \
- OP(BlitFramebufferCHROMIUM) /* 485 */ \
- OP(RenderbufferStorageMultisampleCHROMIUM) /* 486 */ \
- OP(RenderbufferStorageMultisampleAdvancedAMD) /* 487 */ \
- OP(RenderbufferStorageMultisampleEXT) /* 488 */ \
- OP(FramebufferTexture2DMultisampleEXT) /* 489 */ \
- OP(TexStorage2DEXT) /* 490 */ \
- OP(GenQueriesEXTImmediate) /* 491 */ \
- OP(DeleteQueriesEXTImmediate) /* 492 */ \
- OP(QueryCounterEXT) /* 493 */ \
- OP(BeginQueryEXT) /* 494 */ \
- OP(BeginTransformFeedback) /* 495 */ \
- OP(EndQueryEXT) /* 496 */ \
- OP(EndTransformFeedback) /* 497 */ \
- OP(SetDisjointValueSyncCHROMIUM) /* 498 */ \
- OP(InsertEventMarkerEXT) /* 499 */ \
- OP(PushGroupMarkerEXT) /* 500 */ \
- OP(PopGroupMarkerEXT) /* 501 */ \
- OP(GenVertexArraysOESImmediate) /* 502 */ \
- OP(DeleteVertexArraysOESImmediate) /* 503 */ \
- OP(IsVertexArrayOES) /* 504 */ \
- OP(BindVertexArrayOES) /* 505 */ \
- OP(FramebufferParameteri) /* 506 */ \
- OP(BindImageTexture) /* 507 */ \
- OP(DispatchCompute) /* 508 */ \
- OP(DispatchComputeIndirect) /* 509 */ \
- OP(DrawArraysIndirect) /* 510 */ \
- OP(DrawElementsIndirect) /* 511 */ \
- OP(GetProgramInterfaceiv) /* 512 */ \
- OP(GetProgramResourceIndex) /* 513 */ \
- OP(GetProgramResourceName) /* 514 */ \
- OP(GetProgramResourceiv) /* 515 */ \
- OP(GetProgramResourceLocation) /* 516 */ \
- OP(MemoryBarrierEXT) /* 517 */ \
- OP(MemoryBarrierByRegion) /* 518 */ \
- OP(SwapBuffers) /* 519 */ \
- OP(GetMaxValueInBufferCHROMIUM) /* 520 */ \
- OP(EnableFeatureCHROMIUM) /* 521 */ \
- OP(MapBufferRange) /* 522 */ \
- OP(UnmapBuffer) /* 523 */ \
- OP(FlushMappedBufferRange) /* 524 */ \
- OP(ResizeCHROMIUM) /* 525 */ \
- OP(GetRequestableExtensionsCHROMIUM) /* 526 */ \
- OP(RequestExtensionCHROMIUM) /* 527 */ \
- OP(GetProgramInfoCHROMIUM) /* 528 */ \
- OP(GetUniformBlocksCHROMIUM) /* 529 */ \
- OP(GetTransformFeedbackVaryingsCHROMIUM) /* 530 */ \
- OP(GetUniformsES3CHROMIUM) /* 531 */ \
- OP(DescheduleUntilFinishedCHROMIUM) /* 532 */ \
- OP(GetTranslatedShaderSourceANGLE) /* 533 */ \
- OP(PostSubBufferCHROMIUM) /* 534 */ \
- OP(CopyTextureCHROMIUM) /* 535 */ \
- OP(CopySubTextureCHROMIUM) /* 536 */ \
- OP(DrawArraysInstancedANGLE) /* 537 */ \
- OP(DrawArraysInstancedBaseInstanceANGLE) /* 538 */ \
- OP(DrawElementsInstancedANGLE) /* 539 */ \
- OP(DrawElementsInstancedBaseVertexBaseInstanceANGLE) /* 540 */ \
- OP(VertexAttribDivisorANGLE) /* 541 */ \
- OP(ProduceTextureDirectCHROMIUMImmediate) /* 542 */ \
- OP(CreateAndConsumeTextureINTERNALImmediate) /* 543 */ \
- OP(BindUniformLocationCHROMIUMBucket) /* 544 */ \
- OP(BindTexImage2DCHROMIUM) /* 545 */ \
- OP(BindTexImage2DWithInternalformatCHROMIUM) /* 546 */ \
- OP(ReleaseTexImage2DCHROMIUM) /* 547 */ \
- OP(TraceBeginCHROMIUM) /* 548 */ \
- OP(TraceEndCHROMIUM) /* 549 */ \
- OP(DiscardFramebufferEXTImmediate) /* 550 */ \
- OP(LoseContextCHROMIUM) /* 551 */ \
- OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 552 */ \
- OP(DrawBuffersEXTImmediate) /* 553 */ \
- OP(DiscardBackbufferCHROMIUM) /* 554 */ \
- OP(ScheduleOverlayPlaneCHROMIUM) /* 555 */ \
- OP(ScheduleCALayerSharedStateCHROMIUM) /* 556 */ \
- OP(ScheduleCALayerCHROMIUM) /* 557 */ \
- OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 558 */ \
- OP(CommitOverlayPlanesCHROMIUM) /* 559 */ \
- OP(FlushDriverCachesCHROMIUM) /* 560 */ \
- OP(ScheduleDCLayerCHROMIUM) /* 561 */ \
- OP(SetActiveURLCHROMIUM) /* 562 */ \
- OP(ContextVisibilityHintCHROMIUM) /* 563 */ \
- OP(CoverageModulationCHROMIUM) /* 564 */ \
- OP(BlendBarrierKHR) /* 565 */ \
- OP(BindFragDataLocationIndexedEXTBucket) /* 566 */ \
- OP(BindFragDataLocationEXTBucket) /* 567 */ \
- OP(GetFragDataIndexEXT) /* 568 */ \
- OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 569 */ \
- OP(OverlayPromotionHintCHROMIUM) /* 570 */ \
- OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 571 */ \
- OP(SetDrawRectangleCHROMIUM) /* 572 */ \
- OP(SetEnableDCLayersCHROMIUM) /* 573 */ \
- OP(InitializeDiscardableTextureCHROMIUM) /* 574 */ \
- OP(UnlockDiscardableTextureCHROMIUM) /* 575 */ \
- OP(LockDiscardableTextureCHROMIUM) /* 576 */ \
- OP(TexStorage2DImageCHROMIUM) /* 577 */ \
- OP(SetColorSpaceMetadataCHROMIUM) /* 578 */ \
- OP(WindowRectanglesEXTImmediate) /* 579 */ \
- OP(CreateGpuFenceINTERNAL) /* 580 */ \
- OP(WaitGpuFenceCHROMIUM) /* 581 */ \
- OP(DestroyGpuFenceCHROMIUM) /* 582 */ \
- OP(SetReadbackBufferShadowAllocationINTERNAL) /* 583 */ \
- OP(FramebufferTextureMultiviewOVR) /* 584 */ \
- OP(MaxShaderCompilerThreadsKHR) /* 585 */ \
- OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 586 */ \
- OP(BeginSharedImageAccessDirectCHROMIUM) /* 587 */ \
- OP(EndSharedImageAccessDirectCHROMIUM) /* 588 */ \
- OP(BeginBatchReadAccessSharedImageCHROMIUM) /* 589 */ \
- OP(EndBatchReadAccessSharedImageCHROMIUM) /* 590 */
+ OP(GetBooleani_v) /* 342 */ \
+ OP(GetBufferParameteri64v) /* 343 */ \
+ OP(GetBufferParameteriv) /* 344 */ \
+ OP(GetError) /* 345 */ \
+ OP(GetFloatv) /* 346 */ \
+ OP(GetFragDataLocation) /* 347 */ \
+ OP(GetFramebufferAttachmentParameteriv) /* 348 */ \
+ OP(GetInteger64v) /* 349 */ \
+ OP(GetIntegeri_v) /* 350 */ \
+ OP(GetInteger64i_v) /* 351 */ \
+ OP(GetIntegerv) /* 352 */ \
+ OP(GetInternalformativ) /* 353 */ \
+ OP(GetProgramiv) /* 354 */ \
+ OP(GetProgramInfoLog) /* 355 */ \
+ OP(GetRenderbufferParameteriv) /* 356 */ \
+ OP(GetSamplerParameterfv) /* 357 */ \
+ OP(GetSamplerParameteriv) /* 358 */ \
+ OP(GetShaderiv) /* 359 */ \
+ OP(GetShaderInfoLog) /* 360 */ \
+ OP(GetShaderPrecisionFormat) /* 361 */ \
+ OP(GetShaderSource) /* 362 */ \
+ OP(GetString) /* 363 */ \
+ OP(GetSynciv) /* 364 */ \
+ OP(GetTexParameterfv) /* 365 */ \
+ OP(GetTexParameteriv) /* 366 */ \
+ OP(GetTransformFeedbackVarying) /* 367 */ \
+ OP(GetUniformBlockIndex) /* 368 */ \
+ OP(GetUniformfv) /* 369 */ \
+ OP(GetUniformiv) /* 370 */ \
+ OP(GetUniformuiv) /* 371 */ \
+ OP(GetUniformIndices) /* 372 */ \
+ OP(GetUniformLocation) /* 373 */ \
+ OP(GetVertexAttribfv) /* 374 */ \
+ OP(GetVertexAttribiv) /* 375 */ \
+ OP(GetVertexAttribIiv) /* 376 */ \
+ OP(GetVertexAttribIuiv) /* 377 */ \
+ OP(GetVertexAttribPointerv) /* 378 */ \
+ OP(Hint) /* 379 */ \
+ OP(InvalidateFramebufferImmediate) /* 380 */ \
+ OP(InvalidateSubFramebufferImmediate) /* 381 */ \
+ OP(IsBuffer) /* 382 */ \
+ OP(IsEnabled) /* 383 */ \
+ OP(IsFramebuffer) /* 384 */ \
+ OP(IsProgram) /* 385 */ \
+ OP(IsRenderbuffer) /* 386 */ \
+ OP(IsSampler) /* 387 */ \
+ OP(IsShader) /* 388 */ \
+ OP(IsSync) /* 389 */ \
+ OP(IsTexture) /* 390 */ \
+ OP(IsTransformFeedback) /* 391 */ \
+ OP(LineWidth) /* 392 */ \
+ OP(LinkProgram) /* 393 */ \
+ OP(PauseTransformFeedback) /* 394 */ \
+ OP(PixelStorei) /* 395 */ \
+ OP(PolygonOffset) /* 396 */ \
+ OP(ReadBuffer) /* 397 */ \
+ OP(ReadPixels) /* 398 */ \
+ OP(ReleaseShaderCompiler) /* 399 */ \
+ OP(RenderbufferStorage) /* 400 */ \
+ OP(ResumeTransformFeedback) /* 401 */ \
+ OP(SampleCoverage) /* 402 */ \
+ OP(SamplerParameterf) /* 403 */ \
+ OP(SamplerParameterfvImmediate) /* 404 */ \
+ OP(SamplerParameteri) /* 405 */ \
+ OP(SamplerParameterivImmediate) /* 406 */ \
+ OP(Scissor) /* 407 */ \
+ OP(ShaderBinary) /* 408 */ \
+ OP(ShaderSourceBucket) /* 409 */ \
+ OP(MultiDrawBeginCHROMIUM) /* 410 */ \
+ OP(MultiDrawEndCHROMIUM) /* 411 */ \
+ OP(MultiDrawArraysCHROMIUM) /* 412 */ \
+ OP(MultiDrawArraysInstancedCHROMIUM) /* 413 */ \
+ OP(MultiDrawArraysInstancedBaseInstanceCHROMIUM) /* 414 */ \
+ OP(MultiDrawElementsCHROMIUM) /* 415 */ \
+ OP(MultiDrawElementsInstancedCHROMIUM) /* 416 */ \
+ OP(MultiDrawElementsInstancedBaseVertexBaseInstanceCHROMIUM) /* 417 */ \
+ OP(StencilFunc) /* 418 */ \
+ OP(StencilFuncSeparate) /* 419 */ \
+ OP(StencilMask) /* 420 */ \
+ OP(StencilMaskSeparate) /* 421 */ \
+ OP(StencilOp) /* 422 */ \
+ OP(StencilOpSeparate) /* 423 */ \
+ OP(TexImage2D) /* 424 */ \
+ OP(TexImage3D) /* 425 */ \
+ OP(TexParameterf) /* 426 */ \
+ OP(TexParameterfvImmediate) /* 427 */ \
+ OP(TexParameteri) /* 428 */ \
+ OP(TexParameterivImmediate) /* 429 */ \
+ OP(TexStorage3D) /* 430 */ \
+ OP(TexSubImage2D) /* 431 */ \
+ OP(TexSubImage3D) /* 432 */ \
+ OP(TransformFeedbackVaryingsBucket) /* 433 */ \
+ OP(Uniform1f) /* 434 */ \
+ OP(Uniform1fvImmediate) /* 435 */ \
+ OP(Uniform1i) /* 436 */ \
+ OP(Uniform1ivImmediate) /* 437 */ \
+ OP(Uniform1ui) /* 438 */ \
+ OP(Uniform1uivImmediate) /* 439 */ \
+ OP(Uniform2f) /* 440 */ \
+ OP(Uniform2fvImmediate) /* 441 */ \
+ OP(Uniform2i) /* 442 */ \
+ OP(Uniform2ivImmediate) /* 443 */ \
+ OP(Uniform2ui) /* 444 */ \
+ OP(Uniform2uivImmediate) /* 445 */ \
+ OP(Uniform3f) /* 446 */ \
+ OP(Uniform3fvImmediate) /* 447 */ \
+ OP(Uniform3i) /* 448 */ \
+ OP(Uniform3ivImmediate) /* 449 */ \
+ OP(Uniform3ui) /* 450 */ \
+ OP(Uniform3uivImmediate) /* 451 */ \
+ OP(Uniform4f) /* 452 */ \
+ OP(Uniform4fvImmediate) /* 453 */ \
+ OP(Uniform4i) /* 454 */ \
+ OP(Uniform4ivImmediate) /* 455 */ \
+ OP(Uniform4ui) /* 456 */ \
+ OP(Uniform4uivImmediate) /* 457 */ \
+ OP(UniformBlockBinding) /* 458 */ \
+ OP(UniformMatrix2fvImmediate) /* 459 */ \
+ OP(UniformMatrix2x3fvImmediate) /* 460 */ \
+ OP(UniformMatrix2x4fvImmediate) /* 461 */ \
+ OP(UniformMatrix3fvImmediate) /* 462 */ \
+ OP(UniformMatrix3x2fvImmediate) /* 463 */ \
+ OP(UniformMatrix3x4fvImmediate) /* 464 */ \
+ OP(UniformMatrix4fvImmediate) /* 465 */ \
+ OP(UniformMatrix4x2fvImmediate) /* 466 */ \
+ OP(UniformMatrix4x3fvImmediate) /* 467 */ \
+ OP(UseProgram) /* 468 */ \
+ OP(ValidateProgram) /* 469 */ \
+ OP(VertexAttrib1f) /* 470 */ \
+ OP(VertexAttrib1fvImmediate) /* 471 */ \
+ OP(VertexAttrib2f) /* 472 */ \
+ OP(VertexAttrib2fvImmediate) /* 473 */ \
+ OP(VertexAttrib3f) /* 474 */ \
+ OP(VertexAttrib3fvImmediate) /* 475 */ \
+ OP(VertexAttrib4f) /* 476 */ \
+ OP(VertexAttrib4fvImmediate) /* 477 */ \
+ OP(VertexAttribI4i) /* 478 */ \
+ OP(VertexAttribI4ivImmediate) /* 479 */ \
+ OP(VertexAttribI4ui) /* 480 */ \
+ OP(VertexAttribI4uivImmediate) /* 481 */ \
+ OP(VertexAttribIPointer) /* 482 */ \
+ OP(VertexAttribPointer) /* 483 */ \
+ OP(Viewport) /* 484 */ \
+ OP(WaitSync) /* 485 */ \
+ OP(BlitFramebufferCHROMIUM) /* 486 */ \
+ OP(RenderbufferStorageMultisampleCHROMIUM) /* 487 */ \
+ OP(RenderbufferStorageMultisampleAdvancedAMD) /* 488 */ \
+ OP(RenderbufferStorageMultisampleEXT) /* 489 */ \
+ OP(FramebufferTexture2DMultisampleEXT) /* 490 */ \
+ OP(TexStorage2DEXT) /* 491 */ \
+ OP(GenQueriesEXTImmediate) /* 492 */ \
+ OP(DeleteQueriesEXTImmediate) /* 493 */ \
+ OP(QueryCounterEXT) /* 494 */ \
+ OP(BeginQueryEXT) /* 495 */ \
+ OP(BeginTransformFeedback) /* 496 */ \
+ OP(EndQueryEXT) /* 497 */ \
+ OP(EndTransformFeedback) /* 498 */ \
+ OP(SetDisjointValueSyncCHROMIUM) /* 499 */ \
+ OP(InsertEventMarkerEXT) /* 500 */ \
+ OP(PushGroupMarkerEXT) /* 501 */ \
+ OP(PopGroupMarkerEXT) /* 502 */ \
+ OP(GenVertexArraysOESImmediate) /* 503 */ \
+ OP(DeleteVertexArraysOESImmediate) /* 504 */ \
+ OP(IsVertexArrayOES) /* 505 */ \
+ OP(BindVertexArrayOES) /* 506 */ \
+ OP(FramebufferParameteri) /* 507 */ \
+ OP(BindImageTexture) /* 508 */ \
+ OP(DispatchCompute) /* 509 */ \
+ OP(DispatchComputeIndirect) /* 510 */ \
+ OP(DrawArraysIndirect) /* 511 */ \
+ OP(DrawElementsIndirect) /* 512 */ \
+ OP(GetProgramInterfaceiv) /* 513 */ \
+ OP(GetProgramResourceIndex) /* 514 */ \
+ OP(GetProgramResourceName) /* 515 */ \
+ OP(GetProgramResourceiv) /* 516 */ \
+ OP(GetProgramResourceLocation) /* 517 */ \
+ OP(MemoryBarrierEXT) /* 518 */ \
+ OP(MemoryBarrierByRegion) /* 519 */ \
+ OP(SwapBuffers) /* 520 */ \
+ OP(GetMaxValueInBufferCHROMIUM) /* 521 */ \
+ OP(EnableFeatureCHROMIUM) /* 522 */ \
+ OP(MapBufferRange) /* 523 */ \
+ OP(UnmapBuffer) /* 524 */ \
+ OP(FlushMappedBufferRange) /* 525 */ \
+ OP(ResizeCHROMIUM) /* 526 */ \
+ OP(GetRequestableExtensionsCHROMIUM) /* 527 */ \
+ OP(RequestExtensionCHROMIUM) /* 528 */ \
+ OP(GetProgramInfoCHROMIUM) /* 529 */ \
+ OP(GetUniformBlocksCHROMIUM) /* 530 */ \
+ OP(GetTransformFeedbackVaryingsCHROMIUM) /* 531 */ \
+ OP(GetUniformsES3CHROMIUM) /* 532 */ \
+ OP(DescheduleUntilFinishedCHROMIUM) /* 533 */ \
+ OP(GetTranslatedShaderSourceANGLE) /* 534 */ \
+ OP(PostSubBufferCHROMIUM) /* 535 */ \
+ OP(CopyTextureCHROMIUM) /* 536 */ \
+ OP(CopySubTextureCHROMIUM) /* 537 */ \
+ OP(DrawArraysInstancedANGLE) /* 538 */ \
+ OP(DrawArraysInstancedBaseInstanceANGLE) /* 539 */ \
+ OP(DrawElementsInstancedANGLE) /* 540 */ \
+ OP(DrawElementsInstancedBaseVertexBaseInstanceANGLE) /* 541 */ \
+ OP(VertexAttribDivisorANGLE) /* 542 */ \
+ OP(ProduceTextureDirectCHROMIUMImmediate) /* 543 */ \
+ OP(CreateAndConsumeTextureINTERNALImmediate) /* 544 */ \
+ OP(BindUniformLocationCHROMIUMBucket) /* 545 */ \
+ OP(BindTexImage2DCHROMIUM) /* 546 */ \
+ OP(BindTexImage2DWithInternalformatCHROMIUM) /* 547 */ \
+ OP(ReleaseTexImage2DCHROMIUM) /* 548 */ \
+ OP(TraceBeginCHROMIUM) /* 549 */ \
+ OP(TraceEndCHROMIUM) /* 550 */ \
+ OP(DiscardFramebufferEXTImmediate) /* 551 */ \
+ OP(LoseContextCHROMIUM) /* 552 */ \
+ OP(UnpremultiplyAndDitherCopyCHROMIUM) /* 553 */ \
+ OP(DrawBuffersEXTImmediate) /* 554 */ \
+ OP(DiscardBackbufferCHROMIUM) /* 555 */ \
+ OP(ScheduleOverlayPlaneCHROMIUM) /* 556 */ \
+ OP(ScheduleCALayerSharedStateCHROMIUM) /* 557 */ \
+ OP(ScheduleCALayerCHROMIUM) /* 558 */ \
+ OP(ScheduleCALayerInUseQueryCHROMIUMImmediate) /* 559 */ \
+ OP(CommitOverlayPlanesCHROMIUM) /* 560 */ \
+ OP(FlushDriverCachesCHROMIUM) /* 561 */ \
+ OP(ScheduleDCLayerCHROMIUM) /* 562 */ \
+ OP(SetActiveURLCHROMIUM) /* 563 */ \
+ OP(ContextVisibilityHintCHROMIUM) /* 564 */ \
+ OP(CoverageModulationCHROMIUM) /* 565 */ \
+ OP(BlendBarrierKHR) /* 566 */ \
+ OP(BindFragDataLocationIndexedEXTBucket) /* 567 */ \
+ OP(BindFragDataLocationEXTBucket) /* 568 */ \
+ OP(GetFragDataIndexEXT) /* 569 */ \
+ OP(UniformMatrix4fvStreamTextureMatrixCHROMIUMImmediate) /* 570 */ \
+ OP(OverlayPromotionHintCHROMIUM) /* 571 */ \
+ OP(SwapBuffersWithBoundsCHROMIUMImmediate) /* 572 */ \
+ OP(SetDrawRectangleCHROMIUM) /* 573 */ \
+ OP(SetEnableDCLayersCHROMIUM) /* 574 */ \
+ OP(InitializeDiscardableTextureCHROMIUM) /* 575 */ \
+ OP(UnlockDiscardableTextureCHROMIUM) /* 576 */ \
+ OP(LockDiscardableTextureCHROMIUM) /* 577 */ \
+ OP(TexStorage2DImageCHROMIUM) /* 578 */ \
+ OP(SetColorSpaceMetadataCHROMIUM) /* 579 */ \
+ OP(WindowRectanglesEXTImmediate) /* 580 */ \
+ OP(CreateGpuFenceINTERNAL) /* 581 */ \
+ OP(WaitGpuFenceCHROMIUM) /* 582 */ \
+ OP(DestroyGpuFenceCHROMIUM) /* 583 */ \
+ OP(SetReadbackBufferShadowAllocationINTERNAL) /* 584 */ \
+ OP(FramebufferTextureMultiviewOVR) /* 585 */ \
+ OP(MaxShaderCompilerThreadsKHR) /* 586 */ \
+ OP(CreateAndTexStorage2DSharedImageINTERNALImmediate) /* 587 */ \
+ OP(BeginSharedImageAccessDirectCHROMIUM) /* 588 */ \
+ OP(EndSharedImageAccessDirectCHROMIUM) /* 589 */ \
+ OP(BeginBatchReadAccessSharedImageCHROMIUM) /* 590 */ \
+ OP(EndBatchReadAccessSharedImageCHROMIUM) /* 591 */ \
+ OP(EnableiOES) /* 592 */ \
+ OP(DisableiOES) /* 593 */ \
+ OP(BlendEquationiOES) /* 594 */ \
+ OP(BlendEquationSeparateiOES) /* 595 */ \
+ OP(BlendFunciOES) /* 596 */ \
+ OP(BlendFuncSeparateiOES) /* 597 */ \
+ OP(ColorMaskiOES) /* 598 */ \
+ OP(IsEnablediOES) /* 599 */
enum CommandId {
kOneBeforeStartPoint =
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
index 5a062329521..90a60f27ac2 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.cc
@@ -15,7 +15,8 @@
#include <sstream>
-#include "base/logging.h"
+#include "base/check_op.h"
+#include "base/notreached.h"
#include "base/numerics/safe_math.h"
#include "base/stl_util.h"
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
index 238fce0975b..4d6be9b2fb4 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils.h
@@ -14,7 +14,7 @@
#include <limits>
#include <string>
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/numerics/safe_math.h"
#include "gpu/command_buffer/common/gles2_utils_export.h"
diff --git a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
index f7510c53ddd..3ceac4dfc5b 100644
--- a/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/common/gles2_cmd_utils_implementation_autogen.h
@@ -2685,6 +2685,10 @@ static const GLES2Util::EnumToString enum_to_string_table[] = {
"GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM",
},
{
+ 0x8AF8,
+ "GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM",
+ },
+ {
0x8B30,
"GL_FRAGMENT_SHADER",
},
@@ -7447,6 +7451,13 @@ std::string GLES2Util::GetStringIndexedGLState(uint32_t value) {
{GL_UNIFORM_BUFFER_BINDING, "GL_UNIFORM_BUFFER_BINDING"},
{GL_UNIFORM_BUFFER_SIZE, "GL_UNIFORM_BUFFER_SIZE"},
{GL_UNIFORM_BUFFER_START, "GL_UNIFORM_BUFFER_START"},
+ {GL_BLEND_EQUATION_RGB, "GL_BLEND_EQUATION_RGB"},
+ {GL_BLEND_EQUATION_ALPHA, "GL_BLEND_EQUATION_ALPHA"},
+ {GL_BLEND_SRC_RGB, "GL_BLEND_SRC_RGB"},
+ {GL_BLEND_SRC_ALPHA, "GL_BLEND_SRC_ALPHA"},
+ {GL_BLEND_DST_RGB, "GL_BLEND_DST_RGB"},
+ {GL_BLEND_DST_ALPHA, "GL_BLEND_DST_ALPHA"},
+ {GL_COLOR_WRITEMASK, "GL_COLOR_WRITEMASK"},
};
return GLES2Util::GetQualifiedEnumString(string_table,
base::size(string_table), value);
@@ -7777,6 +7788,8 @@ std::string GLES2Util::GetStringShaderType(uint32_t value) {
std::string GLES2Util::GetStringSharedImageAccessMode(uint32_t value) {
static const EnumToString string_table[] = {
+ {GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM,
+ "GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM"},
{GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM,
"GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM"},
{GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM,
diff --git a/chromium/gpu/command_buffer/common/mailbox.h b/chromium/gpu/command_buffer/common/mailbox.h
index 5cd20835b43..9c68b87b316 100644
--- a/chromium/gpu/command_buffer/common/mailbox.h
+++ b/chromium/gpu/command_buffer/common/mailbox.h
@@ -10,7 +10,7 @@
#include <string>
-#include "gpu/gpu_export.h"
+#include "base/component_export.h"
// From gl2/gl2ext.h.
#ifndef GL_MAILBOX_SIZE_CHROMIUM
@@ -26,7 +26,7 @@ namespace gpu {
// name is valid.
// See src/gpu/GLES2/extensions/CHROMIUM/CHROMIUM_texture_mailbox.txt for more
// details.
-struct GPU_EXPORT Mailbox {
+struct COMPONENT_EXPORT(GPU_MAILBOX) Mailbox {
using Name = int8_t[GL_MAILBOX_SIZE_CHROMIUM];
Mailbox();
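
Mailbox moves from the monolithic GPU_EXPORT to base's per-component export macro so the struct can be linked into its own shared-library component. Roughly, COMPONENT_EXPORT(GPU_MAILBOX) selects dllexport or dllimport (or the ELF visibility attribute) depending on whether the current target defines IS_GPU_MAILBOX_IMPL; a simplified sketch of the Windows component-build case, not the real base/component_export.h logic:

    // Simplified sketch; the real macro also covers POSIX visibility
    // attributes and static (non-component) builds.
    #if defined(IS_GPU_MAILBOX_IMPL)
    #define GPU_MAILBOX_EXPORT_SKETCH __declspec(dllexport)  // building the DLL
    #else
    #define GPU_MAILBOX_EXPORT_SKETCH __declspec(dllimport)  // consuming it
    #endif
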
diff --git a/chromium/gpu/command_buffer/common/raster_cmd_format.h b/chromium/gpu/command_buffer/common/raster_cmd_format.h
index 3e7a7e1b7f1..c8b8b072c68 100644
--- a/chromium/gpu/command_buffer/common/raster_cmd_format.h
+++ b/chromium/gpu/command_buffer/common/raster_cmd_format.h
@@ -12,7 +12,6 @@
#include <string.h>
#include "base/atomicops.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/command_buffer/common/bitfield_helpers.h"
diff --git a/chromium/gpu/command_buffer/common/swap_buffers_complete_params.h b/chromium/gpu/command_buffer/common/swap_buffers_complete_params.h
index c2971e249c4..766c0bcb20b 100644
--- a/chromium/gpu/command_buffer/common/swap_buffers_complete_params.h
+++ b/chromium/gpu/command_buffer/common/swap_buffers_complete_params.h
@@ -5,8 +5,10 @@
#ifndef GPU_COMMAND_BUFFER_COMMON_SWAP_BUFFERS_COMPLETE_PARAMS_H_
#define GPU_COMMAND_BUFFER_COMMON_SWAP_BUFFERS_COMPLETE_PARAMS_H_
+#include "base/optional.h"
#include "gpu/command_buffer/common/texture_in_use_response.h"
#include "ui/gfx/ca_layer_params.h"
+#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/swap_result.h"
namespace gpu {
@@ -20,6 +22,12 @@ struct GPU_EXPORT SwapBuffersCompleteParams {
~SwapBuffersCompleteParams();
gfx::SwapResponse swap_response;
+
+ // Damage area of the current backing buffer compared to the previously
+ // swapped buffer. The renderer can use it as a hint to minimize the drawing
+ // area for the next frame.
+ base::Optional<gfx::Rect> frame_buffer_damage_area;
+
// Used only on macOS, for coordinating IOSurface reuse with the system
// WindowServer.
gpu::TextureInUseResponses texture_in_use_responses;
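
The new frame_buffer_damage_area field is purely advisory. A client that tracks its own per-frame damage can union in the buffer's staleness before painting; an absent value means no hint is available. A hypothetical consumer sketch (OnSwapBuffersComplete, ScheduleRedraw, and full_viewport are illustrative names, not part of this patch):

    void OnSwapBuffersComplete(const gpu::SwapBuffersCompleteParams& params,
                               gfx::Rect own_damage,
                               const gfx::Rect& full_viewport) {
      // The backing buffer we draw into next may be stale in the reported
      // area, so the next frame must repaint that region in addition to its
      // own damage. Without a hint, assume everything changed.
      if (params.frame_buffer_damage_area)
        own_damage.Union(*params.frame_buffer_damage_area);
      else
        own_damage = full_viewport;
      ScheduleRedraw(own_damage);
    }
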
diff --git a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
index 6115cb03744..bdc472c3878 100644
--- a/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
+++ b/chromium/gpu/command_buffer/gles2_cmd_buffer_functions.txt
@@ -87,6 +87,7 @@ GL_APICALL void GL_APIENTRY glGetActiveUniformsiv (GLidProgram program,
GL_APICALL void GL_APIENTRY glGetAttachedShaders (GLidProgram program, GLsizeiNotNegative maxcount, GLsizeiOptional* count, GLuint* shaders);
GL_APICALL GLint GL_APIENTRY glGetAttribLocation (GLidProgram program, const char* name);
GL_APICALL void GL_APIENTRY glGetBooleanv (GLenumGLState pname, GLboolean* params);
+GL_APICALL void GL_APIENTRY glGetBooleani_v (GLenumIndexedGLState pname, GLuint index, GLboolean* data);
GL_APICALL void GL_APIENTRY glGetBufferParameteri64v (GLenumBufferTarget target, GLenumBufferParameter64 pname, GLint64* params);
GL_APICALL void GL_APIENTRY glGetBufferParameteriv (GLenumBufferTarget target, GLenumBufferParameter pname, GLint* params);
GL_APICALL GLenum GL_APIENTRY glGetError (void);
@@ -415,3 +416,13 @@ GL_APICALL void GL_APIENTRY glBeginSharedImageAccessDirectCHROMIUM (GLui
GL_APICALL void GL_APIENTRY glEndSharedImageAccessDirectCHROMIUM (GLuint texture);
GL_APICALL void GL_APIENTRY glBeginBatchReadAccessSharedImageCHROMIUM (void);
GL_APICALL void GL_APIENTRY glEndBatchReadAccessSharedImageCHROMIUM (void);
+
+// Extension OES_draw_buffers_indexed
+GL_APICALL void GL_APIENTRY glEnableiOES (GLenum target, GLuint index);
+GL_APICALL void GL_APIENTRY glDisableiOES (GLenum target, GLuint index);
+GL_APICALL void GL_APIENTRY glBlendEquationiOES (GLuint buf, GLenum mode);
+GL_APICALL void GL_APIENTRY glBlendEquationSeparateiOES (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
+GL_APICALL void GL_APIENTRY glBlendFunciOES (GLuint buf, GLenum src, GLenum dst);
+GL_APICALL void GL_APIENTRY glBlendFuncSeparateiOES (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+GL_APICALL void GL_APIENTRY glColorMaskiOES (GLuint buf, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
+GL_APICALL GLboolean GL_APIENTRY glIsEnablediOES (GLenum target, GLuint index);
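
These entry points mirror OES_draw_buffers_indexed, which scopes blend and color-mask state to individual draw buffers instead of the whole framebuffer. A client-side usage sketch, assuming the context actually exposed GL_OES_draw_buffers_indexed (per the feature_info.cc change below, only the passthrough decoder does):

    // Blend into color attachment 0 only; attachment 1 stays opaque and never
    // receives alpha writes.
    glEnableiOES(GL_BLEND, 0);
    glBlendFuncSeparateiOES(0, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA,
                            GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
    glDisableiOES(GL_BLEND, 1);
    glColorMaskiOES(1, GL_TRUE, GL_TRUE, GL_TRUE, GL_FALSE);
    GLboolean blending0 = glIsEnablediOES(GL_BLEND, 0);  // per-index query
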
diff --git a/chromium/gpu/command_buffer/service/BUILD.gn b/chromium/gpu/command_buffer/service/BUILD.gn
index b6ad2bd114b..16cb6065726 100644
--- a/chromium/gpu/command_buffer/service/BUILD.gn
+++ b/chromium/gpu/command_buffer/service/BUILD.gn
@@ -227,6 +227,7 @@ target(link_target_type, "gles2_sources") {
"shared_image_backing_factory.h",
"shared_image_backing_factory_gl_texture.cc",
"shared_image_backing_factory_gl_texture.h",
+ "shared_image_backing_factory_gl_texture_internal.h",
"shared_image_factory.cc",
"shared_image_factory.h",
"shared_image_manager.cc",
@@ -235,6 +236,8 @@ target(link_target_type, "gles2_sources") {
"shared_image_representation.h",
"shared_image_representation_skia_gl.cc",
"shared_image_representation_skia_gl.h",
+ "shared_memory_region_wrapper.cc",
+ "shared_memory_region_wrapper.h",
"skia_utils.cc",
"skia_utils.h",
"texture_definition.cc",
diff --git a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
index 9d0d7f74222..43337b5c2ba 100644
--- a/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/abstract_texture_impl_shared_context_state.cc
@@ -46,10 +46,10 @@ AbstractTextureImplOnSharedContext::AbstractTextureImplOnSharedContext(
texture_ = new gpu::gles2::Texture(service_id);
texture_->SetLightweightRef();
texture_->SetTarget(target, 1);
- texture_->sampler_state_.min_filter = GL_LINEAR;
- texture_->sampler_state_.mag_filter = GL_LINEAR;
- texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture_->set_min_filter(GL_LINEAR);
+ texture_->set_mag_filter(GL_LINEAR);
+ texture_->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture_->set_wrap_s(GL_CLAMP_TO_EDGE);
gfx::Rect cleared_rect;
texture_->SetLevelInfo(target, 0, internal_format, width, height, depth,
border, format, type, cleared_rect);
diff --git a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
index 5224766c14d..a53dae3bdcd 100644
--- a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
+++ b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.cc
@@ -6,10 +6,102 @@
#include <android/hardware_buffer.h>
+#include "base/android/scoped_hardware_buffer_handle.h"
#include "base/check.h"
#include "base/notreached.h"
+#include "components/viz/common/gpu/vulkan_context_provider.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "gpu/command_buffer/service/shared_context_state.h"
+#include "gpu/command_buffer/service/texture_manager.h"
+#include "gpu/vulkan/vulkan_image.h"
+#include "ui/gfx/color_space.h"
+#include "ui/gfx/geometry/size.h"
+#include "ui/gl/gl_gl_api_implementation.h"
+#include "ui/gl/gl_image_ahardwarebuffer.h"
+#include "ui/gl/scoped_binders.h"
namespace gpu {
+namespace {
+
+gles2::Texture* MakeGLTexture(
+ GLenum target,
+ GLuint service_id,
+ scoped_refptr<gl::GLImageAHardwareBuffer> egl_image,
+ const gfx::Size& size,
+ const gfx::Rect& cleared_rect) {
+ auto* texture = new gles2::Texture(service_id);
+ texture->SetLightweightRef();
+ texture->SetTarget(target, 1);
+ texture->set_min_filter(GL_LINEAR);
+ texture->set_mag_filter(GL_LINEAR);
+ texture->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture->set_wrap_s(GL_CLAMP_TO_EDGE);
+
+ texture->SetLevelInfo(target, 0, egl_image->GetInternalFormat(), size.width(),
+ size.height(), 1, 0, egl_image->GetDataFormat(),
+ egl_image->GetDataType(), cleared_rect);
+ texture->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
+ texture->SetImmutable(true, false);
+ return texture;
+}
+
+scoped_refptr<gles2::TexturePassthrough> MakeGLTexturePassthrough(
+ GLenum target,
+ GLuint service_id,
+ scoped_refptr<gl::GLImageAHardwareBuffer> egl_image,
+ const size_t estimated_size) {
+ auto passthrough_texture =
+ base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
+ passthrough_texture->SetEstimatedSize(estimated_size);
+ passthrough_texture->SetLevelImage(target, 0, egl_image.get());
+ passthrough_texture->set_is_bind_pending(false);
+ return passthrough_texture;
+}
+
+void GenGLTextureInternal(
+ AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect,
+ scoped_refptr<gles2::TexturePassthrough>* passthrough_texture,
+ gles2::Texture** texture) {
+ gl::GLApi* api = gl::g_current_gl_context;
+ GLuint service_id = 0;
+ api->glGenTexturesFn(1, &service_id);
+ gl::ScopedTextureBinder texture_binder(target, service_id);
+
+ api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+
+ // Create an EGL image from the AHardwareBuffer.
+ auto egl_image = base::MakeRefCounted<gl::GLImageAHardwareBuffer>(size);
+ if (!egl_image->Initialize(buffer, false)) {
+ LOG(ERROR) << "Failed to create EGL image";
+ api->glDeleteTexturesFn(1, &service_id);
+ return;
+ }
+
+ if (!egl_image->BindTexImage(target)) {
+ LOG(ERROR) << "Failed to bind egl image";
+ api->glDeleteTexturesFn(1, &service_id);
+ return;
+ }
+ egl_image->SetColorSpace(color_space);
+
+ if (passthrough_texture) {
+ *passthrough_texture = MakeGLTexturePassthrough(
+ target, service_id, std::move(egl_image), estimated_size);
+ } else {
+ *texture = MakeGLTexture(target, service_id, std::move(egl_image), size,
+ cleared_rect);
+ }
+}
+
+} // namespace
bool AHardwareBufferSupportedFormat(viz::ResourceFormat format) {
switch (format) {
@@ -46,4 +138,46 @@ unsigned int AHardwareBufferFormat(viz::ResourceFormat format) {
}
}
+gles2::Texture* GenGLTexture(AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect) {
+ gles2::Texture* texture = nullptr;
+ GenGLTextureInternal(buffer, target, color_space, size, estimated_size,
+ cleared_rect, nullptr /* passthrough_texture */,
+ &texture);
+ return texture;
+}
+
+scoped_refptr<gles2::TexturePassthrough> GenGLTexturePassthrough(
+ AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect) {
+ scoped_refptr<gles2::TexturePassthrough> passthrough_texture;
+ GenGLTextureInternal(buffer, target, color_space, size, estimated_size,
+ cleared_rect, &passthrough_texture,
+ nullptr /* texture */);
+ return passthrough_texture;
+}
+
+std::unique_ptr<VulkanImage> CreateVkImageFromAhbHandle(
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ SharedContextState* context_state,
+ const gfx::Size& size,
+ const viz::ResourceFormat& format) {
+ DCHECK(context_state);
+ DCHECK(context_state->GrContextIsVulkan());
+
+ auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
+ gfx::GpuMemoryBufferHandle gmb_handle(std::move(ahb_handle));
+ return VulkanImage::CreateFromGpuMemoryBufferHandle(
+ device_queue, std::move(gmb_handle), size, ToVkFormat(format),
+ 0 /* usage */);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h
index 77a32393676..a3106ae53a5 100644
--- a/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h
+++ b/chromium/gpu/command_buffer/service/ahardwarebuffer_utils.h
@@ -5,10 +5,36 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_AHARDWAREBUFFER_UTILS_H_
#define GPU_COMMAND_BUFFER_SERVICE_AHARDWAREBUFFER_UTILS_H_
+#include <memory>
+
+#include "base/memory/scoped_refptr.h"
#include "components/viz/common/resources/resource_format.h"
#include "gpu/gpu_gles2_export.h"
+extern "C" typedef struct AHardwareBuffer AHardwareBuffer;
+
+typedef unsigned int GLenum;
+
+namespace base {
+namespace android {
+class ScopedHardwareBufferHandle;
+} // namespace android
+} // namespace base
+
+namespace gfx {
+class ColorSpace;
+class Rect;
+class Size;
+} // namespace gfx
+
namespace gpu {
+class SharedContextState;
+class VulkanImage;
+
+namespace gles2 {
+class Texture;
+class TexturePassthrough;
+} // namespace gles2
// TODO(vikassoni): In future we will need to expose the set of formats and
// constraints (e.g. max size) to the clients somehow that are available for
@@ -25,6 +51,33 @@ AHardwareBufferSupportedFormat(viz::ResourceFormat format);
// Returns the corresponding AHardwareBuffer format.
unsigned int GPU_GLES2_EXPORT AHardwareBufferFormat(viz::ResourceFormat format);
+// Generates a gles2 texture from AHB. This method must be called with a current
+// GLContext which will be used to create the Texture. This method adds a
+// lightweight ref on the Texture which the caller is responsible for releasing.
+gles2::Texture* GenGLTexture(AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect);
+
+// Generates a passthrough texture from AHB. This method must be called with a
+// current GLContext which will be used to create the Texture.
+scoped_refptr<gles2::TexturePassthrough> GenGLTexturePassthrough(
+ AHardwareBuffer* buffer,
+ GLenum target,
+ const gfx::ColorSpace& color_space,
+ const gfx::Size& size,
+ const size_t estimated_size,
+ const gfx::Rect& cleared_rect);
+
+// Creates a Vulkan image from the AHB handle.
+std::unique_ptr<VulkanImage> CreateVkImageFromAhbHandle(
+ base::android::ScopedHardwareBufferHandle ahb_handle,
+ SharedContextState* context_state,
+ const gfx::Size& size,
+ const viz::ResourceFormat& format);
+
} // namespace gpu
#endif // GPU_COMMAND_BUFFER_SERVICE_AHARDWAREBUFFER_UTILS_H_
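
A hypothetical caller of the new helpers, for example an AHardwareBuffer-backed SharedImage on the validating-decoder path (buffer acquisition, sizes, and the representation step are illustrative):

    AHardwareBuffer* buffer = AcquireBufferFromProducer();  // hypothetical
    gfx::Size size(256, 256);
    gles2::Texture* texture = gpu::GenGLTexture(
        buffer, GL_TEXTURE_2D, gfx::ColorSpace::CreateSRGB(), size,
        /*estimated_size=*/256 * 256 * 4, gfx::Rect(size));
    if (texture) {
      // ... wrap |texture| in a SharedImage representation ...
      // GenGLTexture added a lightweight ref; the caller must release it.
      texture->RemoveLightweightRef(/*have_context=*/true);
    }
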
diff --git a/chromium/gpu/command_buffer/service/buffer_manager.h b/chromium/gpu/command_buffer/service/buffer_manager.h
index 86c3561104a..67b042d99af 100644
--- a/chromium/gpu/command_buffer/service/buffer_manager.h
+++ b/chromium/gpu/command_buffer/service/buffer_manager.h
@@ -14,7 +14,7 @@
#include <unordered_map>
#include <vector>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/common/buffer.h"
diff --git a/chromium/gpu/command_buffer/service/client_service_map.h b/chromium/gpu/command_buffer/service/client_service_map.h
index d19484ee1a7..56caf0d3b2c 100644
--- a/chromium/gpu/command_buffer/service/client_service_map.h
+++ b/chromium/gpu/command_buffer/service/client_service_map.h
@@ -9,7 +9,7 @@
#include <unordered_map>
#include <vector>
-#include "base/logging.h"
+#include "base/check.h"
namespace gpu {
diff --git a/chromium/gpu/command_buffer/service/context_group.cc b/chromium/gpu/command_buffer/service/context_group.cc
index 51452b9ce43..f05b4688919 100644
--- a/chromium/gpu/command_buffer/service/context_group.cc
+++ b/chromium/gpu/command_buffer/service/context_group.cc
@@ -58,6 +58,7 @@ DisallowedFeatures AdjustDisallowedFeatures(
adjusted_disallowed_features.ext_texture_filter_anisotropic = true;
adjusted_disallowed_features.ext_float_blend = true;
adjusted_disallowed_features.oes_fbo_render_mipmap = true;
+ adjusted_disallowed_features.oes_draw_buffers_indexed = true;
}
return adjusted_disallowed_features;
}
diff --git a/chromium/gpu/command_buffer/service/context_state.h b/chromium/gpu/command_buffer/service/context_state.h
index 531313953a4..3f04a3dd6aa 100644
--- a/chromium/gpu/command_buffer/service/context_state.h
+++ b/chromium/gpu/command_buffer/service/context_state.h
@@ -10,7 +10,8 @@
#include <memory>
#include <vector>
-#include "base/logging.h"
+#include "base/check_op.h"
+#include "base/notreached.h"
#include "gpu/command_buffer/service/gl_utils.h"
#include "gpu/command_buffer/service/sampler_manager.h"
#include "gpu/command_buffer/service/shader_manager.h"
diff --git a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
index ec299201fb8..eb55bbd0845 100644
--- a/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
+++ b/chromium/gpu/command_buffer/service/copy_texture_chromium_mock.h
@@ -116,51 +116,6 @@ class MockCopyTextureResourceManager
bool dither,
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) override {}
- void DoCopySubTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLsizei dest_width,
- GLsizei dest_height,
- GLsizei source_width,
- GLsizei source_height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTexImageResourceManager* luma_emulation_blitter) override {}
- void DoCopyTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_format,
- GLsizei width,
- GLsizei height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTextureMethod method,
- CopyTexImageResourceManager* luma_emulation_blitter) override {}
private:
DISALLOW_COPY_AND_ASSIGN(MockCopyTextureResourceManager);
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
index 05778fdf5b7..5e38edb1270 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.cc
@@ -8,9 +8,7 @@
#include <vector>
#include "base/stl_util.h"
-#include "base/system/sys_info.h"
#include "build/build_config.h"
-#include "components/viz/common/resources/resource_format_utils.h"
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/service/external_vk_image_gl_representation.h"
#include "gpu/command_buffer/service/external_vk_image_skia_representation.h"
@@ -23,6 +21,7 @@
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_image.h"
#include "gpu/vulkan/vulkan_util.h"
+#include "third_party/skia/include/gpu/GrBackendSemaphore.h"
#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/buildflags.h"
#include "ui/gl/gl_context.h"
@@ -138,8 +137,19 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
VkFormat vk_format = ToVkFormat(format);
- VkImageUsageFlags vk_usage =
- VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+
+ constexpr auto kUsageNeedsColorAttachment =
+ SHARED_IMAGE_USAGE_GLES2 | SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_OOP_RASTERIZATION | SHARED_IMAGE_USAGE_WEBGPU;
+ VkImageUsageFlags vk_usage = VK_IMAGE_USAGE_SAMPLED_BIT;
+ if (usage & kUsageNeedsColorAttachment) {
+ vk_usage |= VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+ if (format == viz::ETC1) {
+ DLOG(ERROR) << "ETC1 format cannot be used as color attachment.";
+ return nullptr;
+ }
+ }
+
if (is_transfer_dst)
vk_usage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
@@ -155,10 +165,20 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
auto* vulkan_implementation =
context_state->vk_context_provider()->GetVulkanImplementation();
- VkImageCreateFlags vk_flags =
- vulkan_implementation->enforce_protected_memory()
- ? VK_IMAGE_CREATE_PROTECTED_BIT
- : 0;
+ VkImageCreateFlags vk_flags = 0;
+
+ // In protected mode mark the image as protected, except when the image needs
+ // GLES2 but not Raster usage. ANGLE currently doesn't support protected
+ // images. Some clients request both GLES2 and Raster usage (e.g. see
+ // GpuMemoryBufferVideoFramePool). In that case still allocate a protected
+ // image, which ensures the image remains usable, but it may not work in
+ // some scenarios (e.g. when the video frame is used in WebGL).
+ if (vulkan_implementation->enforce_protected_memory() &&
+ (!(usage & SHARED_IMAGE_USAGE_GLES2) ||
+ (usage & SHARED_IMAGE_USAGE_RASTER))) {
+ vk_flags |= VK_IMAGE_CREATE_PROTECTED_BIT;
+ }
+
std::unique_ptr<VulkanImage> image;
if (is_external) {
image = VulkanImage::CreateWithExternalMemory(device_queue, size, vk_format,
@@ -176,7 +196,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::Create(
color_space, usage, context_state, std::move(image), command_pool);
if (!pixel_data.empty()) {
- backing->WritePixels(
+ backing->WritePixelsWithCallback(
pixel_data.size(), 0,
base::BindOnce([](const void* data, size_t size,
void* buffer) { memcpy(buffer, data, size); },
@@ -228,73 +248,10 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
}
DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER);
- if (!base::IsValueInRangeForNumericType<size_t>(handle.stride))
- return nullptr;
- int32_t width_in_bytes = 0;
- if (!viz::ResourceSizes::MaybeWidthInBytes(size.width(), resource_format,
- &width_in_bytes)) {
- DLOG(ERROR) << "ResourceSizes::MaybeWidthInBytes() failed.";
+ SharedMemoryRegionWrapper shared_memory_wrapper;
+ if (!shared_memory_wrapper.Initialize(handle, size, resource_format))
return nullptr;
- }
-
- if (handle.stride < width_in_bytes) {
- DLOG(ERROR) << "Invalid GMB stride.";
- return nullptr;
- }
-
- auto bits_per_pixel = viz::BitsPerPixel(resource_format);
- switch (bits_per_pixel) {
- case 64:
- case 32:
- case 16:
- if (handle.stride % (bits_per_pixel / 8) != 0) {
- DLOG(ERROR) << "Invalid GMB stride.";
- return nullptr;
- }
- break;
- case 8:
- case 4:
- break;
- case 12:
- // We are not supporting YVU420 and YUV_420_BIPLANAR format.
- default:
- NOTREACHED();
- return nullptr;
- }
-
- if (!handle.region.IsValid()) {
- DLOG(ERROR) << "Invalid GMB shared memory region.";
- return nullptr;
- }
-
- base::CheckedNumeric<size_t> checked_size = handle.stride;
- checked_size *= size.height();
- if (!checked_size.IsValid()) {
- DLOG(ERROR) << "Invalid GMB size.";
- return nullptr;
- }
-
- // Minimize the amount of address space we use but make sure offset is a
- // multiple of page size as required by MapAt().
- size_t memory_offset =
- handle.offset % base::SysInfo::VMAllocationGranularity();
- size_t map_offset =
- base::SysInfo::VMAllocationGranularity() *
- (handle.offset / base::SysInfo::VMAllocationGranularity());
- checked_size += memory_offset;
- if (!checked_size.IsValid()) {
- DLOG(ERROR) << "Invalid GMB size.";
- return nullptr;
- }
-
- auto shared_memory_mapping = handle.region.MapAt(
- static_cast<off_t>(map_offset), checked_size.ValueOrDie());
-
- if (!shared_memory_mapping.IsValid()) {
- DLOG(ERROR) << "Failed to map shared memory.";
- return nullptr;
- }
auto backing = Create(context_state, command_pool, mailbox, resource_format,
size, color_space, usage, image_usage_cache,
@@ -302,8 +259,7 @@ std::unique_ptr<ExternalVkImageBacking> ExternalVkImageBacking::CreateFromGMB(
if (!backing)
return nullptr;
- backing->InstallSharedMemory(std::move(shared_memory_mapping), handle.stride,
- memory_offset);
+ backing->InstallSharedMemory(std::move(shared_memory_wrapper));
return backing;
}
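
All of the stride, size, and mapping validation deleted above now lives behind SharedMemoryRegionWrapper, added to the build in shared_memory_region_wrapper.{h,cc}. Judging only from the call sites in this patch, the wrapper's surface looks roughly like this sketch (see the header for the authoritative interface):

    SharedMemoryRegionWrapper wrapper;
    if (!wrapper.Initialize(handle, size, resource_format))
      return nullptr;  // invalid stride/size/region is rejected internally
    // Pixel upload paths then read through the wrapper instead of juggling a
    // WritableSharedMemoryMapping plus stride/offset bookkeeping by hand:
    const uint8_t* pixels = wrapper.GetMemory();
    size_t stride = wrapper.GetStride();
    base::span<const uint8_t> bytes = wrapper.GetMemoryAsSpan();
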
@@ -366,21 +322,36 @@ bool ExternalVkImageBacking::BeginAccess(
bool readonly,
std::vector<SemaphoreHandle>* semaphore_handles,
bool is_gl) {
+ DLOG_IF(ERROR, gl_reads_in_progress_ != 0 && !is_gl)
+ << "Backing is being accessed by both GL and Vulkan.";
+ // Nothing needs to be done for the second and subsequent GL read accesses.
+ if (is_gl && readonly && gl_reads_in_progress_) {
+ ++gl_reads_in_progress_;
+ return true;
+ }
+
if (readonly && !reads_in_progress_) {
UpdateContent(kInVkImage);
if (texture_)
UpdateContent(kInGLTexture);
}
+
if (!BeginAccessInternal(readonly, semaphore_handles))
return false;
if (!is_gl)
return true;
+ if (readonly) {
+ DCHECK(!gl_reads_in_progress_);
+ gl_reads_in_progress_ = 1;
+ }
+
if (use_separate_gl_texture())
return true;
DCHECK(need_synchronization());
+ DCHECK(is_gl);
auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
{
@@ -401,7 +372,7 @@ bool ExternalVkImageBacking::BeginAccess(
uint32_t vulkan_queue_index = context_state_->vk_context_provider()
->GetDeviceQueue()
->GetVulkanQueueIndex();
- // Transfer image queue faimily ownership to external, so the image can be
+ // Transfer image queue family ownership to external, so the image can be
// used by GL.
command_buffer->TransitionImageLayout(image_info.fImage, image_layout,
image_layout, vulkan_queue_index,
@@ -422,9 +393,9 @@ bool ExternalVkImageBacking::BeginAccess(
// TODO(penghuang): ask skia to do it for us to avoid this queue submission.
command_buffer->Submit(wait_semaphores.size(), wait_semaphores.data(), 1,
&signal_semaphore);
- auto end_access_semphore_handle =
+ auto end_access_semaphore_handle =
vulkan_implementation()->GetSemaphoreHandle(device(), signal_semaphore);
- semaphore_handles->push_back(std::move(end_access_semphore_handle));
+ semaphore_handles->push_back(std::move(end_access_semaphore_handle));
auto* fence_helper =
context_state_->vk_context_provider()->GetDeviceQueue()->GetFenceHelper();
@@ -440,7 +411,17 @@ bool ExternalVkImageBacking::BeginAccess(
void ExternalVkImageBacking::EndAccess(bool readonly,
SemaphoreHandle semaphore_handle,
bool is_gl) {
+ if (is_gl && readonly) {
+ DCHECK(gl_reads_in_progress_);
+ if (--gl_reads_in_progress_ > 0) {
+ DCHECK(!semaphore_handle.is_valid());
+ return;
+ }
+ }
+
+ // Only transition the image layout and queue family ownership back when
+ // this is the last GL access.
if (is_gl && !use_separate_gl_texture()) {
+ DCHECK(semaphore_handle.is_valid());
auto command_buffer = command_pool_->CreatePrimaryCommandBuffer();
{
ScopedSingleUseCommandBufferRecorder recorder(*command_buffer);
@@ -629,10 +610,10 @@ ExternalVkImageBacking::ProduceGLTexture(SharedImageManager* manager,
texture_ = new gles2::Texture(texture_service_id);
texture_->SetLightweightRef();
texture_->SetTarget(GL_TEXTURE_2D, 1);
- texture_->sampler_state_.min_filter = GL_LINEAR;
- texture_->sampler_state_.mag_filter = GL_LINEAR;
- texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture_->set_min_filter(GL_LINEAR);
+ texture_->set_mag_filter(GL_LINEAR);
+ texture_->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture_->set_wrap_s(GL_CLAMP_TO_EDGE);
// If the backing is already cleared, no need to clear it again.
gfx::Rect cleared_rect;
if (IsCleared())
@@ -689,14 +670,10 @@ ExternalVkImageBacking::ProduceSkia(
}
void ExternalVkImageBacking::InstallSharedMemory(
- base::WritableSharedMemoryMapping shared_memory_mapping,
- size_t stride,
- size_t memory_offset) {
- DCHECK(!shared_memory_mapping_.IsValid());
- DCHECK(shared_memory_mapping.IsValid());
- shared_memory_mapping_ = std::move(shared_memory_mapping);
- stride_ = stride;
- memory_offset_ = memory_offset;
+ SharedMemoryRegionWrapper shared_memory_wrapper) {
+ DCHECK(!shared_memory_wrapper_.IsValid());
+ DCHECK(shared_memory_wrapper.IsValid());
+ shared_memory_wrapper_ = std::move(shared_memory_wrapper);
Update(nullptr);
}
@@ -713,18 +690,10 @@ void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) {
if (content_flags == kInVkImage) {
if (latest_content_ & kInSharedMemory) {
- if (!shared_memory_mapping_.IsValid())
+ if (!shared_memory_wrapper_.IsValid())
return;
- auto pixel_data =
- shared_memory_mapping_.GetMemoryAsSpan<const uint8_t>().subspan(
- memory_offset_);
- if (!WritePixels(
- pixel_data.size(), stride_,
- base::BindOnce([](const void* data, size_t size,
- void* buffer) { memcpy(buffer, data, size); },
- pixel_data.data(), pixel_data.size()))) {
+ if (!WritePixels())
return;
- }
latest_content_ |=
use_separate_gl_texture() ? kInVkImage : kInVkImage | kInGLTexture;
return;
@@ -748,9 +717,10 @@ void ExternalVkImageBacking::UpdateContent(uint32_t content_flags) {
}
}
-bool ExternalVkImageBacking::WritePixels(size_t data_size,
- size_t stride,
- FillBufferCallback callback) {
+bool ExternalVkImageBacking::WritePixelsWithCallback(
+ size_t data_size,
+ size_t stride,
+ FillBufferCallback callback) {
DCHECK(stride == 0 || size().height() * stride <= data_size);
VkBufferCreateInfo buffer_create_info = {
@@ -811,6 +781,8 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size,
size().width(), size().height());
}
+ SetCleared();
+
if (!need_synchronization()) {
DCHECK(handles.empty());
command_buffer->Submit(0, nullptr, 0, nullptr);
@@ -823,7 +795,6 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size,
std::move(command_buffer));
fence_helper->EnqueueBufferCleanupForSubmittedWork(stage_buffer,
stage_allocation);
-
return true;
}
@@ -841,10 +812,11 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size,
begin_access_semaphores.data(), 1,
&end_access_semaphore);
- auto end_access_semphore_handle = vulkan_implementation()->GetSemaphoreHandle(
- device(), end_access_semaphore);
+ auto end_access_semaphore_handle =
+ vulkan_implementation()->GetSemaphoreHandle(device(),
+ end_access_semaphore);
EndAccessInternal(false /* readonly */,
- std::move(end_access_semphore_handle));
+ std::move(end_access_semaphore_handle));
auto* fence_helper =
context_state_->vk_context_provider()->GetDeviceQueue()->GetFenceHelper();
@@ -855,10 +827,69 @@ bool ExternalVkImageBacking::WritePixels(size_t data_size,
begin_access_semaphores);
fence_helper->EnqueueBufferCleanupForSubmittedWork(stage_buffer,
stage_allocation);
-
return true;
}
+bool ExternalVkImageBacking::WritePixels() {
+ std::vector<gpu::SemaphoreHandle> handles;
+ if (!BeginAccessInternal(false /* readonly */, &handles)) {
+ DLOG(ERROR) << "BeginAccess() failed.";
+ return false;
+ }
+
+ std::vector<GrBackendSemaphore> begin_access_semaphores;
+ begin_access_semaphores.reserve(handles.size() + 1);
+ for (auto& handle : handles) {
+ VkSemaphore semaphore = vulkan_implementation()->ImportSemaphoreHandle(
+ device(), std::move(handle));
+ begin_access_semaphores.emplace_back();
+ begin_access_semaphores.back().initVulkan(semaphore);
+ }
+
+ auto* gr_context = context_state_->gr_context();
+ gr_context->wait(begin_access_semaphores.size(),
+ begin_access_semaphores.data());
+
+ auto info = SkImageInfo::Make(size().width(), size().height(),
+ ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format()),
+ kOpaque_SkAlphaType);
+ SkPixmap pixmap(info, shared_memory_wrapper_.GetMemory(),
+ shared_memory_wrapper_.GetStride());
+
+ if (!gr_context->updateBackendTexture(backend_texture_, &pixmap,
+ /*levels=*/1, nullptr, nullptr)) {
+ DLOG(ERROR) << "updateBackendTexture() failed.";
+ }
+
+ if (!need_synchronization()) {
+ DCHECK(handles.empty());
+ EndAccessInternal(false /* readonly */, SemaphoreHandle());
+ return true;
+ }
+
+ VkSemaphore end_access_semaphore =
+ vulkan_implementation()->CreateExternalSemaphore(device());
+ GrBackendSemaphore end_access_backend_semaphore;
+ end_access_backend_semaphore.initVulkan(end_access_semaphore);
+
+ GrFlushInfo flush_info = {
+ .fNumSemaphores = 1,
+ .fSignalSemaphores = &end_access_backend_semaphore,
+ };
+
+ gr_context->flush(flush_info);
+ // Submit so the |end_access_semaphore| is ready for waiting.
+ gr_context->submit();
+
+ auto end_access_semaphore_handle =
+ vulkan_implementation()->GetSemaphoreHandle(device(),
+ end_access_semaphore);
+ EndAccessInternal(false /* readonly */,
+ std::move(end_access_semaphore_handle));
+ return true;
+}
+
void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() {
DCHECK(use_separate_gl_texture());
DCHECK_NE(!!texture_, !!texture_passthrough_);
@@ -907,16 +938,16 @@ void ExternalVkImageBacking::CopyPixelsFromGLTextureToVkImage() {
ScopedPixelStore pack_skip_rows(api, GL_PACK_SKIP_ROWS, 0);
ScopedPixelStore pack_aligment(api, GL_PACK_ALIGNMENT, 1);
- WritePixels(checked_size.ValueOrDie(), 0,
- base::BindOnce(
- [](gl::GLApi* api, const gfx::Size& size, GLenum format,
- GLenum type, void* buffer) {
- api->glReadPixelsFn(0, 0, size.width(), size.height(),
- format, type, buffer);
- DCHECK_EQ(api->glGetErrorFn(),
- static_cast<GLenum>(GL_NO_ERROR));
- },
- api, size(), gl_format, gl_type));
+ WritePixelsWithCallback(
+ checked_size.ValueOrDie(), 0,
+ base::BindOnce(
+ [](gl::GLApi* api, const gfx::Size& size, GLenum format, GLenum type,
+ void* buffer) {
+ api->glReadPixelsFn(0, 0, size.width(), size.height(), format, type,
+ buffer);
+ DCHECK_EQ(api->glGetErrorFn(), static_cast<GLenum>(GL_NO_ERROR));
+ },
+ api, size(), gl_format, gl_type));
api->glBindFramebufferEXTFn(GL_READ_FRAMEBUFFER, old_framebuffer);
api->glDeleteFramebuffersEXTFn(1, &framebuffer);
}
@@ -957,9 +988,7 @@ void ExternalVkImageBacking::CopyPixelsFromShmToGLTexture() {
checked_size *= size().height();
DCHECK(checked_size.IsValid());
- auto pixel_data =
- shared_memory_mapping_.GetMemoryAsSpan<const uint8_t>().subspan(
- memory_offset_);
+ auto pixel_data = shared_memory_wrapper_.GetMemoryAsSpan();
api->glTexSubImage2DFn(GL_TEXTURE_2D, 0, 0, 0, size().width(),
size().height(), gl_format, gl_type,
pixel_data.data());
@@ -1023,7 +1052,9 @@ void ExternalVkImageBacking::EndAccessInternal(
is_write_in_progress_ = false;
}
- if (need_synchronization()) {
+ // Synchronization is not needed if this is not the last GL access.
+ if (need_synchronization() && reads_in_progress_ == 0) {
+ DCHECK(!is_write_in_progress_);
DCHECK(semaphore_handle.is_valid());
if (readonly) {
read_semaphore_handles_.push_back(std::move(semaphore_handle));
@@ -1032,8 +1063,6 @@ void ExternalVkImageBacking::EndAccessInternal(
DCHECK(read_semaphore_handles_.empty());
write_semaphore_handle_ = std::move(semaphore_handle);
}
- } else {
- DCHECK(!semaphore_handle.is_valid());
}
}
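
The gl_reads_in_progress_ counter introduced in this file lets overlapping GL read accesses share a single Vulkan-to-GL ownership transfer: only the first BeginAccess waits and transitions the image, and only the last EndAccess signals a semaphore and transitions back. Condensed to its control flow (a sketch, not the full method bodies):

    bool BeginAccess(bool readonly, bool is_gl) {
      if (is_gl && readonly && gl_reads_in_progress_++)
        return true;  // later readers piggyback on the first transfer
      // First access: wait on pending semaphores and transfer queue family
      // ownership to external/GL.
      return true;
    }

    void EndAccess(bool readonly, bool is_gl) {
      if (is_gl && readonly && --gl_reads_in_progress_ > 0)
        return;  // not the last reader; GL keeps ownership
      // Last access: signal an end-access semaphore and hand ownership back
      // to the Vulkan queue.
    }
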
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_backing.h b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
index 7e7dc67b627..e3d1103d649 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_backing.h
+++ b/chromium/gpu/command_buffer/service/external_vk_image_backing.h
@@ -9,7 +9,6 @@
#include <vector>
#include "base/memory/scoped_refptr.h"
-#include "base/memory/shared_memory_mapping.h"
#include "base/optional.h"
#include "base/util/type_safety/pass_key.h"
#include "build/build_config.h"
@@ -17,6 +16,7 @@
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_memory_region_wrapper.h"
#include "gpu/command_buffer/service/texture_manager.h"
#include "gpu/vulkan/semaphore_handle.h"
#include "gpu/vulkan/vulkan_device_queue.h"
@@ -99,6 +99,9 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
return !context_state()->support_vulkan_external_object();
}
+ uint32_t reads_in_progress() const { return reads_in_progress_; }
+ uint32_t gl_reads_in_progress() const { return gl_reads_in_progress_; }
+
// Notifies the backing that an access will start. Return false if there is
// currently any other conflict access in progress. Otherwise, returns true
// and semaphore handles which will be waited on before accessing.
@@ -157,17 +160,17 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
#endif
// Install a shared memory GMB to the backing.
- void InstallSharedMemory(
- base::WritableSharedMemoryMapping shared_memory_mapping,
- size_t stride,
- size_t memory_offset);
+ void InstallSharedMemory(SharedMemoryRegionWrapper shared_memory_wrapper);
// Returns texture_service_id for ProduceGLTexture and GLTexturePassthrough.
GLuint ProduceGLTextureInternal();
using FillBufferCallback = base::OnceCallback<void(void* buffer)>;
- bool WritePixels(size_t data_size,
- size_t stride,
- FillBufferCallback callback);
+ // TODO(penghuang): Remove this when GrContext::updateBackendTexture()
+ // supports compressed textures and callbacks.
+ bool WritePixelsWithCallback(size_t data_size,
+ size_t stride,
+ FillBufferCallback callback);
+ bool WritePixels();
void CopyPixelsFromGLTextureToVkImage();
void CopyPixelsFromShmToGLTexture();
@@ -181,13 +184,12 @@ class ExternalVkImageBacking final : public ClearTrackingSharedImageBacking {
bool is_write_in_progress_ = false;
uint32_t reads_in_progress_ = 0;
+ uint32_t gl_reads_in_progress_ = 0;
gles2::Texture* texture_ = nullptr;
scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
// GMB related stuff.
- base::WritableSharedMemoryMapping shared_memory_mapping_;
- size_t stride_ = 0;
- size_t memory_offset_ = 0;
+ SharedMemoryRegionWrapper shared_memory_wrapper_;
enum LatestContent {
kInVkImage = 1 << 0,
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
index 34fdcde0c1d..bbad54274a2 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_gl_representation.cc
@@ -77,8 +77,10 @@ bool ExternalVkImageGLRepresentationShared::BeginAccess(GLenum mode) {
}
DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
- mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
- const bool readonly = (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
+ const bool readonly =
+ (mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
std::vector<SemaphoreHandle> handles;
if (!backing_impl()->BeginAccess(readonly, &handles, true /* is_gl */))
@@ -111,16 +113,17 @@ void ExternalVkImageGLRepresentationShared::EndAccess() {
DCHECK(current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
current_access_mode_ ==
- GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
+ GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM ||
+ current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
const bool readonly =
- (current_access_mode_ == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM);
+ (current_access_mode_ != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM);
current_access_mode_ = 0;
- VkSemaphore semaphore = VK_NULL_HANDLE;
SemaphoreHandle semaphore_handle;
- GLuint gl_semaphore = 0;
- if (backing_impl()->need_synchronization()) {
- semaphore =
+ if (backing_impl()->need_synchronization() &&
+ backing_impl()->gl_reads_in_progress() <= 1) {
+ DCHECK(readonly == !!backing_impl()->gl_reads_in_progress());
+ VkSemaphore semaphore =
vk_implementation()->CreateExternalSemaphore(backing_impl()->device());
if (semaphore == VK_NULL_HANDLE) {
// TODO(crbug.com/933452): We should be able to handle this failure more
@@ -142,7 +145,8 @@ void ExternalVkImageGLRepresentationShared::EndAccess() {
}
SemaphoreHandle dup_semaphore_handle = semaphore_handle.Duplicate();
- gl_semaphore = ImportVkSemaphoreIntoGL(std::move(dup_semaphore_handle));
+ GLuint gl_semaphore =
+ ImportVkSemaphoreIntoGL(std::move(dup_semaphore_handle));
if (!gl_semaphore) {
// TODO(crbug.com/933452): We should be able to semaphore_handle this
@@ -152,24 +156,21 @@ void ExternalVkImageGLRepresentationShared::EndAccess() {
<< "Vulkan";
return;
}
- }
- GrVkImageInfo info;
- auto result = backing_impl()->backend_texture().getVkImageInfo(&info);
- DCHECK(result);
- GLenum dst_layout = ToGLImageLayout(info.fImageLayout);
- if (backing_impl()->need_synchronization()) {
+ GrVkImageInfo info;
+ auto result = backing_impl()->backend_texture().getVkImageInfo(&info);
+ DCHECK(result);
+ GLenum dst_layout = ToGLImageLayout(info.fImageLayout);
api()->glSignalSemaphoreEXTFn(gl_semaphore, 0, nullptr, 1,
&texture_service_id_, &dst_layout);
api()->glDeleteSemaphoresEXTFn(1, &gl_semaphore);
    // Based on the spec, the glSignalSemaphoreEXT() call just inserts a signal
    // semaphore command in the GL context. It may or may not flush the context
- // which depends on the impelemntation. So to make it safe, we always call
+ // which depends on the implementation. So to make it safe, we always call
// glFlush() here. If the implementation does flush in the
// glSignalSemaphoreEXT() call, the glFlush() call should be a noop.
api()->glFlushFn();
}
-
backing_impl()->EndAccess(readonly, std::move(semaphore_handle),
true /* is_gl */);
}
diff --git a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
index 692eb8feadd..3211b8b59c8 100644
--- a/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
+++ b/chromium/gpu/command_buffer/service/external_vk_image_skia_representation.cc
@@ -61,12 +61,12 @@ sk_sp<SkSurface> ExternalVkImageSkiaRepresentation::BeginWriteAccess(
final_msaa_count != surface_msaa_count_) {
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
true /* gpu_compositing */, format());
- surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
+ surface = SkSurface::MakeFromBackendTexture(
gr_context, promise_texture->backendTexture(), kTopLeft_GrSurfaceOrigin,
final_msaa_count, sk_color_type,
backing_impl()->color_space().ToSkColorSpace(), &surface_props);
if (!surface) {
- LOG(ERROR) << "MakeFromBackendTextureAsRenderTarget() failed.";
+ LOG(ERROR) << "MakeFromBackendTexture() failed.";
backing_impl()->context_state()->EraseCachedSkSurface(this);
return nullptr;
}
diff --git a/chromium/gpu/command_buffer/service/feature_info.cc b/chromium/gpu/command_buffer/service/feature_info.cc
index 20e5298d9f6..ca410861576 100644
--- a/chromium/gpu/command_buffer/service/feature_info.cc
+++ b/chromium/gpu/command_buffer/service/feature_info.cc
@@ -170,35 +170,6 @@ bool IsWebGLDrawBuffersSupported(bool webglCompatibilityContext,
} // anonymous namespace.
-namespace {
-
-enum GpuTextureResultR16_L16 {
- // Values synced with 'GpuTextureResultR16_L16' in
- // src/tools/metrics/histograms/histograms.xml
- kHaveNone = 0,
- kHaveR16 = 1,
- kHaveL16 = 2,
- kHaveR16AndL16 = 3,
- kMax = kHaveR16AndL16
-};
-
-// TODO(riju): For UMA, remove after crbug.com/759456 is resolved.
-bool g_r16_is_present;
-bool g_l16_is_present;
-
-GpuTextureResultR16_L16 GpuTextureUMAHelper() {
- if (g_r16_is_present && g_l16_is_present) {
- return GpuTextureResultR16_L16::kHaveR16AndL16;
- } else if (g_r16_is_present) {
- return GpuTextureResultR16_L16::kHaveR16;
- } else if (g_l16_is_present) {
- return GpuTextureResultR16_L16::kHaveL16;
- }
- return GpuTextureResultR16_L16::kHaveNone;
-}
-
-} // anonymous namespace.
-
FeatureInfo::FeatureFlags::FeatureFlags() = default;
FeatureInfo::FeatureInfo() {
@@ -250,11 +221,6 @@ void FeatureInfo::InitializeBasicState(const base::CommandLine* command_line) {
feature_flags_.is_swiftshader_for_webgl =
(useGL == gl::kGLImplementationSwiftShaderForWebGLName);
- feature_flags_.is_swiftshader =
- (useGL == gl::kGLImplementationSwiftShaderName) ||
- ((useGL == gl::kGLImplementationANGLEName) &&
- (useANGLE == gl::kANGLEImplementationSwiftShaderName));
-
// The shader translator is needed to translate from WebGL-conformant GLES SL
// to normal GLES SL, enforce WebGL conformance, translate from GLES SL 1.0 to
// target context GLSL, implement emulation of OpenGL ES features on OpenGL,
@@ -420,6 +386,13 @@ void FeatureInfo::EnableCHROMIUMColorBufferFloatRGB() {
AddExtensionString("GL_CHROMIUM_color_buffer_float_rgb");
}
+void FeatureInfo::EnableOESDrawBuffersIndexed() {
+ if (!feature_flags_.oes_draw_buffers_indexed) {
+ AddExtensionString("GL_OES_draw_buffers_indexed");
+ feature_flags_.oes_draw_buffers_indexed = true;
+ }
+}
+
void FeatureInfo::EnableOESFboRenderMipmap() {
if (!feature_flags_.oes_fbo_render_mipmap) {
AddExtensionString("GL_OES_fbo_render_mipmap");
@@ -778,6 +751,14 @@ void FeatureInfo::InitializeFeatures() {
validators_.index_type.AddValue(GL_UNSIGNED_INT);
}
+ // Note (crbug.com/1058744): not implemented for the validating command decoder.
+ if (is_passthrough_cmd_decoder_ &&
+ gfx::HasExtension(extensions, "GL_OES_draw_buffers_indexed")) {
+ if (!disallowed_features_.oes_draw_buffers_indexed) {
+ EnableOESDrawBuffersIndexed();
+ }
+ }
+
if (gl_version_info_->IsAtLeastGL(3, 0) || gl_version_info_->is_es3 ||
gfx::HasExtension(extensions, "GL_OES_fbo_render_mipmap") ||
gfx::HasExtension(extensions, "GL_EXT_framebuffer_object")) {
@@ -1461,7 +1442,6 @@ void FeatureInfo::InitializeFeatures() {
gfx::HasExtension(extensions, "GL_EXT_texture_norm16"))) {
AddExtensionString("GL_EXT_texture_norm16");
feature_flags_.ext_texture_norm16 = true;
- g_r16_is_present = true;
validators_.pixel_type.AddValue(GL_UNSIGNED_SHORT);
validators_.pixel_type.AddValue(GL_SHORT);
@@ -1511,10 +1491,6 @@ void FeatureInfo::InitializeFeatures() {
feature_flags_.gpu_memory_buffer_formats.Add(gfx::BufferFormat::R_16);
}
- UMA_HISTOGRAM_ENUMERATION(
- "GPU.TextureR16Ext_LuminanceF16", GpuTextureUMAHelper(),
- static_cast<int>(GpuTextureResultR16_L16::kMax) + 1);
-
if (enable_es3 && gfx::HasExtension(extensions, "GL_EXT_window_rectangles")) {
AddExtensionString("GL_EXT_window_rectangles");
feature_flags_.ext_window_rectangles = true;
@@ -1945,9 +1921,6 @@ void FeatureInfo::InitializeFloatAndHalfFloatFeatures(
validators_.texture_internal_format_storage.AddValue(
GL_LUMINANCE_ALPHA16F_EXT);
}
-
- g_l16_is_present =
- enable_texture_half_float && feature_flags_.ext_texture_storage;
}
bool FeatureInfo::IsES3Capable() const {
diff --git a/chromium/gpu/command_buffer/service/feature_info.h b/chromium/gpu/command_buffer/service/feature_info.h
index 4a3255b6b6e..ac6c4d8e393 100644
--- a/chromium/gpu/command_buffer/service/feature_info.h
+++ b/chromium/gpu/command_buffer/service/feature_info.h
@@ -90,7 +90,6 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool ext_discard_framebuffer = false;
bool angle_depth_texture = false;
bool is_swiftshader_for_webgl = false;
- bool is_swiftshader = false;
bool chromium_texture_filtering_hint = false;
bool angle_texture_usage = false;
bool ext_texture_storage = false;
@@ -151,6 +150,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
bool webgl_multi_draw_instanced_base_vertex_base_instance = false;
bool ext_texture_compression_bptc = false;
bool ext_texture_compression_rgtc = false;
+ bool oes_draw_buffers_indexed = false;
};
FeatureInfo();
@@ -213,6 +213,7 @@ class GPU_GLES2_EXPORT FeatureInfo : public base::RefCounted<FeatureInfo> {
void EnableEXTColorBufferFloat();
void EnableEXTColorBufferHalfFloat();
void EnableEXTTextureFilterAnisotropic();
+ void EnableOESDrawBuffersIndexed();
void EnableOESFboRenderMipmap();
void EnableOESTextureFloatLinear();
void EnableOESTextureHalfFloatLinear();
diff --git a/chromium/gpu/command_buffer/service/gl_context_virtual.cc b/chromium/gpu/command_buffer/service/gl_context_virtual.cc
index c6e000cd9a6..c8662354eb6 100644
--- a/chromium/gpu/command_buffer/service/gl_context_virtual.cc
+++ b/chromium/gpu/command_buffer/service/gl_context_virtual.cc
@@ -87,10 +87,12 @@ void GLContextVirtual::SetSafeToForceGpuSwitch() {
}
unsigned int GLContextVirtual::CheckStickyGraphicsResetStatus() {
- // Don't pretend we know which one of the virtual contexts was responsible.
unsigned int reset_status = shared_context_->CheckStickyGraphicsResetStatus();
- return reset_status == GL_NO_ERROR ? GL_NO_ERROR
- : GL_UNKNOWN_CONTEXT_RESET_ARB;
+ if (reset_status == GL_NO_ERROR)
+ return GL_NO_ERROR;
+ shared_context_->MarkVirtualContextLost();
+ // Don't pretend we know which one of the virtual contexts was responsible.
+ return GL_UNKNOWN_CONTEXT_RESET_ARB;
}
void GLContextVirtual::SetUnbindFboOnMakeCurrent() {
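
With this change a reset detected on the underlying real context also marks the virtual context lost before reporting, and callers continue to see only GL_UNKNOWN_CONTEXT_RESET_ARB. Typical caller-side handling, sketched with hypothetical recovery helpers:

    unsigned int status = context->CheckStickyGraphicsResetStatus();
    if (status != GL_NO_ERROR) {
      // We cannot tell which virtual context caused the reset, so drop the
      // whole share group and start over.
      DestroyVirtualContexts();     // hypothetical
      RecreateSharedRealContext();  // hypothetical
    }
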
diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
index 501e5cc35d4..27084971eff 100644
--- a/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
+++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image.h
@@ -16,12 +16,6 @@ namespace gles2 {
// that supply a texture matrix.
class GPU_GLES2_EXPORT GLStreamTextureImage : public gl::GLImage {
public:
- // Get the matrix.
- // Copy the texture matrix for this image into |matrix|.
- // Subclasses must return a matrix appropriate for a coordinate system where
- // UV=(0,0) corresponds to the top left corner of the image.
- virtual void GetTextureMatrix(float matrix[16]) = 0;
-
// TODO(weiliangc): When Overlay is moved off command buffer and we use
// SharedImage in all cases, this API should be deleted.
virtual void NotifyPromotionHint(bool promotion_hint,
@@ -32,19 +26,6 @@ class GPU_GLES2_EXPORT GLStreamTextureImage : public gl::GLImage {
protected:
~GLStreamTextureImage() override = default;
-
- // Convenience function for subclasses that deal with SurfaceTextures, whose
- // coordinate system has (0,0) at the bottom left of the image.
- // [ a e i m ] [ 1 0 0 0 ] [ a -e i m+e ]
- // [ b f j n ] [ 0 -1 0 1 ] = [ b -f j n+f ]
- // [ c g k o ] [ 0 0 1 0 ] [ c -g k o+g ]
- // [ d h l p ] [ 0 0 0 1 ] [ d -h l p+h ]
- static void YInvertMatrix(float matrix[16]) {
- for (int i = 0; i < 4; ++i) {
- matrix[i + 12] += matrix[i + 4];
- matrix[i + 4] = -matrix[i + 4];
- }
- }
};
} // namespace gles2
diff --git a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h
index 9c35dae2934..1c7271618ca 100644
--- a/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h
+++ b/chromium/gpu/command_buffer/service/gl_stream_texture_image_stub.h
@@ -42,7 +42,6 @@ class GLStreamTextureImageStub : public GLStreamTextureImage {
bool EmulatingRGB() const override;
// Overridden from GLStreamTextureImage:
- void GetTextureMatrix(float matrix[16]) override {}
void NotifyPromotionHint(bool promotion_hint,
int display_x,
int display_y,
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
index 2eb24507574..c84b347b150 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.cc
@@ -464,12 +464,9 @@ std::string GetFragmentShaderSource(unsigned glslVersion,
// Main shader source.
source +=
"uniform SamplerType u_sampler;\n"
- "uniform mat4 u_tex_coord_transform;\n"
"VARYING TexCoordPrecision vec2 v_uv;\n"
"void main(void) {\n"
- " TexCoordPrecision vec4 uv =\n"
- " u_tex_coord_transform * vec4(v_uv, 0, 1);\n"
- " vec4 color = TextureLookup(u_sampler, uv.st);\n";
+ " vec4 color = TextureLookup(u_sampler, v_uv);\n";
// Premultiply or un-premultiply alpha. Must always do this, even
// if the destination format doesn't have an alpha channel.
@@ -927,59 +924,12 @@ class CopyTextureResourceManagerImpl
bool dither,
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) override;
- void DoCopySubTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLsizei dest_width,
- GLsizei dest_height,
- GLsizei source_width,
- GLsizei source_height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTexImageResourceManager* luma_emulation_blitter) override;
- void DoCopyTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_format,
- GLsizei width,
- GLsizei height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTextureMethod method,
- CopyTexImageResourceManager* luma_emulation_blitter) override;
-
private:
struct ProgramInfo {
ProgramInfo()
: program(0u),
vertex_source_mult_handle(0u),
vertex_source_add_handle(0u),
- tex_coord_transform_handle(0u),
sampler_handle(0u) {}
GLuint program;
@@ -989,7 +939,6 @@ class CopyTextureResourceManagerImpl
GLuint vertex_source_mult_handle;
GLuint vertex_source_add_handle;
- GLuint tex_coord_transform_handle;
GLuint sampler_handle;
};
@@ -1017,7 +966,6 @@ class CopyTextureResourceManagerImpl
bool premultiply_alpha,
bool unpremultiply_alpha,
bool dither,
- const GLfloat transform_matrix[16],
CopyTexImageResourceManager* luma_emulation_blitter);
bool initialized_;
@@ -1117,32 +1065,6 @@ void CopyTextureResourceManagerImpl::Destroy() {
buffer_id_ = 0;
}
-void CopyTextureResourceManagerImpl::DoCopyTexture(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLsizei width,
- GLsizei height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- CopyTextureMethod method,
- gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
- // Use kIdentityMatrix if no transform passed in.
- DoCopyTextureWithTransform(
- decoder, source_target, source_id, source_level, source_internal_format,
- dest_target, dest_id, dest_level, dest_internal_format, width, height,
- flip_y, premultiply_alpha, unpremultiply_alpha, dither, kIdentityMatrix,
- method, luma_emulation_blitter);
-}
-
void CopyTextureResourceManagerImpl::DoCopySubTexture(
DecoderContext* decoder,
GLenum source_target,
@@ -1210,12 +1132,12 @@ void CopyTextureResourceManagerImpl::DoCopySubTexture(
dest_height = height;
}
- DoCopySubTextureWithTransform(
+ DoCopyTextureInternal(
decoder, source_target, source_id, source_level, source_internal_format,
dest_target, dest_texture, dest_level, dest_internal_format, dest_xoffset,
dest_yoffset, x, y, width, height, dest_width, dest_height, source_width,
source_height, flip_y, premultiply_alpha, unpremultiply_alpha, dither,
- kIdentityMatrix, luma_emulation_blitter);
+ luma_emulation_blitter);
if (method == CopyTextureMethod::DRAW_AND_COPY ||
method == CopyTextureMethod::DRAW_AND_READBACK) {
@@ -1237,41 +1159,7 @@ void CopyTextureResourceManagerImpl::DoCopySubTexture(
}
}
-void CopyTextureResourceManagerImpl::DoCopySubTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLsizei dest_width,
- GLsizei dest_height,
- GLsizei source_width,
- GLsizei source_height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
- DoCopyTextureInternal(
- decoder, source_target, source_id, source_level, source_internal_format,
- dest_target, dest_id, dest_level, dest_internal_format, xoffset, yoffset,
- x, y, width, height, dest_width, dest_height, source_width, source_height,
- flip_y, premultiply_alpha, unpremultiply_alpha, dither, transform_matrix,
- luma_emulation_blitter);
-}
-
-void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform(
+void CopyTextureResourceManagerImpl::DoCopyTexture(
DecoderContext* decoder,
GLenum source_target,
GLuint source_id,
@@ -1287,7 +1175,6 @@ void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform(
bool premultiply_alpha,
bool unpremultiply_alpha,
bool dither,
- const GLfloat transform_matrix[16],
CopyTextureMethod method,
gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
GLsizei dest_width = width;
@@ -1326,12 +1213,11 @@ void CopyTextureResourceManagerImpl::DoCopyTextureWithTransform(
dest_internal_format = adjusted_internal_format;
}
- DoCopyTextureInternal(decoder, source_target, source_id, source_level,
- source_internal_format, dest_target, dest_texture,
- dest_level, dest_internal_format, 0, 0, 0, 0, width,
- height, dest_width, dest_height, width, height, flip_y,
- premultiply_alpha, unpremultiply_alpha, dither,
- transform_matrix, luma_emulation_blitter);
+ DoCopyTextureInternal(
+ decoder, source_target, source_id, source_level, source_internal_format,
+ dest_target, dest_texture, dest_level, dest_internal_format, 0, 0, 0, 0,
+ width, height, dest_width, dest_height, width, height, flip_y,
+ premultiply_alpha, unpremultiply_alpha, dither, luma_emulation_blitter);
if (method == CopyTextureMethod::DRAW_AND_COPY ||
method == CopyTextureMethod::DRAW_AND_READBACK) {
@@ -1375,7 +1261,6 @@ void CopyTextureResourceManagerImpl::DoCopyTextureInternal(
bool premultiply_alpha,
bool unpremultiply_alpha,
bool dither,
- const GLfloat transform_matrix[16],
gpu::gles2::CopyTexImageResourceManager* luma_emulation_blitter) {
DCHECK(source_target == GL_TEXTURE_2D ||
source_target == GL_TEXTURE_RECTANGLE_ARB ||
@@ -1465,15 +1350,10 @@ void CopyTextureResourceManagerImpl::DoCopyTextureInternal(
info->vertex_source_add_handle =
glGetUniformLocation(info->program, "u_vertex_source_add");
- info->tex_coord_transform_handle =
- glGetUniformLocation(info->program, "u_tex_coord_transform");
info->sampler_handle = glGetUniformLocation(info->program, "u_sampler");
}
glUseProgram(info->program);
- glUniformMatrix4fv(info->tex_coord_transform_handle, 1, GL_FALSE,
- transform_matrix);
-
// Note: For simplicity, the calculations in this comment block use a single
// dimension. All calculations trivially extend to the x-y plane.
// The target subrange in the source texture has coordinates [x, x + width].
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
index 351e181a635..33207ea04a0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_copy_texture_chromium.h
@@ -103,57 +103,6 @@ class GPU_GLES2_EXPORT CopyTextureCHROMIUMResourceManager {
CopyTextureMethod method,
CopyTexImageResourceManager* luma_emulation_blitter) = 0;
- virtual void DoCopySubTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_internal_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_internal_format,
- GLint xoffset,
- GLint yoffset,
- GLint x,
- GLint y,
- GLsizei width,
- GLsizei height,
- GLsizei dest_width,
- GLsizei dest_height,
- GLsizei source_width,
- GLsizei source_height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTexImageResourceManager* luma_emulation_blitter) = 0;
-
- // This will apply a transform on the texture coordinates before sampling
- // the source texture and copying to the destination texture. The transform
- // matrix should be given in column-major form, so it can be passed
- // directly to GL.
- virtual void DoCopyTextureWithTransform(
- DecoderContext* decoder,
- GLenum source_target,
- GLuint source_id,
- GLint source_level,
- GLenum source_format,
- GLenum dest_target,
- GLuint dest_id,
- GLint dest_level,
- GLenum dest_format,
- GLsizei width,
- GLsizei height,
- bool flip_y,
- bool premultiply_alpha,
- bool unpremultiply_alpha,
- bool dither,
- const GLfloat transform_matrix[16],
- CopyTextureMethod method,
- CopyTexImageResourceManager* luma_emulation_blitter) = 0;
-
// The attributes used during invocation of the extension.
static const GLuint kVertexPositionAttrib = 0;
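
With the WithTransform variants deleted, DoCopyTexture is the only remaining
entry point and no longer takes a transform matrix. A minimal sketch of the
surviving call shape — the argument values are illustrative placeholders, not
taken from any real call site:

    // Hypothetical caller after this change; note there is no
    // transform_matrix argument anymore.
    copy_texture_manager->DoCopyTexture(
        decoder, GL_TEXTURE_2D, source_id, /*source_level=*/0, GL_RGBA,
        GL_TEXTURE_2D, dest_id, /*dest_level=*/0, GL_RGBA, width, height,
        /*flip_y=*/false, /*premultiply_alpha=*/false,
        /*unpremultiply_alpha=*/false, /*dither=*/false,
        CopyTextureMethod::DRAW_AND_COPY, luma_emulation_blitter);

Callers that previously relied on the GL_TEXTURE_EXTERNAL_OES stream-texture
transform now take the untransformed copy path; the matrix plumbing is removed
end to end, including the u_tex_coord_transform uniform upload.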
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
index d729023a0b9..250c811b727 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -1092,8 +1092,7 @@ class GLES2DecoderImpl : public GLES2Decoder,
// Callback for async SwapBuffers.
void FinishAsyncSwapBuffers(uint64_t swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence>);
+ gfx::SwapCompletionResult result);
void FinishSwapBuffers(gfx::SwapResult result);
void DoCommitOverlayPlanes(uint64_t swap_id, GLbitfield flags);
@@ -1703,6 +1702,9 @@ class GLES2DecoderImpl : public GLES2Decoder,
// Wrapper for glDisable
void DoDisable(GLenum cap);
+ // Wrapper for glDisableiOES
+ void DoDisableiOES(GLenum target, GLuint index);
+
// Wrapper for glDisableVertexAttribArray.
void DoDisableVertexAttribArray(GLuint index);
@@ -1737,6 +1739,9 @@ class GLES2DecoderImpl : public GLES2Decoder,
// Wrapper for glEnable
void DoEnable(GLenum cap);
+ // Wrapper for glEnableiOES
+ void DoEnableiOES(GLenum target, GLuint index);
+
// Wrapper for glEnableVertexAttribArray.
void DoEnableVertexAttribArray(GLuint index);
@@ -1809,11 +1814,17 @@ class GLES2DecoderImpl : public GLES2Decoder,
// Wrapper for glGetIntegerv.
void DoGetIntegerv(GLenum pname, GLint* params, GLsizei params_size);
- // Helper for DoGetIntegeri_v and DoGetInteger64i_v.
+ // Helper for DoGetBooleani_v, DoGetIntegeri_v and DoGetInteger64i_v.
template <typename TYPE>
void GetIndexedIntegerImpl(
const char* function_name, GLenum target, GLuint index, TYPE* data);
+ // Wrapper for glGetBooleani_v.
+ void DoGetBooleani_v(GLenum target,
+ GLuint index,
+ GLboolean* params,
+ GLsizei params_size);
+
// Wrapper for glGetIntegeri_v.
void DoGetIntegeri_v(GLenum target,
GLuint index,
@@ -1925,6 +1936,8 @@ class GLES2DecoderImpl : public GLES2Decoder,
bool DoIsVertexArrayOES(GLuint client_id);
bool DoIsSync(GLuint client_id);
+ bool DoIsEnablediOES(GLenum target, GLuint index);
+
void DoLineWidth(GLfloat width);
// Wrapper for glLinkProgram
@@ -4116,8 +4129,7 @@ gpu::ContextResult GLES2DecoderImpl::Initialize(
InitializeGLDebugLogging(true, GLDebugMessageCallback, &logger_);
}
- if (feature_info_->feature_flags().chromium_texture_filtering_hint &&
- feature_info_->feature_flags().is_swiftshader) {
+ if (feature_info_->feature_flags().chromium_texture_filtering_hint) {
api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
}
@@ -7778,6 +7790,7 @@ void GLES2DecoderImpl::GetIndexedIntegerImpl(
state_.GetWindowRectangle(index, data);
return;
}
+
scoped_refptr<IndexedBufferBindingHost> bindings;
switch (target) {
case GL_TRANSFORM_FEEDBACK_BUFFER_BINDING:
@@ -7798,6 +7811,16 @@ void GLES2DecoderImpl::GetIndexedIntegerImpl(
}
bindings = state_.indexed_uniform_buffer_bindings.get();
break;
+ case GL_BLEND_SRC_RGB:
+ case GL_BLEND_SRC_ALPHA:
+ case GL_BLEND_DST_RGB:
+ case GL_BLEND_DST_ALPHA:
+ case GL_BLEND_EQUATION_RGB:
+ case GL_BLEND_EQUATION_ALPHA:
+ case GL_COLOR_WRITEMASK:
+ // Note (crbug.com/1058744): not implemented for validating command
+ // decoder
+ break;
default:
NOTREACHED();
break;
@@ -7819,12 +7842,29 @@ void GLES2DecoderImpl::GetIndexedIntegerImpl(
case GL_UNIFORM_BUFFER_START:
*data = static_cast<TYPE>(bindings->GetBufferStart(index));
break;
+ case GL_BLEND_SRC_RGB:
+ case GL_BLEND_SRC_ALPHA:
+ case GL_BLEND_DST_RGB:
+ case GL_BLEND_DST_ALPHA:
+ case GL_BLEND_EQUATION_RGB:
+ case GL_BLEND_EQUATION_ALPHA:
+ case GL_COLOR_WRITEMASK:
+ // Note (crbug.com/1058744): not implemented for validating command
+ // decoder
+ break;
default:
NOTREACHED();
break;
}
}
+void GLES2DecoderImpl::DoGetBooleani_v(GLenum target,
+ GLuint index,
+ GLboolean* params,
+ GLsizei params_size) {
+ GetIndexedIntegerImpl<GLboolean>("glGetBooleani_v", target, index, params);
+}
+
void GLES2DecoderImpl::DoGetIntegeri_v(GLenum target,
GLuint index,
GLint* params,
@@ -8358,6 +8398,10 @@ void GLES2DecoderImpl::DoDisable(GLenum cap) {
}
}
+void GLES2DecoderImpl::DoDisableiOES(GLenum target, GLuint index) {
+ api()->glDisableiOESFn(target, index);
+}
+
void GLES2DecoderImpl::DoEnable(GLenum cap) {
if (SetCapabilityState(cap, true)) {
if (cap == GL_PRIMITIVE_RESTART_FIXED_INDEX &&
@@ -8375,6 +8419,10 @@ void GLES2DecoderImpl::DoEnable(GLenum cap) {
}
}
+void GLES2DecoderImpl::DoEnableiOES(GLenum target, GLuint index) {
+ api()->glEnableiOESFn(target, index);
+}
+
void GLES2DecoderImpl::DoDepthRangef(GLclampf znear, GLclampf zfar) {
state_.z_near = base::ClampToRange(znear, 0.0f, 1.0f);
state_.z_far = base::ClampToRange(zfar, 0.0f, 1.0f);
@@ -10418,32 +10466,9 @@ void GLES2DecoderImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
GLint fake_location,
GLboolean transpose,
const volatile GLfloat* transform) {
- float gl_matrix[16];
-
// This refers to the bound external texture on the active unit.
TextureUnit& unit = state_.texture_units[state_.active_texture_unit];
- if (TextureRef* texture_ref = unit.bound_texture_external_oes.get()) {
- if (GLStreamTextureImage* image =
- texture_ref->texture()->GetLevelStreamTextureImage(
- GL_TEXTURE_EXTERNAL_OES, 0)) {
- gfx::Transform st_transform(gfx::Transform::kSkipInitialization);
- gfx::Transform pre_transform(gfx::Transform::kSkipInitialization);
- image->GetTextureMatrix(gl_matrix);
- st_transform.matrix().setColMajorf(gl_matrix);
- // const_cast is safe, because setColMajorf only does a memcpy.
- // TODO(piman): can we remove this assumption without having to introduce
- // an extra copy?
- pre_transform.matrix().setColMajorf(
- const_cast<const GLfloat*>(transform));
- gfx::Transform(pre_transform, st_transform)
- .matrix()
- .asColMajorf(gl_matrix);
- } else {
- // Missing stream texture. Treat matrix as identity.
- memcpy(gl_matrix, const_cast<const GLfloat*>(transform),
- sizeof(gl_matrix));
- }
- } else {
+ if (!unit.bound_texture_external_oes.get()) {
LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION,
"DoUniformMatrix4vStreamTextureMatrix",
"no texture bound");
@@ -10459,7 +10484,8 @@ void GLES2DecoderImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
return;
}
- api()->glUniformMatrix4fvFn(real_location, count, transpose, gl_matrix);
+ api()->glUniformMatrix4fvFn(real_location, count, transpose,
+ const_cast<const GLfloat*>(transform));
}
void GLES2DecoderImpl::DoUniformMatrix2x3fv(GLint fake_location,
@@ -12382,6 +12408,11 @@ bool GLES2DecoderImpl::DoIsEnabled(GLenum cap) {
return state_.GetEnabled(cap);
}
+bool GLES2DecoderImpl::DoIsEnablediOES(GLenum target, GLuint index) {
+ // Note (crbug.com/1058744): not implemented for validating command decoder
+ return false;
+}
+
bool GLES2DecoderImpl::DoIsBuffer(GLuint client_id) {
const Buffer* buffer = GetBuffer(client_id);
return buffer && buffer->IsValid() && !buffer->IsDeleted();
@@ -16997,14 +17028,13 @@ void GLES2DecoderImpl::DoSwapBuffers(uint64_t swap_id, GLbitfield flags) {
void GLES2DecoderImpl::FinishAsyncSwapBuffers(
uint64_t swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence) {
+ gfx::SwapCompletionResult result) {
TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", swap_id);
// Handling of the out-fence should have already happened before reaching
// this function, so we don't expect to get a valid fence here.
- DCHECK(!gpu_fence);
+ DCHECK(!result.gpu_fence);
- FinishSwapBuffers(result);
+ FinishSwapBuffers(result.swap_result);
}
void GLES2DecoderImpl::FinishSwapBuffers(gfx::SwapResult result) {
@@ -17438,7 +17468,7 @@ error::Error GLES2DecoderImpl::HandleDescheduleUntilFinishedCHROMIUM(
if (fence)
deschedule_until_finished_fences_.push_back(std::move(fence));
- if (deschedule_until_finished_fences_.size() == 1)
+ if (deschedule_until_finished_fences_.size() <= 1)
return error::kNoError;
DCHECK_EQ(2u, deschedule_until_finished_fences_.size());
@@ -18210,24 +18240,6 @@ void GLES2DecoderImpl::DoCopyTextureCHROMIUM(
unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
unpack_unmultiply_alpha == GL_TRUE, false /* dither */);
- // GL_TEXTURE_EXTERNAL_OES texture requires that we apply a transform matrix
- // before presenting.
- if (source_target == GL_TEXTURE_EXTERNAL_OES) {
- if (GLStreamTextureImage* texture_image =
- source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES,
- source_level)) {
- GLfloat transform_matrix[16];
- texture_image->GetTextureMatrix(transform_matrix);
- copy_texture_chromium_->DoCopyTextureWithTransform(
- this, source_target, source_texture->service_id(), source_level,
- source_internal_format, dest_target, dest_texture->service_id(),
- dest_level, internal_format, source_width, source_height,
- unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE, false /* dither */,
- transform_matrix, method, copy_tex_image_blit_.get());
- return;
- }
- }
copy_texture_chromium_->DoCopyTexture(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
@@ -18431,26 +18443,6 @@ void GLES2DecoderImpl::CopySubTextureHelper(const char* function_name,
DoBindOrCopyTexImageIfNeeded(source_texture, source_target, 0);
- // GL_TEXTURE_EXTERNAL_OES texture requires apply a transform matrix
- // before presenting.
- if (source_target == GL_TEXTURE_EXTERNAL_OES) {
- if (GLStreamTextureImage* texture_image =
- source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES,
- source_level)) {
- GLfloat transform_matrix[16];
- texture_image->GetTextureMatrix(transform_matrix);
- copy_texture_chromium_->DoCopySubTextureWithTransform(
- this, source_target, source_texture->service_id(), source_level,
- source_internal_format, dest_target, dest_texture->service_id(),
- dest_level, dest_internal_format, xoffset, yoffset, x, y, width,
- height, dest_width, dest_height, source_width, source_height,
- unpack_flip_y == GL_TRUE, unpack_premultiply_alpha == GL_TRUE,
- unpack_unmultiply_alpha == GL_TRUE, dither == GL_TRUE,
- transform_matrix, copy_tex_image_blit_.get());
- return;
- }
- }
-
CopyTextureMethod method = GetCopyTextureCHROMIUMMethod(
GetFeatureInfo(), source_target, source_level, source_internal_format,
source_type, dest_binding_target, dest_level, dest_internal_format,
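
The indexed-query plumbing above funnels glGetBooleani_v through the same
GetIndexedIntegerImpl template that already served the integer variants; the
new blend/write-mask pnames are accepted but stubbed out in this validating
decoder (crbug.com/1058744). A hedged sketch of how an ES3 client with
OES_draw_buffers_indexed might exercise the query — illustrative only, and
meaningful values come back only where the backend actually implements it:

    // Set a per-draw-buffer color mask, then read it back for index 1.
    glColorMaskiOES(1, GL_TRUE, GL_TRUE, GL_FALSE, GL_FALSE);
    GLboolean mask[4] = {GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE};
    glGetBooleani_v(GL_COLOR_WRITEMASK, 1, mask);
    // Expected where implemented: {GL_TRUE, GL_TRUE, GL_FALSE, GL_FALSE}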
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
index 7491797e7fd..147e831f5cf 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder.h
@@ -69,6 +69,7 @@ struct GPU_GLES2_EXPORT DisallowedFeatures {
oes_texture_half_float_linear = false;
ext_float_blend = false;
oes_fbo_render_mipmap = false;
+ oes_draw_buffers_indexed = false;
}
bool operator==(const DisallowedFeatures& other) const {
@@ -85,6 +86,7 @@ struct GPU_GLES2_EXPORT DisallowedFeatures {
bool oes_texture_half_float_linear = false;
bool ext_float_blend = false;
bool oes_fbo_render_mipmap = false;
+ bool oes_draw_buffers_indexed = false;
};
// This class implements the DecoderContext interface, decoding GLES2
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
index f8be401bf99..615acf88e5a 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_autogen.h
@@ -1314,6 +1314,43 @@ error::Error GLES2DecoderImpl::HandleGetBooleanv(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleGetBooleani_v(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2OrES3OrHigherContext())
+ return error::kUnknownCommand;
+ const volatile gles2::cmds::GetBooleani_v& c =
+ *static_cast<const volatile gles2::cmds::GetBooleani_v*>(cmd_data);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLuint index = static_cast<GLuint>(c.index);
+ typedef cmds::GetBooleani_v::Result Result;
+ GLsizei num_values = 0;
+ if (!GetNumValuesReturnedForGLGet(pname, &num_values)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM(":GetBooleani_v", pname, "pname");
+ return error::kNoError;
+ }
+ uint32_t checked_size = 0;
+ if (!Result::ComputeSize(num_values).AssignIfValid(&checked_size)) {
+ return error::kOutOfBounds;
+ }
+ Result* result = GetSharedMemoryAs<Result*>(c.data_shm_id, c.data_shm_offset,
+ checked_size);
+ GLboolean* data = result ? result->GetData() : nullptr;
+ if (!validators_->indexed_g_l_state.IsValid(pname)) {
+ LOCAL_SET_GL_ERROR_INVALID_ENUM("glGetBooleani_v", pname, "pname");
+ return error::kNoError;
+ }
+ if (data == nullptr) {
+ return error::kOutOfBounds;
+ }
+ // Check that the client initialized the result.
+ if (result->size != 0) {
+ return error::kInvalidArguments;
+ }
+ DoGetBooleani_v(pname, index, data, num_values);
+ result->SetNumResults(num_values);
+ return error::kNoError;
+}
error::Error GLES2DecoderImpl::HandleGetBufferParameteri64v(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -5617,6 +5654,141 @@ error::Error GLES2DecoderImpl::HandleEndBatchReadAccessSharedImageCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderImpl::HandleEnableiOES(uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::EnableiOES& c =
+ *static_cast<const volatile gles2::cmds::EnableiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ DoEnableiOES(target, index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleDisableiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::DisableiOES& c =
+ *static_cast<const volatile gles2::cmds::DisableiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ DoDisableiOES(target, index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendEquationiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendEquationiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendEquationiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum mode = static_cast<GLenum>(c.mode);
+ api()->glBlendEquationiOESFn(buf, mode);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendEquationSeparateiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendEquationSeparateiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendEquationSeparateiOES*>(
+ cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum modeRGB = static_cast<GLenum>(c.modeRGB);
+ GLenum modeAlpha = static_cast<GLenum>(c.modeAlpha);
+ api()->glBlendEquationSeparateiOESFn(buf, modeRGB, modeAlpha);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendFunciOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendFunciOES& c =
+ *static_cast<const volatile gles2::cmds::BlendFunciOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum src = static_cast<GLenum>(c.src);
+ GLenum dst = static_cast<GLenum>(c.dst);
+ api()->glBlendFunciOESFn(buf, src, dst);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleBlendFuncSeparateiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendFuncSeparateiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendFuncSeparateiOES*>(
+ cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum srcRGB = static_cast<GLenum>(c.srcRGB);
+ GLenum dstRGB = static_cast<GLenum>(c.dstRGB);
+ GLenum srcAlpha = static_cast<GLenum>(c.srcAlpha);
+ GLenum dstAlpha = static_cast<GLenum>(c.dstAlpha);
+ api()->glBlendFuncSeparateiOESFn(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleColorMaskiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ColorMaskiOES& c =
+ *static_cast<const volatile gles2::cmds::ColorMaskiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLboolean r = static_cast<GLboolean>(c.r);
+ GLboolean g = static_cast<GLboolean>(c.g);
+ GLboolean b = static_cast<GLboolean>(c.b);
+ GLboolean a = static_cast<GLboolean>(c.a);
+ api()->glColorMaskiOESFn(buf, r, g, b, a);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderImpl::HandleIsEnablediOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::IsEnablediOES& c =
+ *static_cast<const volatile gles2::cmds::IsEnablediOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ typedef cmds::IsEnablediOES::Result Result;
+ Result* result_dst = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result_dst));
+ if (!result_dst) {
+ return error::kOutOfBounds;
+ }
+ *result_dst = DoIsEnablediOES(target, index);
+ return error::kNoError;
+}
+
bool GLES2DecoderImpl::SetCapabilityState(GLenum cap, bool enabled) {
switch (cap) {
case GL_BLEND:
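
Every handler added above follows the same generated shape: gate on the
feature flag, unpack the wire struct, forward to a Do* wrapper. Condensed to a
schematic — FooOES is a placeholder, not part of the real command set:

    error::Error GLES2DecoderImpl::HandleFooOES(uint32_t immediate_data_size,
                                                const volatile void* cmd_data) {
      const volatile gles2::cmds::FooOES& c =
          *static_cast<const volatile gles2::cmds::FooOES*>(cmd_data);
      // Extension not exposed to this context: reject the command outright.
      if (!features().oes_draw_buffers_indexed)
        return error::kUnknownCommand;
      DoFooOES(static_cast<GLenum>(c.target), static_cast<GLuint>(c.index));
      return error::kNoError;
    }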
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
index 6736ed33120..3fa8cacc086 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.cc
@@ -24,6 +24,7 @@
#include "gpu/command_buffer/service/program_cache.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "ui/gl/gl_version_info.h"
+#include "ui/gl/gpu_switching_manager.h"
#include "ui/gl/progress_reporter.h"
#if defined(OS_WIN)
@@ -1092,8 +1093,7 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
bound_buffers_[GL_DISPATCH_INDIRECT_BUFFER] = 0;
}
- if (feature_info_->feature_flags().chromium_texture_filtering_hint &&
- feature_info_->feature_flags().is_swiftshader) {
+ if (feature_info_->feature_flags().chromium_texture_filtering_hint) {
api()->glHintFn(GL_TEXTURE_FILTERING_HINT_CHROMIUM, GL_NICEST);
}
@@ -1210,6 +1210,11 @@ gpu::ContextResult GLES2DecoderPassthroughImpl::Initialize(
api()->glDisableFn(GL_TEXTURE_RECTANGLE_ANGLE);
#endif
+ // Register this object as a GPU switching observer.
+ if (feature_info_->IsWebGLContext()) {
+ ui::GpuSwitchingManager::GetInstance()->AddObserver(this);
+ }
+
set_initialized();
return gpu::ContextResult::kSuccess;
}
@@ -1315,6 +1320,11 @@ void GLES2DecoderPassthroughImpl::Destroy(bool have_context) {
}
deschedule_until_finished_fences_.clear();
+ // Unregister this object as a GPU switching observer.
+ if (feature_info_->IsWebGLContext()) {
+ ui::GpuSwitchingManager::GetInstance()->RemoveObserver(this);
+ }
+
// Destroy the surface before the context, some surface destructors make GL
// calls.
surface_ = nullptr;
@@ -1872,6 +1882,12 @@ gpu::gles2::Logger* GLES2DecoderPassthroughImpl::GetLogger() {
return &logger_;
}
+void GLES2DecoderPassthroughImpl::OnGpuSwitched(
+ gl::GpuPreference active_gpu_heuristic) {
+ // Send OnGpuSwitched notification to renderer process via decoder client.
+ client()->OnGpuSwitched(active_gpu_heuristic);
+}
+
void GLES2DecoderPassthroughImpl::BeginDecoding() {
gpu_tracer_->BeginDecoding();
gpu_trace_commands_ = gpu_tracer_->IsTracing() && *gpu_decoder_category_;
@@ -2865,14 +2881,13 @@ bool GLES2DecoderPassthroughImpl::IsEmulatedFramebufferBound(
void GLES2DecoderPassthroughImpl::CheckSwapBuffersAsyncResult(
const char* function_name,
uint64_t swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence) {
+ gfx::SwapCompletionResult result) {
TRACE_EVENT_ASYNC_END0("gpu", "AsyncSwapBuffers", swap_id);
// Handling of the out-fence should have already happened before reaching
// this function, so we don't expect to get a valid fence here.
- DCHECK(!gpu_fence);
+ DCHECK(!result.gpu_fence);
- CheckSwapBuffersResult(result, function_name);
+ CheckSwapBuffersResult(result.swap_result, function_name);
}
error::Error GLES2DecoderPassthroughImpl::CheckSwapBuffersResult(
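
Both decoders' async-swap callbacks collapse the (SwapResult, GpuFence) pair
into a single gfx::SwapCompletionResult. Inferred from the usage above
(result.swap_result, result.gpu_fence), the struct has roughly this shape —
the authoritative definition lives in ui/gfx, so treat this as a sketch:

    struct SwapCompletionResult {
      gfx::SwapResult swap_result;               // status of the swap itself
      std::unique_ptr<gfx::GpuFence> gpu_fence;  // optional out-fence; expected
                                                 // to be consumed before this
                                                 // point, hence the DCHECKs
    };

Bundling the two lets swap-completion data grow later without touching every
callback signature again.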
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
index ade661ee2af..7ff062e8b00 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough.h
@@ -35,6 +35,7 @@
#include "ui/gl/gl_fence.h"
#include "ui/gl/gl_image.h"
#include "ui/gl/gl_surface.h"
+#include "ui/gl/gpu_switching_observer.h"
namespace gl {
class GLFence;
@@ -139,7 +140,9 @@ struct PassthroughResources {
std::unordered_map<GLuint, MappedBuffer> mapped_buffer_map;
};
-class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
+class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl
+ : public GLES2Decoder,
+ public ui::GpuSwitchingObserver {
public:
GLES2DecoderPassthroughImpl(DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
@@ -347,6 +350,9 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
// directly, and needing to know if they failed due to loss.
bool CheckResetStatus() override;
+ // Implement GpuSwitchingObserver.
+ void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override;
+
Logger* GetLogger() override;
void BeginDecoding() override;
@@ -465,8 +471,7 @@ class GPU_GLES2_EXPORT GLES2DecoderPassthroughImpl : public GLES2Decoder {
void CheckSwapBuffersAsyncResult(const char* function_name,
uint64_t swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence);
+ gfx::SwapCompletionResult result);
error::Error CheckSwapBuffersResult(gfx::SwapResult result,
const char* function_name);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
index be04a014d68..069eb85a96b 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doer_prototypes.h
@@ -257,6 +257,11 @@ error::Error DoGetBooleanv(GLenum pname,
GLsizei bufsize,
GLsizei* length,
GLboolean* params);
+error::Error DoGetBooleani_v(GLenum pname,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLboolean* data);
error::Error DoGetBufferParameteri64v(GLenum target,
GLenum pname,
GLsizei bufsize,
@@ -1143,4 +1148,22 @@ error::Error DoBeginSharedImageAccessDirectCHROMIUM(GLuint client_id,
error::Error DoEndSharedImageAccessDirectCHROMIUM(GLuint client_id);
error::Error DoBeginBatchReadAccessSharedImageCHROMIUM(void);
error::Error DoEndBatchReadAccessSharedImageCHROMIUM(void);
+error::Error DoEnableiOES(GLenum target, GLuint index);
+error::Error DoDisableiOES(GLenum target, GLuint index);
+error::Error DoBlendEquationiOES(GLuint buf, GLenum mode);
+error::Error DoBlendEquationSeparateiOES(GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha);
+error::Error DoBlendFunciOES(GLuint buf, GLenum sfactor, GLenum dfactor);
+error::Error DoBlendFuncSeparateiOES(GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha);
+error::Error DoColorMaskiOES(GLuint buf,
+ GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha);
+error::Error DoIsEnablediOES(GLenum target, GLuint index, uint32_t* result);
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_PASSTHROUGH_DOER_PROTOTYPES_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
index ecad6a45c14..19086f610a0 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_doers.cc
@@ -615,6 +615,12 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendEquation(GLenum mode) {
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoBlendEquationiOES(GLuint buf,
+ GLenum mode) {
+ api()->glBlendEquationiOESFn(buf, mode);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparate(
GLenum modeRGB,
GLenum modeAlpha) {
@@ -622,12 +628,27 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparate(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoBlendEquationSeparateiOES(
+ GLuint buf,
+ GLenum modeRGB,
+ GLenum modeAlpha) {
+ api()->glBlendEquationSeparateiOESFn(buf, modeRGB, modeAlpha);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoBlendFunc(GLenum sfactor,
GLenum dfactor) {
api()->glBlendFuncFn(sfactor, dfactor);
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoBlendFunciOES(GLuint buf,
+ GLenum sfactor,
+ GLenum dfactor) {
+ api()->glBlendFunciOESFn(buf, sfactor, dfactor);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparate(GLenum srcRGB,
GLenum dstRGB,
GLenum srcAlpha,
@@ -636,6 +657,16 @@ error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparate(GLenum srcRGB,
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoBlendFuncSeparateiOES(
+ GLuint buf,
+ GLenum srcRGB,
+ GLenum dstRGB,
+ GLenum srcAlpha,
+ GLenum dstAlpha) {
+ api()->glBlendFuncSeparateiOESFn(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoBufferData(GLenum target,
GLsizeiptr size,
const void* data,
@@ -744,6 +775,15 @@ error::Error GLES2DecoderPassthroughImpl::DoColorMask(GLboolean red,
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoColorMaskiOES(GLuint buf,
+ GLboolean red,
+ GLboolean green,
+ GLboolean blue,
+ GLboolean alpha) {
+ api()->glColorMaskiOESFn(buf, red, green, blue, alpha);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoCompileShader(GLuint shader) {
api()->glCompileShaderFn(GetShaderServiceID(shader, resources_));
return error::kNoError;
@@ -1591,6 +1631,15 @@ error::Error GLES2DecoderPassthroughImpl::DoGetBooleanv(GLenum pname,
});
}
+error::Error GLES2DecoderPassthroughImpl::DoGetBooleani_v(GLenum pname,
+ GLuint index,
+ GLsizei bufsize,
+ GLsizei* length,
+ GLboolean* data) {
+ glGetBooleani_vRobustANGLE(pname, index, bufsize, length, data);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoGetBufferParameteri64v(
GLenum target,
GLenum pname,
@@ -2238,6 +2287,13 @@ error::Error GLES2DecoderPassthroughImpl::DoIsEnabled(GLenum cap,
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoIsEnablediOES(GLenum target,
+ GLuint index,
+ uint32_t* result) {
+ *result = api()->glIsEnablediOESFn(target, index);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::DoIsFramebuffer(GLuint framebuffer,
uint32_t* result) {
*result = api()->glIsFramebufferEXTFn(
@@ -5062,26 +5118,8 @@ GLES2DecoderPassthroughImpl::DoUniformMatrix4fvStreamTextureMatrixCHROMIUM(
return error::kNoError;
}
- float gl_matrix[16] = {};
-
- GLStreamTextureImage* image =
- bound_texture->GetStreamLevelImage(kTextureTarget, 0);
- if (image) {
- gfx::Transform st_transform(gfx::Transform::kSkipInitialization);
- gfx::Transform pre_transform(gfx::Transform::kSkipInitialization);
- image->GetTextureMatrix(gl_matrix);
- st_transform.matrix().setColMajorf(gl_matrix);
- // const_cast is safe, because setColMajorf only does a memcpy.
- // TODO(piman): can we remove this assumption without having to introduce
- // an extra copy?
- pre_transform.matrix().setColMajorf(const_cast<const GLfloat*>(transform));
- gfx::Transform(pre_transform, st_transform).matrix().asColMajorf(gl_matrix);
- } else {
- // Missing stream texture. Treat matrix as identity.
- memcpy(gl_matrix, const_cast<const GLfloat*>(transform), sizeof(gl_matrix));
- }
-
- api()->glUniformMatrix4fvFn(location, 1, transpose, gl_matrix);
+ api()->glUniformMatrix4fvFn(location, 1, transpose,
+ const_cast<const GLfloat*>(transform));
return error::kNoError;
}
@@ -5406,7 +5444,8 @@ error::Error
GLES2DecoderPassthroughImpl::DoBeginSharedImageAccessDirectCHROMIUM(
GLuint client_id,
GLenum mode) {
- if (mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM &&
+ if (mode != GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM &&
+ mode != GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM &&
mode != GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
InsertError(GL_INVALID_ENUM, "unrecognized access mode");
return error::kNoError;
@@ -5460,5 +5499,17 @@ GLES2DecoderPassthroughImpl::DoEndBatchReadAccessSharedImageCHROMIUM() {
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::DoEnableiOES(GLenum target,
+ GLuint index) {
+ api()->glEnableiOESFn(target, index);
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::DoDisableiOES(GLenum target,
+ GLuint index) {
+ api()->glDisableiOESFn(target, index);
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
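
Client code reaches these doers through the ordinary GL entry points. A
hypothetical usage of the per-buffer blend state wired up above, assuming a
context that exposes OES_draw_buffers_indexed:

    // Configure blending on draw buffer 0 only; other buffers keep their state.
    glEnableiOES(GL_BLEND, 0);
    glBlendEquationSeparateiOES(0, GL_FUNC_ADD, GL_FUNC_REVERSE_SUBTRACT);
    glBlendFuncSeparateiOES(0, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA,
                            GL_ONE, GL_ZERO);
    GLboolean enabled = glIsEnablediOES(GL_BLEND, 0);  // GL_TRUE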
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
index 7e7ca53b9c9..cddfeff0b97 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_passthrough_handlers_autogen.cc
@@ -1130,6 +1130,37 @@ error::Error GLES2DecoderPassthroughImpl::HandleGetBooleanv(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleGetBooleani_v(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ if (!feature_info_->IsWebGL2OrES3OrHigherContext())
+ return error::kUnknownCommand;
+ const volatile gles2::cmds::GetBooleani_v& c =
+ *static_cast<const volatile gles2::cmds::GetBooleani_v*>(cmd_data);
+ GLenum pname = static_cast<GLenum>(c.pname);
+ GLuint index = static_cast<GLuint>(c.index);
+ unsigned int buffer_size = 0;
+ typedef cmds::GetBooleani_v::Result Result;
+ Result* result = GetSharedMemoryAndSizeAs<Result*>(
+ c.data_shm_id, c.data_shm_offset, sizeof(Result), &buffer_size);
+ GLboolean* data = result ? result->GetData() : nullptr;
+ if (data == nullptr) {
+ return error::kOutOfBounds;
+ }
+ GLsizei bufsize = Result::ComputeMaxResults(buffer_size);
+ GLsizei written_values = 0;
+ GLsizei* length = &written_values;
+ error::Error error = DoGetBooleani_v(pname, index, bufsize, length, data);
+ if (error != error::kNoError) {
+ return error;
+ }
+ if (written_values > bufsize) {
+ return error::kOutOfBounds;
+ }
+ result->SetNumResults(written_values);
+ return error::kNoError;
+}
+
error::Error GLES2DecoderPassthroughImpl::HandleGetBufferParameteri64v(
uint32_t immediate_data_size,
const volatile void* cmd_data) {
@@ -4888,5 +4919,166 @@ GLES2DecoderPassthroughImpl::HandleEndBatchReadAccessSharedImageCHROMIUM(
return error::kNoError;
}
+error::Error GLES2DecoderPassthroughImpl::HandleEnableiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::EnableiOES& c =
+ *static_cast<const volatile gles2::cmds::EnableiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ error::Error error = DoEnableiOES(target, index);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleDisableiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::DisableiOES& c =
+ *static_cast<const volatile gles2::cmds::DisableiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ error::Error error = DoDisableiOES(target, index);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleBlendEquationiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendEquationiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendEquationiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum mode = static_cast<GLenum>(c.mode);
+ error::Error error = DoBlendEquationiOES(buf, mode);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleBlendEquationSeparateiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendEquationSeparateiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendEquationSeparateiOES*>(
+ cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum modeRGB = static_cast<GLenum>(c.modeRGB);
+ GLenum modeAlpha = static_cast<GLenum>(c.modeAlpha);
+ error::Error error = DoBlendEquationSeparateiOES(buf, modeRGB, modeAlpha);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleBlendFunciOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendFunciOES& c =
+ *static_cast<const volatile gles2::cmds::BlendFunciOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum src = static_cast<GLenum>(c.src);
+ GLenum dst = static_cast<GLenum>(c.dst);
+ error::Error error = DoBlendFunciOES(buf, src, dst);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleBlendFuncSeparateiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::BlendFuncSeparateiOES& c =
+ *static_cast<const volatile gles2::cmds::BlendFuncSeparateiOES*>(
+ cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLenum srcRGB = static_cast<GLenum>(c.srcRGB);
+ GLenum dstRGB = static_cast<GLenum>(c.dstRGB);
+ GLenum srcAlpha = static_cast<GLenum>(c.srcAlpha);
+ GLenum dstAlpha = static_cast<GLenum>(c.dstAlpha);
+ error::Error error =
+ DoBlendFuncSeparateiOES(buf, srcRGB, dstRGB, srcAlpha, dstAlpha);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleColorMaskiOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::ColorMaskiOES& c =
+ *static_cast<const volatile gles2::cmds::ColorMaskiOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLuint buf = static_cast<GLuint>(c.buf);
+ GLboolean r = static_cast<GLboolean>(c.r);
+ GLboolean g = static_cast<GLboolean>(c.g);
+ GLboolean b = static_cast<GLboolean>(c.b);
+ GLboolean a = static_cast<GLboolean>(c.a);
+ error::Error error = DoColorMaskiOES(buf, r, g, b, a);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
+error::Error GLES2DecoderPassthroughImpl::HandleIsEnablediOES(
+ uint32_t immediate_data_size,
+ const volatile void* cmd_data) {
+ const volatile gles2::cmds::IsEnablediOES& c =
+ *static_cast<const volatile gles2::cmds::IsEnablediOES*>(cmd_data);
+ if (!features().oes_draw_buffers_indexed) {
+ return error::kUnknownCommand;
+ }
+
+ GLenum target = static_cast<GLenum>(c.target);
+ GLuint index = static_cast<GLuint>(c.index);
+ typedef cmds::IsEnablediOES::Result Result;
+ Result* result = GetSharedMemoryAs<Result*>(
+ c.result_shm_id, c.result_shm_offset, sizeof(*result));
+ if (!result) {
+ return error::kOutOfBounds;
+ }
+ error::Error error = DoIsEnablediOES(target, index, result);
+ if (error != error::kNoError) {
+ return error;
+ }
+ return error::kNoError;
+}
+
} // namespace gles2
} // namespace gpu
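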
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
index bd8874bf3b2..97996f33dd9 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1.cc
@@ -176,21 +176,6 @@ void GLES2DecoderTestBase::SpecializedSetup<
}
template <>
-void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramiv, 0>(
- bool valid) {
- if (valid) {
- // GetProgramiv calls ClearGLError then GetError to make sure
- // it actually got a value so it can report correctly to the client.
- EXPECT_CALL(*gl_, GetError())
- .WillOnce(Return(GL_NO_ERROR))
- .RetiresOnSaturation();
- EXPECT_CALL(*gl_, GetError())
- .WillOnce(Return(GL_NO_ERROR))
- .RetiresOnSaturation();
- }
-}
-
-template <>
void GLES2DecoderTestBase::
SpecializedSetup<cmds::GenTransformFeedbacksImmediate, 0>(bool valid) {
if (valid) {
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
index 3d5553178ca..3bc69db4302 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_1_autogen.h
@@ -1233,57 +1233,4 @@ TEST_P(GLES2DecoderTest1, GetIntegervInvalidArgs1_1) {
EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
EXPECT_EQ(0u, result->size);
}
-
-TEST_P(GLES2DecoderTest1, GetProgramivValidArgs) {
- SpecializedSetup<cmds::GetProgramiv, 0>(true);
- typedef cmds::GetProgramiv::Result Result;
- Result* result = static_cast<Result*>(shared_memory_address_);
- result->size = 0;
- cmds::GetProgramiv cmd;
- cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_,
- shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DELETE_STATUS),
- result->GetNumResults());
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
-
-TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs1_0) {
- EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
- SpecializedSetup<cmds::GetProgramiv, 0>(false);
- cmds::GetProgramiv::Result* result =
- static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
- result->size = 0;
- cmds::GetProgramiv cmd;
- cmd.Init(client_program_id_, GL_PROGRAM_BINARY_RETRIEVABLE_HINT,
- shared_memory_id_, shared_memory_offset_);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(0u, result->size);
- EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
-}
-
-TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_0) {
- EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
- SpecializedSetup<cmds::GetProgramiv, 0>(false);
- cmds::GetProgramiv::Result* result =
- static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
- result->size = 0;
- cmds::GetProgramiv cmd;
- cmd.Init(client_program_id_, GL_DELETE_STATUS, kInvalidSharedMemoryId, 0);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(0u, result->size);
-}
-
-TEST_P(GLES2DecoderTest1, GetProgramivInvalidArgs2_1) {
- EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
- SpecializedSetup<cmds::GetProgramiv, 0>(false);
- cmds::GetProgramiv::Result* result =
- static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
- result->size = 0;
- cmds::GetProgramiv cmd;
- cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_,
- kInvalidSharedMemoryOffset);
- EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
- EXPECT_EQ(0u, result->size);
-}
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_1_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
index 8288b6d3b18..75b71b0ed28 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2.cc
@@ -691,6 +691,20 @@ void GLES2DecoderTestBase::SpecializedSetup<
}
template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::GetProgramiv, 0>(bool valid) {
+ if (valid) {
+ // GetProgramiv calls ClearGLError then GetError to make sure
+ // it actually got a value so it can report correctly to the client.
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
+ }
+}
+
+template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::RenderbufferStorage, 0>(
bool valid) {
DoBindRenderbuffer(GL_RENDERBUFFER, client_renderbuffer_id_,
@@ -851,12 +865,6 @@ void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform3ivImmediate, 0>(
}
template <>
-void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>(
- bool /* valid */) {
- SetupShaderForUniform(GL_FLOAT_VEC4);
-}
-
-template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::UniformMatrix2fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_MAT2);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
index 47fd77225a4..fa45593b8f5 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_2_autogen.h
@@ -12,6 +12,59 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
+TEST_P(GLES2DecoderTest2, GetProgramivValidArgs) {
+ SpecializedSetup<cmds::GetProgramiv, 0>(true);
+ typedef cmds::GetProgramiv::Result Result;
+ Result* result = static_cast<Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_,
+ shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(decoder_->GetGLES2Util()->GLGetNumValuesReturned(GL_DELETE_STATUS),
+ result->GetNumResults());
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, GetProgramivInvalidArgs1_0) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_PROGRAM_BINARY_RETRIEVABLE_HINT,
+ shared_memory_id_, shared_memory_offset_);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+ EXPECT_EQ(GL_INVALID_ENUM, GetGLError());
+}
+
+TEST_P(GLES2DecoderTest2, GetProgramivInvalidArgs2_0) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_DELETE_STATUS, kInvalidSharedMemoryId, 0);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
+TEST_P(GLES2DecoderTest2, GetProgramivInvalidArgs2_1) {
+ EXPECT_CALL(*gl_, GetProgramiv(_, _, _)).Times(0);
+ SpecializedSetup<cmds::GetProgramiv, 0>(false);
+ cmds::GetProgramiv::Result* result =
+ static_cast<cmds::GetProgramiv::Result*>(shared_memory_address_);
+ result->size = 0;
+ cmds::GetProgramiv cmd;
+ cmd.Init(client_program_id_, GL_DELETE_STATUS, shared_memory_id_,
+ kInvalidSharedMemoryOffset);
+ EXPECT_EQ(error::kOutOfBounds, ExecuteCmd(cmd));
+ EXPECT_EQ(0u, result->size);
+}
+
TEST_P(GLES2DecoderTest2, GetProgramInfoLogValidArgs) {
const char* kInfo = "hello";
const uint32_t kBucketId = 123;
@@ -1298,13 +1351,4 @@ TEST_P(GLES2DecoderTest2, Uniform3ivImmediateValidArgs) {
EXPECT_EQ(error::kNoError, ExecuteImmediateCmd(cmd, sizeof(temp)));
EXPECT_EQ(GL_NO_ERROR, GetGLError());
}
-
-TEST_P(GLES2DecoderTest2, Uniform4fValidArgs) {
- EXPECT_CALL(*gl_, Uniform4fv(1, 1, _));
- SpecializedSetup<cmds::Uniform4f, 0>(true);
- cmds::Uniform4f cmd;
- cmd.Init(1, 2, 3, 4, 5);
- EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
- EXPECT_EQ(GL_NO_ERROR, GetGLError());
-}
#endif // GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_2_AUTOGEN_H_
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
index 580131f038c..b37cb1943d8 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3.cc
@@ -51,6 +51,12 @@ INSTANTIATE_TEST_SUITE_P(Service, GLES2DecoderTest3, ::testing::Bool());
INSTANTIATE_TEST_SUITE_P(Service, GLES3DecoderTest3, ::testing::Bool());
template <>
+void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4f, 0>(
+ bool /* valid */) {
+ SetupShaderForUniform(GL_FLOAT_VEC4);
+}
+
+template <>
void GLES2DecoderTestBase::SpecializedSetup<cmds::Uniform4fvImmediate, 0>(
bool /* valid */) {
SetupShaderForUniform(GL_FLOAT_VEC4);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
index 00161c02032..10ec529b465 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_decoder_unittest_3_autogen.h
@@ -12,6 +12,15 @@
#ifndef GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
#define GPU_COMMAND_BUFFER_SERVICE_GLES2_CMD_DECODER_UNITTEST_3_AUTOGEN_H_
+TEST_P(GLES2DecoderTest3, Uniform4fValidArgs) {
+ EXPECT_CALL(*gl_, Uniform4fv(1, 1, _));
+ SpecializedSetup<cmds::Uniform4f, 0>(true);
+ cmds::Uniform4f cmd;
+ cmd.Init(1, 2, 3, 4, 5);
+ EXPECT_EQ(error::kNoError, ExecuteCmd(cmd));
+ EXPECT_EQ(GL_NO_ERROR, GetGLError());
+}
+
TEST_P(GLES2DecoderTest3, Uniform4fvImmediateValidArgs) {
cmds::Uniform4fvImmediate& cmd = *GetImmediateAs<cmds::Uniform4fvImmediate>();
SpecializedSetup<cmds::Uniform4fvImmediate, 0>(true);
diff --git a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
index 5284cc802b9..38d67d62bb3 100644
--- a/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
+++ b/chromium/gpu/command_buffer/service/gles2_cmd_validation_implementation_autogen.h
@@ -437,6 +437,13 @@ static const GLenum valid_g_l_state_table_es3[] = {
GL_UNPACK_SKIP_IMAGES,
GL_UNPACK_SKIP_PIXELS,
GL_UNPACK_SKIP_ROWS,
+ GL_BLEND_EQUATION_RGB,
+ GL_BLEND_EQUATION_ALPHA,
+ GL_BLEND_SRC_RGB,
+ GL_BLEND_SRC_ALPHA,
+ GL_BLEND_DST_RGB,
+ GL_BLEND_DST_ALPHA,
+ GL_COLOR_WRITEMASK,
};
bool Validators::GetMaxIndexTypeValidator::IsValid(const GLenum value) const {
@@ -512,6 +519,13 @@ static const GLenum valid_indexed_g_l_state_table[] = {
GL_UNIFORM_BUFFER_BINDING,
GL_UNIFORM_BUFFER_SIZE,
GL_UNIFORM_BUFFER_START,
+ GL_BLEND_EQUATION_RGB,
+ GL_BLEND_EQUATION_ALPHA,
+ GL_BLEND_SRC_RGB,
+ GL_BLEND_SRC_ALPHA,
+ GL_BLEND_DST_RGB,
+ GL_BLEND_DST_ALPHA,
+ GL_COLOR_WRITEMASK,
};
bool Validators::InternalFormatParameterValidator::IsValid(
@@ -773,6 +787,7 @@ bool Validators::ShaderTypeValidator::IsValid(const GLenum value) const {
bool Validators::SharedImageAccessModeValidator::IsValid(
const GLenum value) const {
switch (value) {
+ case GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM:
case GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM:
case GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM:
return true;
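
The pname validators are plain enum arrays scanned by a generated
ValueValidator, so supporting the new indexed blend state is just a matter of
appending entries. Schematically (the real lookup is generated code, not this
literal function):

    bool IsValidIndexedGLState(GLenum value) {
      for (GLenum valid : valid_indexed_g_l_state_table)
        if (valid == value)
          return true;
      return false;
    }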
diff --git a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
index fa686c24432..97b6d43e986 100644
--- a/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
+++ b/chromium/gpu/command_buffer/service/gpu_command_buffer_memory_tracker.cc
@@ -46,9 +46,11 @@ GpuCommandBufferMemoryTracker::GpuCommandBufferMemoryTracker(
: command_buffer_id_(command_buffer_id),
client_tracing_id_(client_tracing_id),
context_type_(context_type),
- memory_pressure_listener_(base::BindRepeating(
- &GpuCommandBufferMemoryTracker::LogMemoryStatsPressure,
- base::Unretained(this))),
+ memory_pressure_listener_(
+ FROM_HERE,
+ base::BindRepeating(
+ &GpuCommandBufferMemoryTracker::LogMemoryStatsPressure,
+ base::Unretained(this))),
observer_(observer) {
// Set up |memory_stats_timer_| to call LogMemoryPeriodic periodically
// via the provided |task_runner|.
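
The base::MemoryPressureListener constructor now takes the creation site as
its first argument, hence the added FROM_HERE. A minimal sketch of the updated
shape, with OnPressure as a placeholder handler:

    // Placeholder handler; the listener invokes it with the reported level.
    void OnPressure(base::MemoryPressureListener::MemoryPressureLevel level) {
      // React to the pressure level (e.g. log or trim caches).
    }

    base::MemoryPressureListener listener(
        FROM_HERE, base::BindRepeating(&OnPressure));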
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
index c743800f0a5..c1ebdde777a 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.cc
@@ -45,22 +45,38 @@ bool IsSurfaceControl(TextureOwner::Mode mode) {
}
} // namespace
+// This class is safe to create and destroy on different threads: destruction
+// is marshalled to the correct thread. It is not safe to use this class
+// concurrently from multiple threads.
class ImageReaderGLOwner::ScopedHardwareBufferImpl
: public base::android::ScopedHardwareBufferFenceSync {
public:
- ScopedHardwareBufferImpl(scoped_refptr<ImageReaderGLOwner> texture_owner,
+ ScopedHardwareBufferImpl(base::WeakPtr<ImageReaderGLOwner> texture_owner,
AImage* image,
base::android::ScopedHardwareBufferHandle handle,
base::ScopedFD fence_fd)
: base::android::ScopedHardwareBufferFenceSync(std::move(handle),
- std::move(fence_fd)),
+ std::move(fence_fd),
+ base::ScopedFD(),
+ true /* is_video */),
texture_owner_(std::move(texture_owner)),
- image_(image) {
+ image_(image),
+ task_runner_(base::ThreadTaskRunnerHandle::Get()) {
DCHECK(image_);
texture_owner_->RegisterRefOnImage(image_);
}
+
~ScopedHardwareBufferImpl() override {
- texture_owner_->ReleaseRefOnImage(image_, std::move(read_fence_));
+ if (task_runner_->RunsTasksInCurrentSequence()) {
+ if (texture_owner_) {
+ texture_owner_->ReleaseRefOnImage(image_, std::move(read_fence_));
+ }
+ } else {
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(&gpu::ImageReaderGLOwner::ReleaseRefOnImage,
+ texture_owner_, image_, std::move(read_fence_)));
+ }
}
void SetReadFence(base::ScopedFD fence_fd, bool has_context) final {
@@ -72,8 +88,9 @@ class ImageReaderGLOwner::ScopedHardwareBufferImpl
private:
base::ScopedFD read_fence_;
- scoped_refptr<ImageReaderGLOwner> texture_owner_;
+ base::WeakPtr<ImageReaderGLOwner> texture_owner_;
AImage* image_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
};
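
The destructor above is the standard thread-affinity idiom: run the release
inline when already on the owning sequence, otherwise post it back, bound
through a WeakPtr so it safely no-ops if the owner has been destroyed in the
meantime. Reduced to its core (names generic):

    if (task_runner_->RunsTasksInCurrentSequence()) {
      if (owner_)  // WeakPtr: the owner may already be gone.
        owner_->Release(resource_);
    } else {
      // A method bound through a WeakPtr becomes a no-op once the pointed-to
      // object is destroyed, so this post is always safe.
      task_runner_->PostTask(
          FROM_HERE, base::BindOnce(&Owner::Release, owner_, resource_));
    }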
ImageReaderGLOwner::ImageReaderGLOwner(
@@ -305,7 +322,7 @@ ImageReaderGLOwner::GetAHardwareBuffer() {
return nullptr;
return std::make_unique<ScopedHardwareBufferImpl>(
- this, current_image_ref_->image(),
+ weak_factory_.GetWeakPtr(), current_image_ref_->image(),
base::android::ScopedHardwareBufferHandle::Create(buffer),
current_image_ref_->GetReadyFence());
}
@@ -367,96 +384,6 @@ void ImageReaderGLOwner::ReleaseRefOnImage(AImage* image,
image_refs_.erase(it);
}
-void ImageReaderGLOwner::GetTransformMatrix(float mtx[]) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
-
- // Assign a Y inverted Identity matrix. Both MCVD and AVDA path performs a Y
- // inversion of this matrix later. Hence if we assign a Y inverted matrix
- // here, it simply becomes an identity matrix later and will have no effect
- // on the image data.
- static constexpr float kYInvertedIdentity[16]{1, 0, 0, 0, 0, -1, 0, 0,
- 0, 0, 1, 0, 0, 1, 0, 1};
- memcpy(mtx, kYInvertedIdentity, sizeof(kYInvertedIdentity));
-
-
- // Get the crop rectangle associated with this image. The crop rectangle
- // specifies the region of valid pixels in the image.
- gfx::Rect crop_rect = GetCropRect();
- if (crop_rect.IsEmpty())
- return;
-
- // Get the AHardwareBuffer to query its dimensions.
- AHardwareBuffer* buffer = nullptr;
- loader_.AImage_getHardwareBuffer(current_image_ref_->image(), &buffer);
- if (!buffer) {
- DLOG(ERROR) << "Unable to get an AHardwareBuffer from the image";
- return;
- }
-
- // Get the buffer descriptor. Note that for querying the buffer descriptor, we
- // do not need to wait on the AHB to be ready.
- AHardwareBuffer_Desc desc;
- base::AndroidHardwareBufferCompat::GetInstance().Describe(buffer, &desc);
-
- // Note: Below calculation of shrink_amount and the transform matrix params
- // tx,ty,sx,sy is copied from the android
- // SurfaceTexture::computeCurrentTransformMatrix() -
- // https://android.googlesource.com/platform/frameworks/native/+/5c1139f/libs/gui/SurfaceTexture.cpp#516.
- // We are assuming here that bilinear filtering is always enabled for
- // sampling the texture.
- float shrink_amount = 0.0f;
- float tx = 0.0f, ty = 0.0f, sx = 1.0f, sy = 1.0f;
-
- // In order to prevent bilinear sampling beyond the edge of the
- // crop rectangle we may need to shrink it by 2 texels in each
- // dimension. Normally this would just need to take 1/2 a texel
- // off each end, but because the chroma channels of YUV420 images
- // are subsampled we may need to shrink the crop region by a whole
- // texel on each side.
- switch (desc.format) {
- case AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM:
- case AHARDWAREBUFFER_FORMAT_R8G8B8X8_UNORM:
- case AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM:
- case AHARDWAREBUFFER_FORMAT_R5G6B5_UNORM:
- // We know there's no subsampling of any channels, so we
- // only need to shrink by a half a pixel.
- shrink_amount = 0.5;
- break;
- default:
- // If we don't recognize the format, we must assume the
- // worst case (that we care about), which is YUV420.
- shrink_amount = 1.0;
- }
-
- int32_t crop_rect_width = crop_rect.width();
- int32_t crop_rect_height = crop_rect.height();
- int32_t crop_rect_left = crop_rect.x();
- int32_t crop_rect_bottom = crop_rect.y() + crop_rect_height;
- int32_t buffer_width = desc.width;
- int32_t buffer_height = desc.height;
- DCHECK_GT(buffer_width, 0);
- DCHECK_GT(buffer_height, 0);
-
- // Only shrink the dimensions that are not the size of the buffer.
- if (crop_rect_width < buffer_width) {
- tx = (float(crop_rect_left) + shrink_amount) / buffer_width;
- sx = (float(crop_rect_width) - (2.0f * shrink_amount)) / buffer_width;
- }
-
- if (crop_rect_height < buffer_height) {
- ty = (float(buffer_height - crop_rect_bottom) + shrink_amount) /
- buffer_height;
- sy = (float(crop_rect_height) - (2.0f * shrink_amount)) / buffer_height;
- }
-
- // Update the transform matrix with above parameters by also taking into
- // account Y inversion/ vertical flip.
- mtx[0] = sx;
- mtx[5] = 0 - sy;
- mtx[12] = tx;
- mtx[13] = 1 - ty;
-}
-
void ImageReaderGLOwner::ReleaseBackBuffers() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
// ReleaseBackBuffers() call is not required with image reader.
@@ -482,7 +409,7 @@ void ImageReaderGLOwner::OnFrameAvailable(void* context, AImageReader* reader) {
image_reader_ptr->frame_available_cb_.Run();
}
-void ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
+bool ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) {
@@ -499,7 +426,7 @@ void ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
if (!buffer) {
*coded_size = gfx::Size();
*visible_rect = gfx::Rect();
- return;
+ return false;
}
// Get the buffer descriptor. Note that for querying the buffer descriptor, we
// do not need to wait on the AHB to be ready.
@@ -508,6 +435,8 @@ void ImageReaderGLOwner::GetCodedSizeAndVisibleRect(
*visible_rect = GetCropRect();
*coded_size = gfx::Size(desc.width, desc.height);
+
+ return true;
}
ImageReaderGLOwner::ImageRef::ImageRef() = default;
diff --git a/chromium/gpu/command_buffer/service/image_reader_gl_owner.h b/chromium/gpu/command_buffer/service/image_reader_gl_owner.h
index 0d9f93f0475..b6c2d2c0d3b 100644
--- a/chromium/gpu/command_buffer/service/image_reader_gl_owner.h
+++ b/chromium/gpu/command_buffer/service/image_reader_gl_owner.h
@@ -9,6 +9,7 @@
#include "base/android/android_image_reader_compat.h"
#include "base/containers/flat_map.h"
+#include "base/memory/weak_ptr.h"
#include "gpu/command_buffer/service/texture_owner.h"
#include "gpu/gpu_gles2_export.h"
#include "ui/gl/gl_fence_egl.h"
@@ -37,12 +38,10 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
gl::ScopedJavaSurface CreateJavaSurface() const override;
void UpdateTexImage() override;
void EnsureTexImageBound() override;
- void GetTransformMatrix(float mtx[16]) override;
void ReleaseBackBuffers() override;
std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
GetAHardwareBuffer() override;
- gfx::Rect GetCropRect() override;
- void GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
+ bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) override;
@@ -89,6 +88,8 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
void RegisterRefOnImage(AImage* image);
void ReleaseRefOnImage(AImage* image, base::ScopedFD fence_fd);
+ gfx::Rect GetCropRect();
+
static void OnFrameAvailable(void* context, AImageReader* reader);
// AImageReader instance
@@ -132,6 +133,8 @@ class GPU_GLES2_EXPORT ImageReaderGLOwner : public TextureOwner {
THREAD_CHECKER(thread_checker_);
+ base::WeakPtrFactory<ImageReaderGLOwner> weak_factory_{this};
+
DISALLOW_COPY_AND_ASSIGN(ImageReaderGLOwner);
};
diff --git a/chromium/gpu/command_buffer/service/memory_tracking.h b/chromium/gpu/command_buffer/service/memory_tracking.h
index ea211deddf6..d55a130e2ec 100644
--- a/chromium/gpu/command_buffer/service/memory_tracking.h
+++ b/chromium/gpu/command_buffer/service/memory_tracking.h
@@ -9,7 +9,7 @@
#include <stdint.h>
#include <string>
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/trace_event/trace_event.h"
diff --git a/chromium/gpu/command_buffer/service/mock_texture_owner.h b/chromium/gpu/command_buffer/service/mock_texture_owner.h
index e98cdc4a729..2cf23affb93 100644
--- a/chromium/gpu/command_buffer/service/mock_texture_owner.h
+++ b/chromium/gpu/command_buffer/service/mock_texture_owner.h
@@ -33,12 +33,11 @@ class MockTextureOwner : public TextureOwner {
MOCK_CONST_METHOD0(CreateJavaSurface, gl::ScopedJavaSurface());
MOCK_METHOD0(UpdateTexImage, void());
MOCK_METHOD0(EnsureTexImageBound, void());
- MOCK_METHOD1(GetTransformMatrix, void(float mtx[16]));
MOCK_METHOD0(ReleaseBackBuffers, void());
MOCK_METHOD1(OnTextureDestroyed, void(gpu::gles2::AbstractTexture*));
MOCK_METHOD1(SetFrameAvailableCallback, void(const base::RepeatingClosure&));
MOCK_METHOD3(GetCodedSizeAndVisibleRect,
- void(gfx::Size rotated_visible_size,
+ bool(gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect));
@@ -48,15 +47,9 @@ class MockTextureOwner : public TextureOwner {
return nullptr;
}
- gfx::Rect GetCropRect() override {
- ++get_crop_rect_count;
- return gfx::Rect();
- }
-
gl::GLContext* fake_context;
gl::GLSurface* fake_surface;
int get_a_hardware_buffer_count = 0;
- int get_crop_rect_count = 0;
bool expect_update_tex_image;
protected:
diff --git a/chromium/gpu/command_buffer/service/mocks.h b/chromium/gpu/command_buffer/service/mocks.h
index d01e9b6c4bd..ac9a5efb25c 100644
--- a/chromium/gpu/command_buffer/service/mocks.h
+++ b/chromium/gpu/command_buffer/service/mocks.h
@@ -16,7 +16,6 @@
#include <string>
#include <vector>
-#include "base/logging.h"
#include "gpu/command_buffer/common/cmd_buffer_common.h"
#include "gpu/command_buffer/service/async_api_interface.h"
#include "gpu/command_buffer/service/memory_tracking.h"
diff --git a/chromium/gpu/command_buffer/service/program_manager.h b/chromium/gpu/command_buffer/service/program_manager.h
index 800de937f5d..f99a099b0c8 100644
--- a/chromium/gpu/command_buffer/service/program_manager.h
+++ b/chromium/gpu/command_buffer/service/program_manager.h
@@ -13,7 +13,7 @@
#include <string>
#include <vector>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/common_decoder.h"
diff --git a/chromium/gpu/command_buffer/service/raster_decoder.cc b/chromium/gpu/command_buffer/service/raster_decoder.cc
index 12302131817..aec7821dc48 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder.cc
@@ -208,7 +208,8 @@ bool AllowedBetweenBeginEndRaster(CommandId command) {
// avoid it as much as possible.
class RasterDecoderImpl final : public RasterDecoder,
public gles2::ErrorStateClient,
- public ServiceFontManager::Client {
+ public ServiceFontManager::Client,
+ public SharedContextState::ContextLostObserver {
public:
RasterDecoderImpl(DecoderClient* client,
CommandBufferServiceBase* command_buffer_service,
@@ -365,6 +366,9 @@ class RasterDecoderImpl final : public RasterDecoder,
scoped_refptr<Buffer> GetShmBuffer(uint32_t shm_id) override;
void ReportProgress() override;
+ // SharedContextState::ContextLostObserver implementation.
+ void OnContextLost() override;
+
private:
gles2::ContextState* state() const {
if (use_passthrough_) {
@@ -401,7 +405,7 @@ class RasterDecoderImpl final : public RasterDecoder,
if (!flush_workaround_disabled_for_test_) {
TRACE_EVENT0("gpu", "RasterDecoderImpl::FlushToWorkAroundMacCrashes");
if (gr_context())
- gr_context()->flush();
+ gr_context()->flushAndSubmit();
api()->glFlushFn();
// Flushes can be expensive, yield to allow interruption after each flush.
@@ -583,8 +587,6 @@ class RasterDecoderImpl final : public RasterDecoder,
bool use_passthrough_ = false;
bool use_ddl_ = false;
- bool reset_by_robustness_extension_ = false;
-
// The current decoder error communicates the decoder error through command
// processing functions that do not return the error value. Should be set
// only if not returning an error.
@@ -756,9 +758,12 @@ RasterDecoderImpl::RasterDecoderImpl(
font_manager_(base::MakeRefCounted<ServiceFontManager>(this)),
is_privileged_(is_privileged) {
DCHECK(shared_context_state_);
+ shared_context_state_->AddContextLostObserver(this);
}
-RasterDecoderImpl::~RasterDecoderImpl() = default;
+RasterDecoderImpl::~RasterDecoderImpl() {
+ shared_context_state_->RemoveContextLostObserver(this);
+}
base::WeakPtr<DecoderContext> RasterDecoderImpl::AsWeakPtr() {
return weak_ptr_factory_.GetWeakPtr();
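
The constructor/destructor pairing above follows the usual observer lifetime contract: register on construction, unregister on destruction, so SharedContextState can never notify a dead decoder. Restated as a minimal sketch (Decoder is a hypothetical stand-in for RasterDecoderImpl):

class Decoder : public SharedContextState::ContextLostObserver {
 public:
  explicit Decoder(scoped_refptr<SharedContextState> state)
      : state_(std::move(state)) {
    state_->AddContextLostObserver(this);
  }
  ~Decoder() override { state_->RemoveContextLostObserver(this); }

  // SharedContextState::ContextLostObserver:
  void OnContextLost() override {
    // Forward the shared loss reason to this decoder's command buffer.
  }

 private:
  scoped_refptr<SharedContextState> state_;
};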
@@ -854,16 +859,12 @@ void RasterDecoderImpl::Destroy(bool have_context) {
DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
end_semaphores_.clear();
sk_surface_ = nullptr;
- if (shared_image_) {
- scoped_shared_image_write_.reset();
- shared_image_.reset();
- } else {
- sk_surface_for_testing_.reset();
- }
- }
- if (gr_context()) {
- gr_context()->flush();
}
+ if (gr_context())
+ gr_context()->flushAndSubmit();
+ scoped_shared_image_write_.reset();
+ shared_image_.reset();
+ sk_surface_for_testing_.reset();
}
copy_tex_image_blit_.reset();
@@ -891,18 +892,11 @@ bool RasterDecoderImpl::MakeCurrent() {
if (shared_context_state_->context_lost() ||
!shared_context_state_->MakeCurrent(nullptr)) {
LOG(ERROR) << " RasterDecoderImpl: Context lost during MakeCurrent.";
- MarkContextLost(error::kMakeCurrentFailed);
return false;
}
DCHECK_EQ(api(), gl::g_current_gl_context);
- if (CheckResetStatus()) {
- LOG(ERROR)
- << " RasterDecoderImpl: Context reset detected after MakeCurrent.";
- return false;
- }
-
// Rebind textures if the service ids may have changed.
RestoreAllExternalTextureBindingsIfNeeded();
@@ -948,6 +942,10 @@ Capabilities RasterDecoderImpl::GetCapabilities() {
#else
NOTREACHED();
#endif
+ } else if (shared_context_state_->GrContextIsDawn()) {
+ // TODO(crbug.com/1090476): Query Dawn for this value once an API exists for
+ // capabilities.
+ caps.max_texture_size = 8192;
} else {
NOTIMPLEMENTED();
}
@@ -1113,55 +1111,27 @@ void RasterDecoderImpl::SetLevelInfo(uint32_t client_id,
}
bool RasterDecoderImpl::WasContextLost() const {
- return context_lost_;
+ return shared_context_state_->context_lost();
}
bool RasterDecoderImpl::WasContextLostByRobustnessExtension() const {
- return WasContextLost() && reset_by_robustness_extension_;
+ return shared_context_state_->device_needs_reset();
}
void RasterDecoderImpl::MarkContextLost(error::ContextLostReason reason) {
- // Only lose the context once.
- if (WasContextLost())
- return;
+ shared_context_state_->MarkContextLost(reason);
+}
- // Don't make GL calls in here, the context might not be current.
- context_lost_ = true;
- command_buffer_service()->SetContextLostReason(reason);
+void RasterDecoderImpl::OnContextLost() {
+ DCHECK(shared_context_state_->context_lost());
+ command_buffer_service()->SetContextLostReason(
+ *shared_context_state_->context_lost_reason());
current_decoder_error_ = error::kLostContext;
}
bool RasterDecoderImpl::CheckResetStatus() {
DCHECK(!WasContextLost());
- DCHECK(shared_context_state_->context()->IsCurrent(nullptr));
-
- // If the reason for the call was a GL error, we can try to determine the
- // reset status more accurately.
- GLenum driver_status =
- shared_context_state_->context()->CheckStickyGraphicsResetStatus();
- if (driver_status == GL_NO_ERROR)
- return false;
-
- LOG(ERROR) << "RasterDecoder context lost via ARB/EXT_robustness. Reset "
- "status = "
- << gles2::GLES2Util::GetStringEnum(driver_status);
-
- switch (driver_status) {
- case GL_GUILTY_CONTEXT_RESET_ARB:
- MarkContextLost(error::kGuilty);
- break;
- case GL_INNOCENT_CONTEXT_RESET_ARB:
- MarkContextLost(error::kInnocent);
- break;
- case GL_UNKNOWN_CONTEXT_RESET_ARB:
- MarkContextLost(error::kUnknown);
- break;
- default:
- NOTREACHED();
- return false;
- }
- reset_by_robustness_extension_ = true;
- return true;
+ return shared_context_state_->CheckResetStatus(/*needs_gl=*/false);
}
gles2::Logger* RasterDecoderImpl::GetLogger() {
@@ -1500,14 +1470,13 @@ void RasterDecoderImpl::DisableFlushWorkaroundForTest() {
void RasterDecoderImpl::OnContextLostError() {
if (!WasContextLost()) {
// Need to lose current context before broadcasting!
- CheckResetStatus();
- reset_by_robustness_extension_ = true;
+ shared_context_state_->CheckResetStatus(/*needs_gl=*/false);
}
}
void RasterDecoderImpl::OnOutOfMemoryError() {
if (lose_context_when_out_of_memory_ && !WasContextLost()) {
- if (!CheckResetStatus()) {
+ if (!shared_context_state_->CheckResetStatus(/*needs_gl=*/false)) {
MarkContextLost(error::kOutOfMemory);
}
}
@@ -2071,17 +2040,14 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALGL(
if (gles2::GLStreamTextureImage* image =
source_texture->GetLevelStreamTextureImage(GL_TEXTURE_EXTERNAL_OES,
source_level)) {
- GLfloat transform_matrix[16];
- image->GetTextureMatrix(transform_matrix);
-
- copy_texture_chromium_->DoCopySubTextureWithTransform(
+ copy_texture_chromium_->DoCopySubTexture(
this, source_target, source_texture->service_id(), source_level,
source_internal_format, dest_target, dest_texture->service_id(),
dest_level, dest_internal_format, xoffset, yoffset, x, y, width,
height, dest_size.width(), dest_size.height(), source_size.width(),
source_size.height(), unpack_flip_y, unpack_premultiply_alpha,
- false /* unpack_unmultiply_alpha */, false /* dither */,
- transform_matrix, copy_tex_image_blit_.get());
+ /*unpack_unmultiply_alpha=*/false, /*dither=*/false,
+ gles2::CopyTextureMethod::DIRECT_DRAW, copy_tex_image_blit_.get());
dest_texture->SetLevelClearedRect(dest_target, dest_level,
new_cleared_rect);
return;
@@ -2255,8 +2221,13 @@ void RasterDecoderImpl::DoCopySubTextureINTERNALSkia(
};
gpu::AddVulkanCleanupTaskForSkiaFlush(
shared_context_state_->vk_context_provider(), &flush_info);
- dest_scoped_access->surface()->flush(
+ auto result = dest_scoped_access->surface()->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
+  // If |end_semaphores| is empty, the queue submission can be deferred.
+ if (!end_semaphores.empty()) {
+ DCHECK_EQ(result, GrSemaphoresSubmitted::kYes);
+ gr_context()->submit();
+ }
if (!dest_shared_image->IsCleared()) {
dest_shared_image->SetClearedRect(new_cleared_rect);
@@ -2297,6 +2268,15 @@ void RasterDecoderImpl::DoWritePixelsINTERNAL(GLint x_offset,
return;
}
+ if (SkColorTypeBytesPerPixel(viz::ResourceFormatToClosestSkColorType(
+ true, dest_shared_image->format())) !=
+ SkColorTypeBytesPerPixel(static_cast<SkColorType>(src_sk_color_type))) {
+ LOCAL_SET_GL_ERROR(GL_INVALID_OPERATION, "glWritePixels",
+ "Bytes per pixel for src SkColorType and dst "
+ "SkColorType must be the same.");
+ return;
+ }
+
// If present, the color space is serialized into shared memory before the
// pixel data.
sk_sp<SkColorSpace> color_space;
@@ -2375,8 +2355,12 @@ void RasterDecoderImpl::DoWritePixelsINTERNAL(GLint x_offset,
};
gpu::AddVulkanCleanupTaskForSkiaFlush(
shared_context_state_->vk_context_provider(), &flush_info);
- dest_scoped_access->surface()->flush(
+ auto result = dest_scoped_access->surface()->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
+ if (!end_semaphores.empty()) {
+ DCHECK_EQ(result, GrSemaphoresSubmitted::kYes);
+ gr_context()->submit();
+ }
if (!dest_shared_image->IsCleared()) {
dest_shared_image->SetClearedRect(
@@ -2565,8 +2549,12 @@ void RasterDecoderImpl::DoConvertYUVMailboxesToRGBINTERNAL(
};
gpu::AddVulkanCleanupTaskForSkiaFlush(
shared_context_state_->vk_context_provider(), &flush_info);
- dest_scoped_access->surface()->flush(
+ auto result = dest_scoped_access->surface()->flush(
SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
+ if (!end_semaphores.empty()) {
+ DCHECK_EQ(result, GrSemaphoresSubmitted::kYes);
+ gr_context()->submit();
+ }
if (!images[YUVConversionMailboxIndex::kDestIndex]->IsCleared() &&
drew_image) {
@@ -2899,13 +2887,15 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
.fNumSemaphores = end_semaphores_.size(),
.fSignalSemaphores = end_semaphores_.data(),
};
- AddVulkanCleanupTaskForSkiaFlush(
- shared_context_state_->vk_context_provider(), &flush_info);
auto result = sk_surface_->flush(SkSurface::BackendSurfaceAccess::kPresent,
flush_info);
- DCHECK(result == GrSemaphoresSubmitted::kYes || end_semaphores_.empty());
- end_semaphores_.clear();
-
+ // If |end_semaphores_| is not empty, we will submit work to the queue.
+  // Otherwise the queue submission can be deferred.
+ if (!end_semaphores_.empty()) {
+ DCHECK(result == GrSemaphoresSubmitted::kYes);
+ gr_context()->submit();
+ end_semaphores_.clear();
+ }
// The DDL pins memory for the recorded ops so it must be kept alive until
   // it's flushed.
ddl_.reset();
@@ -2913,13 +2903,10 @@ void RasterDecoderImpl::DoEndRasterCHROMIUM() {
shared_context_state_->UpdateSkiaOwnedMemorySize();
sk_surface_ = nullptr;
- if (!shared_image_) {
- // Test only path for SetUpForRasterCHROMIUMForTest.
- sk_surface_for_testing_.reset();
- } else {
- scoped_shared_image_write_.reset();
- shared_image_.reset();
- }
+ scoped_shared_image_write_.reset();
+ shared_image_.reset();
+ // Test only path for SetUpForRasterCHROMIUMForTest.
+ sk_surface_for_testing_.reset();
// Unlock all font handles. This needs to be deferred until
// SkSurface::flush since that flushes batched Gr operations
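
The repeated flush-then-submit blocks in this file all follow one rule: SkSurface::flush() only records GPU work, and signal semaphores are not visible to other contexts until GrContext::submit() pushes the work to the queue, so submission is forced exactly when semaphores are pending. A sketch of the shared shape, assuming the Skia GrFlushInfo/submit() API as used above:

#include <vector>

#include "base/check_op.h"
#include "third_party/skia/include/core/SkSurface.h"
#include "third_party/skia/include/gpu/GrContext.h"

void FlushAndMaybeSubmit(SkSurface* surface,
                         GrContext* gr_context,
                         std::vector<GrBackendSemaphore>* end_semaphores) {
  GrFlushInfo flush_info;
  flush_info.fNumSemaphores = end_semaphores->size();
  flush_info.fSignalSemaphores = end_semaphores->data();
  auto result =
      surface->flush(SkSurface::BackendSurfaceAccess::kNoAccess, flush_info);
  if (!end_semaphores->empty()) {
    // Semaphores are only signaled once the recorded work is submitted.
    DCHECK_EQ(result, GrSemaphoresSubmitted::kYes);
    gr_context->submit();
  }
  // With no semaphores to signal, submission stays batched for later.
}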
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
index 9375b1e17ae..ff476d4f3cf 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_base.cc
@@ -187,7 +187,7 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
new gl::GLShareGroup(), surface_, context_,
feature_info()->workarounds().use_virtualized_gl_contexts,
base::DoNothing(), GpuPreferences().gr_context_type);
-
+ shared_context_state_->disable_check_reset_status_throttling_for_test_ = true;
shared_context_state_->InitializeGL(GpuPreferences(), feature_info_);
command_buffer_service_.reset(new FakeCommandBufferServiceBase());
@@ -213,10 +213,14 @@ void RasterDecoderTestBase::InitDecoder(const InitState& init) {
gpu::ContextResult::kSuccess);
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
+ EXPECT_CALL(*gl_, GetError())
+ .WillOnce(Return(GL_NO_ERROR))
+ .RetiresOnSaturation();
if (context_->HasRobustness()) {
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(GL_NO_ERROR));
}
+
decoder_->MakeCurrent();
decoder_->BeginDecoding();
diff --git a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
index 2f778122fa8..d82e5e8b852 100644
--- a/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
+++ b/chromium/gpu/command_buffer/service/raster_decoder_unittest_context_lost.cc
@@ -34,13 +34,13 @@ class RasterDecoderOOMTest : public RasterDecoderManualInitTest {
if (context_->HasRobustness()) {
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(reset_status));
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_CONTEXT_LOST_KHR));
+ } else {
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_NO_ERROR));
}
- // glGetError merges driver error state with decoder error state. Return
- // GL_NO_ERROR from mock driver and GL_OUT_OF_MEMORY from decoder.
- EXPECT_CALL(*gl_, GetError())
- .WillOnce(Return(GL_NO_ERROR))
- .RetiresOnSaturation();
+ // RasterDecoder::HandleGetError merges driver error state with decoder
+ // error state. Return GL_OUT_OF_MEMORY from decoder.
GetDecoder()->SetOOMErrorForTest();
cmds::GetError cmd;
@@ -112,9 +112,9 @@ class RasterDecoderLostContextTest : public RasterDecoderManualInitTest {
void DoGetErrorWithContextLost(GLenum reset_status) {
DCHECK(context_->HasExtension("GL_KHR_robustness"));
- EXPECT_CALL(*gl_, GetError())
- .WillOnce(Return(GL_CONTEXT_LOST_KHR))
- .RetiresOnSaturation();
+  // Once context loss has occurred, the driver will always return
+ // GL_CONTEXT_LOST_KHR.
+ EXPECT_CALL(*gl_, GetError()).WillRepeatedly(Return(GL_CONTEXT_LOST_KHR));
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(reset_status));
cmds::GetError cmd;
@@ -147,6 +147,20 @@ TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrent) {
ClearCurrentDecoderError();
}
+TEST_P(RasterDecoderLostContextTest, LostFromDriverOOM) {
+ Init(/*has_robustness=*/false);
+ EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
+ EXPECT_CALL(*gl_, GetError()).WillOnce(Return(GL_OUT_OF_MEMORY));
+ EXPECT_FALSE(decoder_->WasContextLost());
+ decoder_->MakeCurrent();
+ EXPECT_TRUE(decoder_->WasContextLost());
+ EXPECT_EQ(error::kOutOfMemory, GetContextLostReason());
+
+ // We didn't process commands, so we need to clear the decoder error,
+ // so that we can shut down cleanly.
+ ClearCurrentDecoderError();
+}
+
TEST_P(RasterDecoderLostContextTest, LostFromMakeCurrentWithRobustness) {
Init(/*has_robustness=*/true); // with robustness
// If we can't make the context current, we cannot query the robustness
@@ -215,6 +229,7 @@ TEST_P(RasterDecoderLostContextTest, LostFromResetAfterMakeCurrent) {
Init(/*has_robustness=*/true);
InSequence seq;
EXPECT_CALL(*context_, MakeCurrent(surface_.get())).WillOnce(Return(true));
+ EXPECT_CALL(*gl_, GetError()).WillOnce(Return(GL_CONTEXT_LOST_KHR));
EXPECT_CALL(*gl_, GetGraphicsResetStatusARB())
.WillOnce(Return(GL_GUILTY_CONTEXT_RESET_KHR));
decoder_->MakeCurrent();
diff --git a/chromium/gpu/command_buffer/service/sampler_manager.h b/chromium/gpu/command_buffer/service/sampler_manager.h
index 2b46c8dd099..9828eb0354e 100644
--- a/chromium/gpu/command_buffer/service/sampler_manager.h
+++ b/chromium/gpu/command_buffer/service/sampler_manager.h
@@ -8,7 +8,6 @@
#include <unordered_map>
#include <vector>
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/feature_info.h"
diff --git a/chromium/gpu/command_buffer/service/scheduler.cc b/chromium/gpu/command_buffer/service/scheduler.cc
index 95bc584365f..b9d88280b52 100644
--- a/chromium/gpu/command_buffer/service/scheduler.cc
+++ b/chromium/gpu/command_buffer/service/scheduler.cc
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/callback.h"
+#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
diff --git a/chromium/gpu/command_buffer/service/service_font_manager.cc b/chromium/gpu/command_buffer/service/service_font_manager.cc
index 50a1b4a74e2..dc41e910e7f 100644
--- a/chromium/gpu/command_buffer/service/service_font_manager.cc
+++ b/chromium/gpu/command_buffer/service/service_font_manager.cc
@@ -7,6 +7,7 @@
#include <inttypes.h>
#include "base/debug/dump_without_crashing.h"
+#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/rand_util.h"
#include "base/strings/stringprintf.h"
diff --git a/chromium/gpu/command_buffer/service/service_utils.cc b/chromium/gpu/command_buffer/service/service_utils.cc
index 4432b9222dc..bd7c709e241 100644
--- a/chromium/gpu/command_buffer/service/service_utils.cc
+++ b/chromium/gpu/command_buffer/service/service_utils.cc
@@ -165,8 +165,7 @@ GpuPreferences ParseGpuPreferences(const base::CommandLine* command_line) {
gpu_preferences.enable_dawn_backend_validation =
command_line->HasSwitch(switches::kEnableDawnBackendValidation);
gpu_preferences.gr_context_type = ParseGrContextType();
- gpu_preferences.use_vulkan = ParseVulkanImplementationName(
- command_line, gpu_preferences.gr_context_type);
+ gpu_preferences.use_vulkan = ParseVulkanImplementationName(command_line);
gpu_preferences.disable_vulkan_surface =
command_line->HasSwitch(switches::kDisableVulkanSurface);
@@ -192,8 +191,7 @@ GrContextType ParseGrContextType() {
}
VulkanImplementationName ParseVulkanImplementationName(
- const base::CommandLine* command_line,
- GrContextType gr_context_type) {
+ const base::CommandLine* command_line) {
if (command_line->HasSwitch(switches::kUseVulkan)) {
auto value = command_line->GetSwitchValueASCII(switches::kUseVulkan);
if (value.empty() || value == switches::kVulkanImplementationNameNative) {
@@ -202,11 +200,17 @@ VulkanImplementationName ParseVulkanImplementationName(
return VulkanImplementationName::kSwiftshader;
}
}
- // If the vulkan implementation is not set from --use-vulkan, the native
- // vulkan implementation will be used by default.
- return gr_context_type == GrContextType::kVulkan
- ? VulkanImplementationName::kNative
- : VulkanImplementationName::kNone;
+
+ // GrContext is not going to use Vulkan.
+ if (!base::FeatureList::IsEnabled(features::kVulkan))
+ return VulkanImplementationName::kNone;
+
+  // If the Vulkan feature is enabled from the command line, we force Vulkan
+  // to be used even if it is blacklisted.
+ return base::FeatureList::GetInstance()->IsFeatureOverriddenFromCommandLine(
+ features::kVulkan.name, base::FeatureList::OVERRIDE_ENABLE_FEATURE)
+ ? VulkanImplementationName::kForcedNative
+ : VulkanImplementationName::kNative;
}
} // namespace gles2
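
The new fallback makes the GrContext decision follow features::kVulkan directly: a feature forced on from the command line maps to kForcedNative so the GPU blacklist is bypassed, while a default- or trial-enabled feature maps to kNative. A hedged test sketch of the forced path; base::test::ScopedFeatureList is a real base/test helper, but the exact expectation here is an assumption about how command-line overrides register:

#include "base/command_line.h"
#include "base/test/scoped_feature_list.h"
#include "gpu/command_buffer/service/service_utils.h"
#include "testing/gtest/include/gtest/gtest.h"

TEST(ParseVulkanImplementationNameTest, ForcedNativeWhenFeatureForcedOn) {
  base::test::ScopedFeatureList scoped_features;
  // Registers kVulkan as a command-line enable override, as if the browser
  // had been started with --enable-features=Vulkan.
  scoped_features.InitFromCommandLine("Vulkan", "");
  EXPECT_EQ(gpu::VulkanImplementationName::kForcedNative,
            gpu::gles2::ParseVulkanImplementationName(
                base::CommandLine::ForCurrentProcess()));
}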
diff --git a/chromium/gpu/command_buffer/service/service_utils.h b/chromium/gpu/command_buffer/service/service_utils.h
index 76a802e433c..e10ff73dcfb 100644
--- a/chromium/gpu/command_buffer/service/service_utils.h
+++ b/chromium/gpu/command_buffer/service/service_utils.h
@@ -40,10 +40,10 @@ ParseGpuPreferences(const base::CommandLine* command_line);
GPU_GLES2_EXPORT GrContextType ParseGrContextType();
// Parse the value of --use-vulkan from the command line. If unspecified and
-// a Vulkan GrContext is going to be used, default to the native implementation.
+// features::kVulkan is enabled (GrContext is going to use Vulkan), default to
+// the native implementation.
GPU_GLES2_EXPORT VulkanImplementationName
-ParseVulkanImplementationName(const base::CommandLine* command_line,
- GrContextType gr_context_type);
+ParseVulkanImplementationName(const base::CommandLine* command_line);
} // namespace gles2
} // namespace gpu
diff --git a/chromium/gpu/command_buffer/service/shader_manager.h b/chromium/gpu/command_buffer/service/shader_manager.h
index deb4a491486..873f92ddcdf 100644
--- a/chromium/gpu/command_buffer/service/shader_manager.h
+++ b/chromium/gpu/command_buffer/service/shader_manager.h
@@ -8,7 +8,7 @@
#include <string>
#include <unordered_map>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/gl_utils.h"
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.cc b/chromium/gpu/command_buffer/service/shared_context_state.cc
index 765ad3a0d62..900a182c781 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.cc
+++ b/chromium/gpu/command_buffer/service/shared_context_state.cc
@@ -69,7 +69,7 @@ size_t MaxNumSkSurface() {
namespace gpu {
void SharedContextState::compileError(const char* shader, const char* errors) {
- if (!context_lost_) {
+ if (!context_lost()) {
LOG(ERROR) << "Skia shader compilation error\n"
<< "------------------------\n"
<< shader << "\nErrors:\n"
@@ -163,7 +163,7 @@ SharedContextState::~SharedContextState() {
// The context should be current so that texture deletes that result from
// destroying the cache happen in the right context (unless the context is
// lost in which case we don't delete the textures).
- DCHECK(IsCurrent(nullptr) || context_lost_);
+ DCHECK(IsCurrent(nullptr) || context_lost());
transfer_cache_.reset();
// We should have the last ref on this GrContext to ensure we're not holding
@@ -191,7 +191,7 @@ SharedContextState::~SharedContextState() {
this);
}
-void SharedContextState::InitializeGrContext(
+bool SharedContextState::InitializeGrContext(
const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
GrContextOptions::PersistentCache* cache,
@@ -220,7 +220,7 @@ void SharedContextState::InitializeGrContext(
if (!interface) {
LOG(ERROR) << "OOP raster support disabled: GrGLInterface creation "
"failed.";
- return;
+ return false;
}
if (activity_flags && cache) {
@@ -255,12 +255,13 @@ void SharedContextState::InitializeGrContext(
}
if (!gr_context_) {
- LOG(ERROR) << "OOP raster support disabled: GrContext creation "
- "failed.";
- } else {
- gr_context_->setResourceCacheLimit(max_resource_cache_bytes);
+ LOG(ERROR) << "OOP raster support disabled: GrContext creation failed.";
+ return false;
}
+
+ gr_context_->setResourceCacheLimit(max_resource_cache_bytes);
transfer_cache_ = std::make_unique<ServiceTransferCache>(gpu_preferences);
+ return true;
}
bool SharedContextState::InitializeGL(
@@ -424,28 +425,23 @@ bool SharedContextState::InitializeGL(
}
bool SharedContextState::MakeCurrent(gl::GLSurface* surface, bool needs_gl) {
- if (context_lost_)
+ if (context_lost())
return false;
- if (gr_context_ && gr_context_->abandoned()) {
- MarkContextLost();
- return false;
- }
-
- if (!GrContextIsGL() && !needs_gl)
- return true;
-
- gl::GLSurface* dont_care_surface =
- last_current_surface_ ? last_current_surface_ : surface_.get();
- surface = surface ? surface : dont_care_surface;
+ const bool using_gl = GrContextIsGL() || needs_gl;
+ if (using_gl) {
+ gl::GLSurface* dont_care_surface =
+ last_current_surface_ ? last_current_surface_ : surface_.get();
+ surface = surface ? surface : dont_care_surface;
- if (!context_->MakeCurrent(surface)) {
- MarkContextLost();
- return false;
+ if (!context_->MakeCurrent(surface)) {
+ MarkContextLost(error::kMakeCurrentFailed);
+ return false;
+ }
+ last_current_surface_ = surface;
}
- last_current_surface_ = surface;
- return true;
+ return !CheckResetStatus(needs_gl);
}
void SharedContextState::ReleaseCurrent(gl::GLSurface* surface) {
@@ -456,14 +452,14 @@ void SharedContextState::ReleaseCurrent(gl::GLSurface* surface) {
return;
last_current_surface_ = nullptr;
- if (!context_lost_)
+ if (!context_lost())
context_->ReleaseCurrent(surface);
}
-void SharedContextState::MarkContextLost() {
- if (!context_lost_) {
+void SharedContextState::MarkContextLost(error::ContextLostReason reason) {
+ if (!context_lost()) {
scoped_refptr<SharedContextState> prevent_last_ref_drop = this;
- context_lost_ = true;
+ context_lost_reason_ = reason;
// context_state_ could be nullptr for some unittests.
if (context_state_)
context_state_->MarkContextLost();
@@ -486,7 +482,7 @@ void SharedContextState::MarkContextLost() {
bool SharedContextState::IsCurrent(gl::GLSurface* surface) {
if (!GrContextIsGL())
return true;
- if (context_lost_)
+ if (context_lost())
return false;
return context_->IsCurrent(surface);
}
@@ -669,4 +665,79 @@ QueryManager* SharedContextState::GetQueryManager() {
return nullptr;
}
+bool SharedContextState::CheckResetStatus(bool needs_gl) {
+ DCHECK(!context_lost());
+
+ if (device_needs_reset_)
+ return true;
+
+ if (gr_context_) {
+ // Maybe Skia detected VK_ERROR_DEVICE_LOST.
+ if (gr_context_->abandoned()) {
+ LOG(ERROR) << "SharedContextState context lost via Skia.";
+ device_needs_reset_ = true;
+ MarkContextLost(error::kUnknown);
+ return true;
+ }
+
+ if (gr_context_->oomed()) {
+ LOG(ERROR) << "SharedContextState context lost via Skia OOM.";
+ device_needs_reset_ = true;
+ MarkContextLost(error::kOutOfMemory);
+ return true;
+ }
+ }
+
+ // Not using GL.
+ if (!GrContextIsGL() && !needs_gl)
+ return false;
+
+ // GL is not initialized.
+ if (!context_state_)
+ return false;
+
+ GLenum error = context_state_->api()->glGetErrorFn();
+ if (error == GL_OUT_OF_MEMORY) {
+ LOG(ERROR) << "SharedContextState lost due to GL_OUT_OF_MEMORY";
+ MarkContextLost(error::kOutOfMemory);
+ device_needs_reset_ = true;
+ return true;
+ }
+
+ // Checking the reset status is expensive on some OS/drivers
+ // (https://crbug.com/1090232). Rate limit it.
+ constexpr base::TimeDelta kMinCheckDelay =
+ base::TimeDelta::FromMilliseconds(5);
+ base::Time now = base::Time::Now();
+ if (!disable_check_reset_status_throttling_for_test_ &&
+ now < last_gl_check_graphics_reset_status_ + kMinCheckDelay) {
+ return false;
+ }
+ last_gl_check_graphics_reset_status_ = now;
+
+ GLenum driver_status = context()->CheckStickyGraphicsResetStatus();
+ if (driver_status == GL_NO_ERROR)
+ return false;
+ LOG(ERROR) << "SharedContextState context lost via ARB/EXT_robustness. Reset "
+ "status = "
+ << gles2::GLES2Util::GetStringEnum(driver_status);
+
+ switch (driver_status) {
+ case GL_GUILTY_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kGuilty);
+ break;
+ case GL_INNOCENT_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kInnocent);
+ break;
+ case GL_UNKNOWN_CONTEXT_RESET_ARB:
+ MarkContextLost(error::kUnknown);
+ break;
+ default:
+ NOTREACHED();
+ break;
+ }
+ device_needs_reset_ = true;
+ return true;
+}
+
} // namespace gpu
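
CheckResetStatus() folds three signals (Skia abandonment, Skia OOM, GL robustness) behind one throttle, since the GL query is expensive on some OS/driver pairs. The throttle in isolation, as a minimal sketch with a hypothetical helper name:

#include "base/time/time.h"

bool ShouldQueryResetStatus(base::Time now,
                            base::Time* last_check,
                            bool throttling_disabled_for_test) {
  // Allow at most one reset-status query per 5 ms (crbug.com/1090232).
  constexpr base::TimeDelta kMinCheckDelay =
      base::TimeDelta::FromMilliseconds(5);
  if (!throttling_disabled_for_test && now < *last_check + kMinCheckDelay)
    return false;
  *last_check = now;
  return true;
}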
diff --git a/chromium/gpu/command_buffer/service/shared_context_state.h b/chromium/gpu/command_buffer/service/shared_context_state.h
index 21e9687ffa5..6a7fcf0fc12 100644
--- a/chromium/gpu/command_buffer/service/shared_context_state.h
+++ b/chromium/gpu/command_buffer/service/shared_context_state.h
@@ -13,8 +13,12 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
+#include "base/optional.h"
+#include "base/time/time.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
+#include "gpu/command_buffer/common/constants.h"
+#include "gpu/command_buffer/common/gl2_types.h"
#include "gpu/command_buffer/common/skia_utils.h"
#include "gpu/command_buffer/service/gl_context_virtual_delegate.h"
#include "gpu/command_buffer/service/memory_tracking.h"
@@ -47,6 +51,10 @@ class FeatureInfo;
struct ContextState;
} // namespace gles2
+namespace raster {
+class RasterDecoderTestBase;
+} // namespace raster
+
class GPU_GLES2_EXPORT SharedContextState
: public base::trace_event::MemoryDumpProvider,
public gpu::GLContextVirtualDelegate,
@@ -68,7 +76,7 @@ class GPU_GLES2_EXPORT SharedContextState
base::WeakPtr<gpu::MemoryTracker::Observer> peak_memory_monitor =
nullptr);
- void InitializeGrContext(const GpuPreferences& gpu_preferences,
+ bool InitializeGrContext(const GpuPreferences& gpu_preferences,
const GpuDriverBugWorkarounds& workarounds,
GrContextOptions::PersistentCache* cache,
GpuProcessActivityFlags* activity_flags = nullptr,
@@ -92,7 +100,7 @@ class GPU_GLES2_EXPORT SharedContextState
bool MakeCurrent(gl::GLSurface* surface, bool needs_gl = false);
void ReleaseCurrent(gl::GLSurface* surface);
- void MarkContextLost();
+ void MarkContextLost(error::ContextLostReason reason = error::kUnknown);
bool IsCurrent(gl::GLSurface* surface);
void PurgeMemory(
@@ -122,7 +130,10 @@ class GPU_GLES2_EXPORT SharedContextState
void compileError(const char* shader, const char* errors) override;
gles2::FeatureInfo* feature_info() { return feature_info_.get(); }
gles2::ContextState* context_state() const { return context_state_.get(); }
- bool context_lost() const { return context_lost_; }
+ bool context_lost() const { return !!context_lost_reason_; }
+ base::Optional<error::ContextLostReason> context_lost_reason() {
+ return context_lost_reason_;
+ }
bool need_context_state_reset() const { return need_context_state_reset_; }
void set_need_context_state_reset(bool reset) {
need_context_state_reset_ = reset;
@@ -179,8 +190,14 @@ class GPU_GLES2_EXPORT SharedContextState
return found->second->unique();
}
+  // Updates |context_lost_reason_| and returns true if the context is lost
+ // (e.g. VK_ERROR_DEVICE_LOST or GL_UNKNOWN_CONTEXT_RESET_ARB).
+ bool CheckResetStatus(bool needs_gl);
+ bool device_needs_reset() { return device_needs_reset_; }
+
private:
friend class base::RefCounted<SharedContextState>;
+ friend class raster::RasterDecoderTestBase;
// Observer which is notified when SkiaOutputSurfaceImpl takes ownership of a
// shared image, and forward information to both histograms and task manager.
@@ -265,11 +282,15 @@ class GPU_GLES2_EXPORT SharedContextState
// driver's GL state.
bool need_context_state_reset_ = false;
- bool context_lost_ = false;
+ base::Optional<error::ContextLostReason> context_lost_reason_;
base::ObserverList<ContextLostObserver>::Unchecked context_lost_observers_;
base::MRUCache<void*, sk_sp<SkSurface>> sk_surface_cache_;
+ bool device_needs_reset_ = false;
+ base::Time last_gl_check_graphics_reset_status_;
+ bool disable_check_reset_status_throttling_for_test_ = false;
+
base::WeakPtrFactory<SharedContextState> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(SharedContextState);
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
index 3117ba53f49..6cb0ebeb6b6 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_egl_image.cc
@@ -37,7 +37,8 @@ class SharedImageRepresentationEglImageGLTexture
}
bool BeginAccess(GLenum mode) override {
- if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) {
if (!egl_backing()->BeginRead(this))
return false;
mode_ = RepresentationAccessMode::kRead;
@@ -262,10 +263,10 @@ gles2::Texture* SharedImageBackingEglImage::GenEGLImageSibling() {
auto* texture = new gles2::Texture(service_id);
texture->SetLightweightRef();
texture->SetTarget(target, 1 /*max_levels*/);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture->set_min_filter(GL_LINEAR);
+ texture->set_mag_filter(GL_LINEAR);
+ texture->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture->set_wrap_s(GL_CLAMP_TO_EDGE);
// If the backing is already cleared, no need to clear it again.
gfx::Rect cleared_rect;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
index cc4b235d582..044f201a9e9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_ahardwarebuffer.cc
@@ -70,6 +70,9 @@ class OverlayImage final : public gl::GLImage {
base::ScopedFD TakeEndFence() {
DCHECK(!begin_read_fence_.is_valid());
+
+ previous_end_read_fence_ =
+ base::ScopedFD(HANDLE_EINTR(dup(end_read_fence_.get())));
return std::move(end_read_fence_);
}
@@ -78,7 +81,7 @@ class OverlayImage final : public gl::GLImage {
GetAHardwareBuffer() override {
return std::make_unique<ScopedHardwareBufferFenceSyncImpl>(
this, base::android::ScopedHardwareBufferHandle::Create(handle_.get()),
- std::move(begin_read_fence_));
+ std::move(begin_read_fence_), std::move(previous_end_read_fence_));
}
protected:
@@ -91,14 +94,20 @@ class OverlayImage final : public gl::GLImage {
ScopedHardwareBufferFenceSyncImpl(
scoped_refptr<OverlayImage> image,
base::android::ScopedHardwareBufferHandle handle,
- base::ScopedFD fence_fd)
- : ScopedHardwareBufferFenceSync(std::move(handle), std::move(fence_fd)),
+ base::ScopedFD fence_fd,
+ base::ScopedFD available_fence_fd)
+ : ScopedHardwareBufferFenceSync(std::move(handle),
+ std::move(fence_fd),
+ std::move(available_fence_fd),
+ false /* is_video */),
image_(std::move(image)) {}
~ScopedHardwareBufferFenceSyncImpl() override = default;
void SetReadFence(base::ScopedFD fence_fd, bool has_context) override {
DCHECK(!image_->begin_read_fence_.is_valid());
DCHECK(!image_->end_read_fence_.is_valid());
+ DCHECK(!image_->previous_end_read_fence_.is_valid());
+
image_->end_read_fence_ = std::move(fence_fd);
}
@@ -115,6 +124,10 @@ class OverlayImage final : public gl::GLImage {
// completion. The image content should not be modified before passing this
// fence.
base::ScopedFD end_read_fence_;
+
+  // The fence from the overlay controller for the last frame in which this
+  // buffer was presented.
+ base::ScopedFD previous_end_read_fence_;
};
} // namespace
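
TakeEndFence() now dups the fd before moving it out, so a usable copy survives for the next frame. A self-contained sketch of the duplication step; DuplicateFence is a hypothetical helper:

#include <unistd.h>

#include "base/files/scoped_file.h"
#include "base/posix/eintr_wrapper.h"

base::ScopedFD DuplicateFence(const base::ScopedFD& fence) {
  if (!fence.is_valid())
    return base::ScopedFD();
  // HANDLE_EINTR retries dup() if it is interrupted by a signal.
  return base::ScopedFD(HANDLE_EINTR(dup(fence.get())));
}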
@@ -170,7 +183,6 @@ class SharedImageBackingAHB : public ClearTrackingSharedImageBacking {
MemoryTypeTracker* tracker) override;
private:
- gles2::Texture* GenGLTexture();
const base::android::ScopedHardwareBufferHandle hardware_buffer_handle_;
// Not guarded by |lock_| as we do not use legacy_texture_ in threadsafe
@@ -213,7 +225,8 @@ class SharedImageRepresentationGLTextureAHB
gles2::Texture* GetTexture() override { return texture_; }
bool BeginAccess(GLenum mode) override {
- if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) {
base::ScopedFD write_sync_fd;
if (!ahb_backing()->BeginRead(this, &write_sync_fd))
return false;
@@ -228,7 +241,8 @@ class SharedImageRepresentationGLTextureAHB
return false;
}
- if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) {
mode_ = RepresentationAccessMode::kRead;
} else if (mode == GL_SHARED_IMAGE_ACCESS_MODE_READWRITE_CHROMIUM) {
mode_ = RepresentationAccessMode::kWrite;
@@ -316,12 +330,12 @@ class SharedImageRepresentationSkiaVkAHB
surface_props != surface_->props()) {
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
- surface_ = SkSurface::MakeFromBackendTextureAsRenderTarget(
+ surface_ = SkSurface::MakeFromBackendTexture(
gr_context, promise_texture_->backendTexture(),
kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
color_space().ToSkColorSpace(), &surface_props);
if (!surface_) {
- LOG(ERROR) << "MakeFromBackendTextureAsRenderTarget() failed.";
+ LOG(ERROR) << "MakeFromBackendTexture() failed.";
return nullptr;
}
surface_msaa_count_ = final_msaa_count;
@@ -582,7 +596,9 @@ bool SharedImageBackingAHB::ProduceLegacyMailbox(
DCHECK(!is_writing_);
DCHECK_EQ(size_t{0}, active_readers_.size());
DCHECK(hardware_buffer_handle_.is_valid());
- legacy_texture_ = GenGLTexture();
+ legacy_texture_ =
+ GenGLTexture(hardware_buffer_handle_.get(), GL_TEXTURE_2D, color_space(),
+ size(), estimated_size(), ClearedRect());
if (!legacy_texture_)
return false;
// Make sure our |legacy_texture_| has the right initial cleared rect.
@@ -602,7 +618,16 @@ SharedImageBackingAHB::ProduceGLTexture(SharedImageManager* manager,
MemoryTypeTracker* tracker) {
// Use same texture for all the texture representations generated from same
// backing.
- auto* texture = GenGLTexture();
+ DCHECK(hardware_buffer_handle_.is_valid());
+
+  // Note that we are not using the GL_TEXTURE_EXTERNAL_OES target (here and
+  // in all other places in this file) since SkSurface doesn't support it. As
+  // per the EGL documentation -
+  // https://www.khronos.org/registry/OpenGL/extensions/OES/OES_EGL_image_external.txt
+  // if GL_OES_EGL_image is supported then <target> may also be TEXTURE_2D.
+ auto* texture =
+ GenGLTexture(hardware_buffer_handle_.get(), GL_TEXTURE_2D, color_space(),
+ size(), estimated_size(), ClearedRect());
if (!texture)
return nullptr;
@@ -620,11 +645,9 @@ SharedImageBackingAHB::ProduceSkia(
// Check whether we are in Vulkan mode OR GL mode and accordingly create
// Skia representation.
if (context_state->GrContextIsVulkan()) {
- auto* device_queue = context_state->vk_context_provider()->GetDeviceQueue();
- gfx::GpuMemoryBufferHandle gmb_handle(GetAhbHandle());
- auto vulkan_image = VulkanImage::CreateFromGpuMemoryBufferHandle(
- device_queue, std::move(gmb_handle), size(), ToVkFormat(format()),
- 0 /* usage */);
+ auto vulkan_image = CreateVkImageFromAhbHandle(
+ GetAhbHandle(), context_state.get(), size(), format());
+
if (!vulkan_image)
return nullptr;
@@ -633,8 +656,10 @@ SharedImageBackingAHB::ProduceSkia(
tracker);
}
DCHECK(context_state->GrContextIsGL());
-
- auto* texture = GenGLTexture();
+ DCHECK(hardware_buffer_handle_.is_valid());
+ auto* texture =
+ GenGLTexture(hardware_buffer_handle_.get(), GL_TEXTURE_2D, color_space(),
+ size(), estimated_size(), ClearedRect());
if (!texture)
return nullptr;
auto gl_representation =
@@ -759,64 +784,6 @@ void SharedImageBackingAHB::EndOverlayAccess() {
read_sync_fd_ = gl::MergeFDs(std::move(read_sync_fd_), std::move(fence_fd));
}
-gles2::Texture* SharedImageBackingAHB::GenGLTexture() {
- DCHECK(hardware_buffer_handle_.is_valid());
-
- // Target for AHB backed egl images.
- // Note that we are not using GL_TEXTURE_EXTERNAL_OES target since sksurface
- // doesn't supports it. As per the egl documentation -
- // https://www.khronos.org/registry/OpenGL/extensions/OES/OES_EGL_image_external.txt
- // if GL_OES_EGL_image is supported then <target> may also be TEXTURE_2D.
- GLenum target = GL_TEXTURE_2D;
- GLenum get_target = GL_TEXTURE_BINDING_2D;
-
- // Create a gles2 texture using the AhardwareBuffer.
- gl::GLApi* api = gl::g_current_gl_context;
- GLuint service_id = 0;
- api->glGenTexturesFn(1, &service_id);
- GLint old_texture_binding = 0;
- api->glGetIntegervFn(get_target, &old_texture_binding);
- api->glBindTextureFn(target, service_id);
- api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
-
- // Create an egl image using AHardwareBuffer.
- auto egl_image = base::MakeRefCounted<gl::GLImageAHardwareBuffer>(size());
- if (!egl_image->Initialize(hardware_buffer_handle_.get(), false)) {
- LOG(ERROR) << "Failed to create EGL image";
- api->glBindTextureFn(target, old_texture_binding);
- api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
- }
- if (!egl_image->BindTexImage(target)) {
- LOG(ERROR) << "Failed to bind egl image";
- api->glBindTextureFn(target, old_texture_binding);
- api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
- }
- egl_image->SetColorSpace(color_space());
-
- // Create a gles2 Texture.
- auto* texture = new gles2::Texture(service_id);
- texture->SetLightweightRef();
- texture->SetTarget(target, 1);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
-
- texture->SetLevelInfo(target, 0, egl_image->GetInternalFormat(),
- size().width(), size().height(), 1, 0,
- egl_image->GetDataFormat(), egl_image->GetDataType(),
- ClearedRect());
- texture->SetLevelImage(target, 0, egl_image.get(), gles2::Texture::BOUND);
- texture->SetImmutable(true, false);
- api->glBindTextureFn(target, old_texture_binding);
- return texture;
-}
-
SharedImageBackingFactoryAHB::SharedImageBackingFactoryAHB(
const GpuDriverBugWorkarounds& workarounds,
const GpuFeatureInfo& gpu_feature_info) {
@@ -1065,11 +1032,8 @@ SharedImageBackingFactoryAHB::CreateSharedImage(
const gfx::ColorSpace& color_space,
uint32_t usage,
base::span<const uint8_t> pixel_data) {
- auto backing =
- MakeBacking(mailbox, format, size, color_space, usage, false, pixel_data);
- if (backing)
- backing->OnWriteSucceeded();
- return backing;
+ return MakeBacking(mailbox, format, size, color_space, usage, false,
+ pixel_data);
}
bool SharedImageBackingFactoryAHB::CanImportGpuMemoryBuffer(
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
index 91798bb9b36..bd4e77afcbd 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.cc
@@ -25,6 +25,7 @@
#include "gpu/command_buffer/service/service_utils.h"
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h"
#include "gpu/command_buffer/service/shared_image_factory.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/command_buffer/service/skia_utils.h"
@@ -51,6 +52,10 @@
#include "gpu/command_buffer/service/shared_image_batch_access_manager.h"
#endif
+#if defined(OS_MACOSX)
+#include "gpu/command_buffer/service/shared_image_backing_factory_iosurface.h"
+#endif
+
namespace gpu {
namespace {
@@ -189,23 +194,6 @@ class ScopedRestoreTexture {
DISALLOW_COPY_AND_ASSIGN(ScopedRestoreTexture);
};
-GLuint MakeTextureAndSetParameters(gl::GLApi* api,
- GLenum target,
- bool framebuffer_attachment_angle) {
- GLuint service_id = 0;
- api->glGenTexturesFn(1, &service_id);
- api->glBindTextureFn(target, service_id);
- api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
- api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
- if (framebuffer_attachment_angle) {
- api->glTexParameteriFn(target, GL_TEXTURE_USAGE_ANGLE,
- GL_FRAMEBUFFER_ATTACHMENT_ANGLE);
- }
- return service_id;
-}
-
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawnCommon(
SharedImageFactory* factory,
SharedImageManager* manager,
@@ -301,518 +289,718 @@ std::unique_ptr<SharedImageRepresentationDawn> ProduceDawnCommon(
return manager->ProduceDawn(dst_mailbox, tracker, device);
}
+size_t EstimatedSize(viz::ResourceFormat format, const gfx::Size& size) {
+ size_t estimated_size = 0;
+ viz::ResourceSizes::MaybeSizeInBytes(size, format, &estimated_size);
+ return estimated_size;
+}
+
} // anonymous namespace
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageRepresentationGLTextureImpl
+
// Representation of a SharedImageBackingGLTexture as a GL Texture.
-class SharedImageRepresentationGLTextureImpl
- : public SharedImageRepresentationGLTexture {
- public:
- SharedImageRepresentationGLTextureImpl(SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- gles2::Texture* texture)
- : SharedImageRepresentationGLTexture(manager, backing, tracker),
- texture_(texture) {}
+SharedImageRepresentationGLTextureImpl::SharedImageRepresentationGLTextureImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture)
+ : SharedImageRepresentationGLTexture(manager, backing, tracker),
+ client_(client),
+ texture_(texture) {}
- gles2::Texture* GetTexture() override { return texture_; }
+gles2::Texture* SharedImageRepresentationGLTextureImpl::GetTexture() {
+ return texture_;
+}
- private:
- gles2::Texture* texture_;
-};
+bool SharedImageRepresentationGLTextureImpl::BeginAccess(GLenum mode) {
+ if (client_)
+ return client_->OnGLTextureBeginAccess(mode);
+ return true;
+}
-// Representation of a SharedImageBackingGLTexturePassthrough as a GL
-// TexturePassthrough.
-class SharedImageRepresentationGLTexturePassthroughImpl
- : public SharedImageRepresentationGLTexturePassthrough {
- public:
- SharedImageRepresentationGLTexturePassthroughImpl(
- SharedImageManager* manager,
- SharedImageBacking* backing,
- MemoryTypeTracker* tracker,
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough)
- : SharedImageRepresentationGLTexturePassthrough(manager,
- backing,
- tracker),
- texture_passthrough_(std::move(texture_passthrough)) {}
-
- const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
- override {
- return texture_passthrough_;
- }
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageRepresentationGLTexturePassthroughImpl
+
+SharedImageRepresentationGLTexturePassthroughImpl::
+ SharedImageRepresentationGLTexturePassthroughImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough)
+ : SharedImageRepresentationGLTexturePassthrough(manager, backing, tracker),
+ client_(client),
+ texture_passthrough_(std::move(texture_passthrough)) {}
+
+SharedImageRepresentationGLTexturePassthroughImpl::
+ ~SharedImageRepresentationGLTexturePassthroughImpl() = default;
+
+const scoped_refptr<gles2::TexturePassthrough>&
+SharedImageRepresentationGLTexturePassthroughImpl::GetTexturePassthrough() {
+ return texture_passthrough_;
+}
- void EndAccess() override {
- GLenum target = texture_passthrough_->target();
- gl::GLImage* image = texture_passthrough_->GetLevelImage(target, 0);
- if (!image)
- return;
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
- gl::ScopedTextureBinder binder(target,
- texture_passthrough_->service_id());
- image->ReleaseTexImage(target);
- image->BindTexImage(target);
- }
- }
+bool SharedImageRepresentationGLTexturePassthroughImpl::BeginAccess(
+ GLenum mode) {
+ if (client_)
+ return client_->OnGLTexturePassthroughBeginAccess(mode);
+ return true;
+}
- private:
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
-};
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingGLCommon
-class SharedImageBackingWithReadAccess : public SharedImageBacking {
- public:
- SharedImageBackingWithReadAccess(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- size_t estimated_size,
- bool is_thread_safe)
- : SharedImageBacking(mailbox,
- format,
- size,
- color_space,
- usage,
- estimated_size,
- is_thread_safe) {}
- ~SharedImageBackingWithReadAccess() override = default;
-
- virtual void BeginReadAccess() = 0;
-};
+// static
+void SharedImageBackingGLCommon::MakeTextureAndSetParameters(
+ GLenum target,
+ GLuint service_id,
+ bool framebuffer_attachment_angle,
+ scoped_refptr<gles2::TexturePassthrough>* passthrough_texture,
+ gles2::Texture** texture) {
+ if (!service_id) {
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
-class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
- public:
- SharedImageRepresentationSkiaImpl(
- SharedImageManager* manager,
- SharedImageBackingWithReadAccess* backing,
- scoped_refptr<SharedContextState> context_state,
- sk_sp<SkPromiseImageTexture> cached_promise_texture,
- MemoryTypeTracker* tracker,
- GLenum target,
- GLuint service_id)
- : SharedImageRepresentationSkia(manager, backing, tracker),
- context_state_(std::move(context_state)),
- promise_texture_(cached_promise_texture) {
- if (!promise_texture_) {
- GrBackendTexture backend_texture;
- GetGrBackendTexture(context_state_->feature_info(), target, size(),
- service_id, format(), &backend_texture);
- promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
+ api->glGenTexturesFn(1, &service_id);
+ api->glBindTextureFn(target, service_id);
+ api->glTexParameteriFn(target, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+ api->glTexParameteriFn(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
+ if (framebuffer_attachment_angle) {
+ api->glTexParameteriFn(target, GL_TEXTURE_USAGE_ANGLE,
+ GL_FRAMEBUFFER_ATTACHMENT_ANGLE);
}
+ }
+ if (passthrough_texture) {
+ *passthrough_texture =
+ base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
+ }
+ if (texture) {
+ *texture = new gles2::Texture(service_id);
+ (*texture)->SetLightweightRef();
+ (*texture)->SetTarget(target, 1);
+ (*texture)->set_min_filter(GL_LINEAR);
+ (*texture)->set_mag_filter(GL_LINEAR);
+ (*texture)->set_wrap_s(GL_CLAMP_TO_EDGE);
+ (*texture)->set_wrap_t(GL_CLAMP_TO_EDGE);
+ }
+}
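A minimal usage sketch of the helper above (illustrative only, not part of the
patch): the two out-parameters select the passthrough or validating path, and
passing a service_id of 0 asks the helper to generate and parameterize a new
texture.

    // Passthrough path: the new texture is wrapped in a
    // gles2::TexturePassthrough.
    scoped_refptr<gles2::TexturePassthrough> passthrough;
    SharedImageBackingGLCommon::MakeTextureAndSetParameters(
        GL_TEXTURE_2D, 0 /* service_id */,
        false /* framebuffer_attachment_angle */, &passthrough, nullptr);

    // Validating path: the returned gles2::Texture* already holds a
    // lightweight ref and linear/clamp-to-edge sampler state.
    gles2::Texture* texture = nullptr;
    SharedImageBackingGLCommon::MakeTextureAndSetParameters(
        GL_TEXTURE_2D, 0 /* service_id */,
        false /* framebuffer_attachment_angle */, nullptr, &texture);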
+
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageRepresentationSkiaImpl
+
+SharedImageRepresentationSkiaImpl::SharedImageRepresentationSkiaImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ scoped_refptr<SharedContextState> context_state,
+ sk_sp<SkPromiseImageTexture> promise_texture,
+ MemoryTypeTracker* tracker)
+ : SharedImageRepresentationSkia(manager, backing, tracker),
+ client_(client),
+ context_state_(std::move(context_state)),
+ promise_texture_(promise_texture) {
+ DCHECK(promise_texture_);
#if DCHECK_IS_ON()
+ if (context_state_->GrContextIsGL())
context_ = gl::GLContext::GetCurrent();
#endif
- }
+}
- ~SharedImageRepresentationSkiaImpl() override {
- if (write_surface_) {
- DLOG(ERROR) << "SharedImageRepresentationSkia was destroyed while still "
- << "open for write access.";
- }
+SharedImageRepresentationSkiaImpl::~SharedImageRepresentationSkiaImpl() {
+ if (write_surface_) {
+ DLOG(ERROR) << "SharedImageRepresentationSkia was destroyed while still "
+ << "open for write access.";
}
+}
- sk_sp<SkSurface> BeginWriteAccess(
- int final_msaa_count,
- const SkSurfaceProps& surface_props,
- std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores) override {
- CheckContext();
- if (write_surface_)
- return nullptr;
-
- if (!promise_texture_) {
- return nullptr;
- }
- SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
- /*gpu_compositing=*/true, format());
- auto surface = SkSurface::MakeFromBackendTextureAsRenderTarget(
- context_state_->gr_context(), promise_texture_->backendTexture(),
- kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
- backing()->color_space().ToSkColorSpace(), &surface_props);
- write_surface_ = surface.get();
- return surface;
- }
+sk_sp<SkSurface> SharedImageRepresentationSkiaImpl::BeginWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ CheckContext();
+ if (client_ && !client_->OnSkiaBeginWriteAccess())
+ return nullptr;
+ if (write_surface_)
+ return nullptr;
- void EndWriteAccess(sk_sp<SkSurface> surface) override {
- DCHECK_EQ(surface.get(), write_surface_);
- DCHECK(surface->unique());
- CheckContext();
- // TODO(ericrk): Keep the surface around for re-use.
- write_surface_ = nullptr;
+ if (!promise_texture_) {
+ return nullptr;
}
+ SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format());
+ auto surface = SkSurface::MakeFromBackendTexture(
+ context_state_->gr_context(), promise_texture_->backendTexture(),
+ kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
+ backing()->color_space().ToSkColorSpace(), &surface_props);
+ write_surface_ = surface.get();
+ return surface;
+}
- sk_sp<SkPromiseImageTexture> BeginReadAccess(
- std::vector<GrBackendSemaphore>* begin_semaphores,
- std::vector<GrBackendSemaphore>* end_semaphores) override {
- CheckContext();
- static_cast<SharedImageBackingWithReadAccess*>(backing())
- ->BeginReadAccess();
- return promise_texture_;
- }
+void SharedImageRepresentationSkiaImpl::EndWriteAccess(
+ sk_sp<SkSurface> surface) {
+ DCHECK_EQ(surface.get(), write_surface_);
+ DCHECK(surface->unique());
+ CheckContext();
+ // TODO(ericrk): Keep the surface around for re-use.
+ write_surface_ = nullptr;
+}
- void EndReadAccess() override {
- // TODO(ericrk): Handle begin/end correctness checks.
- }
+sk_sp<SkPromiseImageTexture> SharedImageRepresentationSkiaImpl::BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) {
+ CheckContext();
+ if (client_ && !client_->OnSkiaBeginReadAccess())
+ return nullptr;
+ return promise_texture_;
+}
- bool SupportsMultipleConcurrentReadAccess() override { return true; }
+void SharedImageRepresentationSkiaImpl::EndReadAccess() {
+ // TODO(ericrk): Handle begin/end correctness checks.
+}
- sk_sp<SkPromiseImageTexture> promise_texture() { return promise_texture_; }
+bool SharedImageRepresentationSkiaImpl::SupportsMultipleConcurrentReadAccess() {
+ return true;
+}
- private:
- void CheckContext() {
+void SharedImageRepresentationSkiaImpl::CheckContext() {
#if DCHECK_IS_ON()
+ if (context_)
DCHECK(gl::GLContext::GetCurrent() == context_);
#endif
- }
+}
- scoped_refptr<SharedContextState> context_state_;
- sk_sp<SkPromiseImageTexture> promise_texture_;
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingGLTexture
- SkSurface* write_surface_ = nullptr;
-#if DCHECK_IS_ON()
- gl::GLContext* context_;
-#endif
-};
-
-// Implementation of SharedImageBacking that creates a GL Texture and stores it
-// as a gles2::Texture. Can be used with the legacy mailbox implementation.
-class SharedImageBackingGLTexture : public SharedImageBackingWithReadAccess {
- public:
- SharedImageBackingGLTexture(const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- gles2::Texture* texture,
- const UnpackStateAttribs& attribs)
- : SharedImageBackingWithReadAccess(mailbox,
- format,
- size,
- color_space,
- usage,
- texture->estimated_size(),
- false /* is_thread_safe */),
- texture_(texture),
- attribs_(attribs) {
- DCHECK(texture_);
- gl::GLImage* image =
- texture_->GetLevelImage(texture_->target(), 0, nullptr);
- if (image)
- native_pixmap_ = image->GetNativePixmap();
+SharedImageBackingGLTexture::SharedImageBackingGLTexture(
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ bool is_passthrough)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ EstimatedSize(format, size),
+ false /* is_thread_safe */),
+ is_passthrough_(is_passthrough) {}
+
+SharedImageBackingGLTexture::~SharedImageBackingGLTexture() {
+ if (IsPassthrough()) {
+ if (passthrough_texture_) {
+ if (!have_context())
+ passthrough_texture_->MarkContextLost();
+ passthrough_texture_.reset();
+ }
+ } else {
+ if (texture_) {
+ texture_->RemoveLightweightRef(have_context());
+ texture_ = nullptr;
+ }
}
+}
- ~SharedImageBackingGLTexture() override {
- DCHECK(texture_);
- texture_->RemoveLightweightRef(have_context());
- texture_ = nullptr;
+GLenum SharedImageBackingGLTexture::GetGLTarget() const {
+ return texture_ ? texture_->target() : passthrough_texture_->target();
+}
- if (rgb_emulation_texture_) {
- rgb_emulation_texture_->RemoveLightweightRef(have_context());
- rgb_emulation_texture_ = nullptr;
- }
+GLuint SharedImageBackingGLTexture::GetGLServiceId() const {
+ return texture_ ? texture_->service_id() : passthrough_texture_->service_id();
+}
+
+void SharedImageBackingGLTexture::OnMemoryDump(
+ const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) {
+ const auto client_guid = GetSharedImageGUIDForTracing(mailbox());
+ if (!IsPassthrough()) {
+ const auto service_guid =
+ gl::GetGLTextureServiceGUIDForTracing(texture_->service_id());
+ pmd->CreateSharedGlobalAllocatorDump(service_guid);
+ pmd->AddOwnershipEdge(client_guid, service_guid, /* importance */ 2);
+ texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name);
}
+}
- gfx::Rect ClearedRect() const override {
+gfx::Rect SharedImageBackingGLTexture::ClearedRect() const {
+ if (IsPassthrough()) {
+    // This backing is used exclusively with ANGLE, which handles clear
+    // tracking internally. Act as though the texture is always cleared.
+ return gfx::Rect(size());
+ } else {
return texture_->GetLevelClearedRect(texture_->target(), 0);
}
+}
- void SetClearedRect(const gfx::Rect& cleared_rect) override {
+void SharedImageBackingGLTexture::SetClearedRect(
+ const gfx::Rect& cleared_rect) {
+ if (!IsPassthrough())
texture_->SetLevelClearedRect(texture_->target(), 0, cleared_rect);
- }
-
- void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
- GLenum target = texture_->target();
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
- api->glBindTextureFn(target, texture_->service_id());
-
- gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND;
- gl::GLImage* image = texture_->GetLevelImage(target, 0, &old_state);
- if (!image)
- return;
- if (old_state == gles2::Texture::BOUND)
- image->ReleaseTexImage(target);
-
- if (in_fence) {
- // TODO(dcastagna): Don't wait for the fence if the SharedImage is going
- // to be scanned out as an HW overlay. Currently we don't know that at
- // this point and we always bind the image, therefore we need to wait for
- // the fence.
- std::unique_ptr<gl::GLFence> egl_fence =
- gl::GLFence::CreateFromGpuFence(*in_fence.get());
- egl_fence->ServerWait();
- }
- gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND;
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND &&
- image->BindTexImage(target)) {
- new_state = gles2::Texture::BOUND;
- }
- if (old_state != new_state)
- texture_->SetLevelImage(target, 0, image, new_state);
- }
+}
- bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
- DCHECK(texture_);
+bool SharedImageBackingGLTexture::ProduceLegacyMailbox(
+ MailboxManager* mailbox_manager) {
+ if (IsPassthrough())
+ mailbox_manager->ProduceTexture(mailbox(), passthrough_texture_.get());
+ else
mailbox_manager->ProduceTexture(mailbox(), texture_);
- return true;
- }
+ return true;
+}
- void OnMemoryDump(const std::string& dump_name,
- base::trace_event::MemoryAllocatorDump* dump,
- base::trace_event::ProcessMemoryDump* pmd,
- uint64_t client_tracing_id) override {
- // Add a |service_guid| which expresses shared ownership between the
- // various GPU dumps.
- auto client_guid = GetSharedImageGUIDForTracing(mailbox());
- auto service_guid =
- gl::GetGLTextureServiceGUIDForTracing(texture_->service_id());
- pmd->CreateSharedGlobalAllocatorDump(service_guid);
- // TODO(piman): coalesce constant with TextureManager::DumpTextureRef.
- int importance = 2; // This client always owns the ref.
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageBackingGLTexture::ProduceGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ DCHECK(texture_);
+ return std::make_unique<SharedImageRepresentationGLTextureImpl>(
+ manager, this, nullptr, tracker, texture_);
+}
- pmd->AddOwnershipEdge(client_guid, service_guid, importance);
+std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+SharedImageBackingGLTexture::ProduceGLTexturePassthrough(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ DCHECK(passthrough_texture_);
+ return std::make_unique<SharedImageRepresentationGLTexturePassthroughImpl>(
+ manager, this, nullptr, tracker, passthrough_texture_);
+}
- // Dump all sub-levels held by the texture. They will appear below the
- // main gl/textures/client_X/mailbox_Y dump.
- texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name);
+std::unique_ptr<SharedImageRepresentationDawn>
+SharedImageBackingGLTexture::ProduceDawn(SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) {
+ if (!factory()) {
+ DLOG(ERROR) << "No SharedImageFactory to create a dawn representation.";
+ return nullptr;
}
- void BeginReadAccess() override {
- GLenum target = texture_->target();
- gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND;
- gl::GLImage* image = texture_->GetLevelImage(target, 0, &old_state);
- if (image && old_state == gpu::gles2::Texture::UNBOUND) {
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
- api->glBindTextureFn(target, texture_->service_id());
- gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND;
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
- if (image->BindTexImage(target))
- new_state = gles2::Texture::BOUND;
- } else {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs_,
- /*upload=*/true);
- if (image->CopyTexImage(target))
- new_state = gles2::Texture::COPIED;
- }
- if (old_state != new_state)
- texture_->SetLevelImage(target, 0, image, new_state);
- }
- }
+ return ProduceDawnCommon(factory(), manager, tracker, device, this,
+ IsPassthrough());
+}
- scoped_refptr<gfx::NativePixmap> GetNativePixmap() override {
- return native_pixmap_;
+std::unique_ptr<SharedImageRepresentationSkia>
+SharedImageBackingGLTexture::ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) {
+ if (!cached_promise_texture_) {
+ GrBackendTexture backend_texture;
+ GetGrBackendTexture(context_state->feature_info(), GetGLTarget(), size(),
+ GetGLServiceId(), format(), &backend_texture);
+ cached_promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
}
+ return std::make_unique<SharedImageRepresentationSkiaImpl>(
+ manager, this, nullptr, std::move(context_state), cached_promise_texture_,
+ tracker);
+}
- protected:
- std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- return std::make_unique<SharedImageRepresentationGLTextureImpl>(
- manager, this, tracker, texture_);
+void SharedImageBackingGLTexture::Update(
+ std::unique_ptr<gfx::GpuFence> in_fence) {}
+
+void SharedImageBackingGLTexture::InitializeGLTexture(
+ GLuint service_id,
+ const SharedImageBackingGLCommon::InitializeGLTextureParams& params) {
+ SharedImageBackingGLCommon::MakeTextureAndSetParameters(
+ params.target, service_id, params.framebuffer_attachment_angle,
+ IsPassthrough() ? &passthrough_texture_ : nullptr,
+ IsPassthrough() ? nullptr : &texture_);
+
+ if (IsPassthrough()) {
+ passthrough_texture_->SetEstimatedSize(EstimatedSize(format(), size()));
+ } else {
+ texture_->SetLevelInfo(params.target, 0, params.internal_format,
+ size().width(), size().height(), 1, 0, params.format,
+ params.type,
+ params.is_cleared ? gfx::Rect(size()) : gfx::Rect());
+ texture_->SetImmutable(true, params.has_immutable_storage);
}
+}
- std::unique_ptr<SharedImageRepresentationGLTexture>
- ProduceRGBEmulationGLTexture(SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- if (!rgb_emulation_texture_) {
- GLenum target = texture_->target();
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
-
- // Set to false as this code path is only used on Mac.
- bool framebuffer_attachment_angle = false;
- GLuint service_id = MakeTextureAndSetParameters(
- api, target, framebuffer_attachment_angle);
-
- gles2::Texture::ImageState image_state = gles2::Texture::BOUND;
- gl::GLImage* image = texture_->GetLevelImage(target, 0, &image_state);
- if (!image) {
- LOG(ERROR) << "Texture is not bound to an image.";
- return nullptr;
- }
+void SharedImageBackingGLTexture::SetCompatibilitySwizzle(
+ const gles2::Texture::CompatibilitySwizzle* swizzle) {
+ if (!IsPassthrough())
+ texture_->SetCompatibilitySwizzle(swizzle);
+}
- DCHECK(image->ShouldBindOrCopy() == gl::GLImage::BIND);
- const GLenum internal_format = GL_RGB;
- if (!image->BindTexImageWithInternalformat(target, internal_format)) {
- LOG(ERROR) << "Failed to bind image to rgb texture.";
- api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
- }
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingGLImage
- rgb_emulation_texture_ = new gles2::Texture(service_id);
- rgb_emulation_texture_->SetLightweightRef();
- rgb_emulation_texture_->SetTarget(target, 1);
- rgb_emulation_texture_->sampler_state_.min_filter = GL_LINEAR;
- rgb_emulation_texture_->sampler_state_.mag_filter = GL_LINEAR;
- rgb_emulation_texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- rgb_emulation_texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
-
- GLenum format = gles2::TextureManager::ExtractFormatFromStorageFormat(
- internal_format);
- GLenum type =
- gles2::TextureManager::ExtractTypeFromStorageFormat(internal_format);
-
- const gles2::Texture::LevelInfo* info = texture_->GetLevelInfo(target, 0);
- rgb_emulation_texture_->SetLevelInfo(target, 0, internal_format,
- info->width, info->height, 1, 0,
- format, type, info->cleared_rect);
-
- rgb_emulation_texture_->SetLevelImage(target, 0, image, image_state);
- rgb_emulation_texture_->SetImmutable(true, false);
- }
+SharedImageBackingGLImage::SharedImageBackingGLImage(
+ scoped_refptr<gl::GLImage> image,
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SharedImageBackingGLCommon::InitializeGLTextureParams& params,
+ const UnpackStateAttribs& attribs,
+ bool is_passthrough)
+ : SharedImageBacking(mailbox,
+ format,
+ size,
+ color_space,
+ usage,
+ EstimatedSize(format, size),
+ false /* is_thread_safe */),
+ image_(image),
+ gl_params_(params),
+ gl_unpack_attribs_(attribs),
+ is_passthrough_(is_passthrough),
+ weak_factory_(this) {
+ DCHECK(image_);
+}
- return std::make_unique<SharedImageRepresentationGLTextureImpl>(
- manager, this, tracker, rgb_emulation_texture_);
+SharedImageBackingGLImage::~SharedImageBackingGLImage() {
+ if (rgb_emulation_texture_) {
+ rgb_emulation_texture_->RemoveLightweightRef(have_context());
+ rgb_emulation_texture_ = nullptr;
}
-
- std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- scoped_refptr<SharedContextState> context_state) override {
- auto result = std::make_unique<SharedImageRepresentationSkiaImpl>(
- manager, this, std::move(context_state), cached_promise_texture_,
- tracker, texture_->target(), texture_->service_id());
- cached_promise_texture_ = result->promise_texture();
- return result;
+ if (IsPassthrough()) {
+ if (passthrough_texture_) {
+ if (!have_context())
+ passthrough_texture_->MarkContextLost();
+ passthrough_texture_.reset();
+ }
+ } else {
+ if (texture_) {
+ texture_->RemoveLightweightRef(have_context());
+ texture_ = nullptr;
+ }
}
+}
- std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- WGPUDevice device) override {
- if (!factory()) {
- DLOG(ERROR) << "No SharedImageFactory to create a dawn representation.";
- return nullptr;
- }
+GLenum SharedImageBackingGLImage::GetGLTarget() const {
+ return gl_params_.target;
+}
- return ProduceDawnCommon(factory(), manager, tracker, device, this, false);
- }
+GLuint SharedImageBackingGLImage::GetGLServiceId() const {
+ return texture_ ? texture_->service_id() : passthrough_texture_->service_id();
+}
- private:
- gles2::Texture* texture_ = nullptr;
- gles2::Texture* rgb_emulation_texture_ = nullptr;
- sk_sp<SkPromiseImageTexture> cached_promise_texture_;
- const UnpackStateAttribs attribs_;
- scoped_refptr<gfx::NativePixmap> native_pixmap_;
-};
+scoped_refptr<gfx::NativePixmap> SharedImageBackingGLImage::GetNativePixmap() {
+ if (IsPassthrough())
+ return nullptr;
-// Implementation of SharedImageBacking that creates a GL Texture and stores it
-// as a gles2::TexturePassthrough. Can be used with the legacy mailbox
-// implementation.
-class SharedImageBackingPassthroughGLTexture
- : public SharedImageBackingWithReadAccess {
- public:
- SharedImageBackingPassthroughGLTexture(
- const Mailbox& mailbox,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- scoped_refptr<gles2::TexturePassthrough> passthrough_texture)
- : SharedImageBackingWithReadAccess(mailbox,
- format,
- size,
- color_space,
- usage,
- passthrough_texture->estimated_size(),
- false /* is_thread_safe */),
- texture_passthrough_(std::move(passthrough_texture)) {
- DCHECK(texture_passthrough_);
- }
+ return image_->GetNativePixmap();
+}
- ~SharedImageBackingPassthroughGLTexture() override {
- DCHECK(texture_passthrough_);
- if (!have_context())
- texture_passthrough_->MarkContextLost();
- texture_passthrough_.reset();
+void SharedImageBackingGLImage::OnMemoryDump(
+ const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) {
+ // Add a |service_guid| which expresses shared ownership between the
+ // various GPU dumps.
+ auto client_guid = GetSharedImageGUIDForTracing(mailbox());
+ auto service_guid = gl::GetGLTextureServiceGUIDForTracing(GetGLServiceId());
+ pmd->CreateSharedGlobalAllocatorDump(service_guid);
+ // TODO(piman): coalesce constant with TextureManager::DumpTextureRef.
+ int importance = 2; // This client always owns the ref.
+
+ pmd->AddOwnershipEdge(client_guid, service_guid, importance);
+
+ if (IsPassthrough()) {
+ auto* gl_image = passthrough_texture_->GetLevelImage(GetGLTarget(), 0);
+ if (gl_image)
+ gl_image->OnMemoryDump(pmd, client_tracing_id, dump_name);
+ } else {
+ // Dump all sub-levels held by the texture. They will appear below the
+ // main gl/textures/client_X/mailbox_Y dump.
+ texture_->DumpLevelMemory(pmd, client_tracing_id, dump_name);
}
+}
- gfx::Rect ClearedRect() const override {
+gfx::Rect SharedImageBackingGLImage::ClearedRect() const {
+ if (IsPassthrough()) {
    // This backing is used exclusively with ANGLE, which handles clear
    // tracking internally. Act as though the texture is always cleared.
return gfx::Rect(size());
+ } else {
+ return texture_->GetLevelClearedRect(texture_->target(), 0);
}
+}
+void SharedImageBackingGLImage::SetClearedRect(const gfx::Rect& cleared_rect) {
+ if (!IsPassthrough())
+ texture_->SetLevelClearedRect(texture_->target(), 0, cleared_rect);
+}
+bool SharedImageBackingGLImage::ProduceLegacyMailbox(
+ MailboxManager* mailbox_manager) {
+ if (IsPassthrough())
+ mailbox_manager->ProduceTexture(mailbox(), passthrough_texture_.get());
+ else
+ mailbox_manager->ProduceTexture(mailbox(), texture_);
+ return true;
+}
- void SetClearedRect(const gfx::Rect& cleared_rect) override {}
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageBackingGLImage::ProduceGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ DCHECK(texture_);
+ return std::make_unique<SharedImageRepresentationGLTextureImpl>(
+ manager, this, this, tracker, texture_);
+}
+std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+SharedImageBackingGLImage::ProduceGLTexturePassthrough(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ DCHECK(passthrough_texture_);
+ return std::make_unique<SharedImageRepresentationGLTexturePassthroughImpl>(
+ manager, this, this, tracker, passthrough_texture_);
+}
- void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
- GLenum target = texture_passthrough_->target();
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
- api->glBindTextureFn(target, texture_passthrough_->service_id());
-
- gl::GLImage* image = texture_passthrough_->GetLevelImage(target, 0);
- if (!image)
- return;
- image->ReleaseTexImage(target);
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND)
- image->BindTexImage(target);
- else
- image->CopyTexImage(target);
+std::unique_ptr<SharedImageRepresentationOverlay>
+SharedImageBackingGLImage::ProduceOverlay(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+#if defined(OS_MACOSX)
+ return SharedImageBackingFactoryIOSurface::ProduceOverlay(manager, this,
+ tracker, image_);
+#else // defined(OS_MACOSX)
+ return SharedImageBacking::ProduceOverlay(manager, tracker);
+#endif  // defined(OS_MACOSX)
+}
+
+std::unique_ptr<SharedImageRepresentationDawn>
+SharedImageBackingGLImage::ProduceDawn(SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) {
+#if defined(OS_MACOSX)
+ auto result = SharedImageBackingFactoryIOSurface::ProduceDawn(
+ manager, this, tracker, device, image_);
+ if (result)
+ return result;
+#endif // defined(OS_MACOSX)
+ if (!factory()) {
+ DLOG(ERROR) << "No SharedImageFactory to create a dawn representation.";
+ return nullptr;
}
- bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) override {
- DCHECK(texture_passthrough_);
- mailbox_manager->ProduceTexture(mailbox(), texture_passthrough_.get());
- return true;
+ return ProduceDawnCommon(factory(), manager, tracker, device, this,
+ IsPassthrough());
+}
+
+std::unique_ptr<SharedImageRepresentationSkia>
+SharedImageBackingGLImage::ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) {
+ if (!cached_promise_texture_) {
+ if (context_state->GrContextIsMetal()) {
+#if defined(OS_MACOSX)
+ cached_promise_texture_ =
+ SharedImageBackingFactoryIOSurface::ProduceSkiaPromiseTextureMetal(
+ this, context_state, image_);
+ DCHECK(cached_promise_texture_);
+#endif
+ } else {
+ GrBackendTexture backend_texture;
+ GetGrBackendTexture(context_state->feature_info(), GetGLTarget(), size(),
+ GetGLServiceId(), format(), &backend_texture);
+ cached_promise_texture_ = SkPromiseImageTexture::Make(backend_texture);
+ }
}
+ return std::make_unique<SharedImageRepresentationSkiaImpl>(
+ manager, this, this, std::move(context_state), cached_promise_texture_,
+ tracker);
+}
- void OnMemoryDump(const std::string& dump_name,
- base::trace_event::MemoryAllocatorDump* dump,
- base::trace_event::ProcessMemoryDump* pmd,
- uint64_t client_tracing_id) override {
- // Add a |service_guid| which expresses shared ownership between the
- // various GPU dumps.
- auto client_guid = GetSharedImageGUIDForTracing(mailbox());
- auto service_guid = gl::GetGLTextureServiceGUIDForTracing(
- texture_passthrough_->service_id());
- pmd->CreateSharedGlobalAllocatorDump(service_guid);
+std::unique_ptr<SharedImageRepresentationGLTexture>
+SharedImageBackingGLImage::ProduceRGBEmulationGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) {
+ if (IsPassthrough())
+ return nullptr;
- int importance = 2; // This client always owns the ref.
- pmd->AddOwnershipEdge(client_guid, service_guid, importance);
+ if (!rgb_emulation_texture_) {
+ const GLenum target = GetGLTarget();
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
- auto* gl_image = texture_passthrough_->GetLevelImage(
- texture_passthrough_->target(), /*level=*/0);
- if (gl_image)
- gl_image->OnMemoryDump(pmd, client_tracing_id, dump_name);
+ // Set to false as this code path is only used on Mac.
+ const bool framebuffer_attachment_angle = false;
+ SharedImageBackingGLCommon::MakeTextureAndSetParameters(
+ target, 0 /* service_id */, framebuffer_attachment_angle, nullptr,
+ &rgb_emulation_texture_);
+ api->glBindTextureFn(target, rgb_emulation_texture_->service_id());
+
+ gles2::Texture::ImageState image_state = gles2::Texture::BOUND;
+ gl::GLImage* image = texture_->GetLevelImage(target, 0, &image_state);
+ DCHECK_EQ(image, image_.get());
+
+ DCHECK(image->ShouldBindOrCopy() == gl::GLImage::BIND);
+ const GLenum internal_format = GL_RGB;
+ if (!image->BindTexImageWithInternalformat(target, internal_format)) {
+ LOG(ERROR) << "Failed to bind image to rgb texture.";
+ rgb_emulation_texture_->RemoveLightweightRef(true /* have_context */);
+ rgb_emulation_texture_ = nullptr;
+ return nullptr;
+ }
+ GLenum format =
+ gles2::TextureManager::ExtractFormatFromStorageFormat(internal_format);
+ GLenum type =
+ gles2::TextureManager::ExtractTypeFromStorageFormat(internal_format);
+
+ const gles2::Texture::LevelInfo* info = texture_->GetLevelInfo(target, 0);
+ rgb_emulation_texture_->SetLevelInfo(target, 0, internal_format,
+ info->width, info->height, 1, 0,
+ format, type, info->cleared_rect);
+
+ rgb_emulation_texture_->SetLevelImage(target, 0, image, image_state);
+ rgb_emulation_texture_->SetImmutable(true, false);
}
- void BeginReadAccess() override {}
+ return std::make_unique<SharedImageRepresentationGLTextureImpl>(
+ manager, this, this, tracker, rgb_emulation_texture_);
+}
- protected:
- std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
- ProduceGLTexturePassthrough(SharedImageManager* manager,
- MemoryTypeTracker* tracker) override {
- return std::make_unique<SharedImageRepresentationGLTexturePassthroughImpl>(
- manager, this, tracker, texture_passthrough_);
+void SharedImageBackingGLImage::Update(
+ std::unique_ptr<gfx::GpuFence> in_fence) {
+ if (in_fence) {
+    // TODO(dcastagna): Don't wait for the fence if the SharedImage is going
+    // to be scanned out as an HW overlay. Currently we don't know that at
+    // this point, and we always bind the image, so we need to wait for the
+    // fence.
+ std::unique_ptr<gl::GLFence> egl_fence =
+ gl::GLFence::CreateFromGpuFence(*in_fence.get());
+ egl_fence->ServerWait();
}
- std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- scoped_refptr<SharedContextState> context_state) override {
- auto result = std::make_unique<SharedImageRepresentationSkiaImpl>(
- manager, this, std::move(context_state), cached_promise_texture_,
- tracker, texture_passthrough_->target(),
- texture_passthrough_->service_id());
- cached_promise_texture_ = result->promise_texture();
- return result;
+ image_bind_or_copy_needed_ = true;
+}
+
+bool SharedImageBackingGLImage::OnGLTextureBeginAccess(GLenum mode) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM)
+ return true;
+ return BindOrCopyImageIfNeeded();
+}
+
+bool SharedImageBackingGLImage::OnGLTexturePassthroughBeginAccess(GLenum mode) {
+ if (mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM)
+ return true;
+ return BindOrCopyImageIfNeeded();
+}
+
+bool SharedImageBackingGLImage::OnSkiaBeginReadAccess() {
+ return BindOrCopyImageIfNeeded();
+}
+
+bool SharedImageBackingGLImage::OnSkiaBeginWriteAccess() {
+ return BindOrCopyImageIfNeeded();
+}
+
+bool SharedImageBackingGLImage::InitializeGLTexture() {
+ SharedImageBackingGLCommon::MakeTextureAndSetParameters(
+ gl_params_.target, 0 /* service_id */,
+ gl_params_.framebuffer_attachment_angle,
+ IsPassthrough() ? &passthrough_texture_ : nullptr,
+ IsPassthrough() ? nullptr : &texture_);
+
+ // Set the GLImage to be unbound from the texture.
+ if (IsPassthrough()) {
+ passthrough_texture_->SetEstimatedSize(EstimatedSize(format(), size()));
+ passthrough_texture_->SetLevelImage(gl_params_.target, 0, image_.get());
+ passthrough_texture_->set_is_bind_pending(true);
+ } else {
+ texture_->SetLevelInfo(
+ gl_params_.target, 0, gl_params_.internal_format, size().width(),
+ size().height(), 1, 0, gl_params_.format, gl_params_.type,
+ gl_params_.is_cleared ? gfx::Rect(size()) : gfx::Rect());
+ texture_->SetLevelImage(gl_params_.target, 0, image_.get(),
+ gles2::Texture::UNBOUND);
+ texture_->SetImmutable(true, false /* has_immutable_storage */);
}
- std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
- SharedImageManager* manager,
- MemoryTypeTracker* tracker,
- WGPUDevice device) override {
- if (!factory()) {
- DLOG(ERROR) << "No SharedImageFactory to create a dawn representation.";
- return nullptr;
+ // Historically we have bound GLImages at initialization, rather than waiting
+ // until the bound representation is actually needed.
+ if (image_->ShouldBindOrCopy() == gl::GLImage::BIND)
+ return BindOrCopyImageIfNeeded();
+ return true;
+}
+
+bool SharedImageBackingGLImage::BindOrCopyImageIfNeeded() {
+ if (!image_bind_or_copy_needed_)
+ return true;
+
+ const GLenum target = GetGLTarget();
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+ api->glBindTextureFn(target, GetGLServiceId());
+
+ // Un-bind the GLImage from the texture if it is currently bound.
+ if (image_->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ bool is_bound = false;
+ if (IsPassthrough()) {
+ is_bound = !passthrough_texture_->is_bind_pending();
+ } else {
+ gles2::Texture::ImageState old_state = gles2::Texture::UNBOUND;
+ texture_->GetLevelImage(target, 0, &old_state);
+ is_bound = old_state == gles2::Texture::BOUND;
}
+ if (is_bound)
+ image_->ReleaseTexImage(target);
+ }
- return ProduceDawnCommon(factory(), manager, tracker, device, this, true);
+ // Bind or copy the GLImage to the texture.
+ gles2::Texture::ImageState new_state = gles2::Texture::UNBOUND;
+ if (image_->ShouldBindOrCopy() == gl::GLImage::BIND) {
+ if (gl_params_.is_rgb_emulation) {
+ if (!image_->BindTexImageWithInternalformat(target, GL_RGB)) {
+ LOG(ERROR) << "Failed to bind GLImage to RGB target";
+ return false;
+ }
+ } else {
+ if (!image_->BindTexImage(target)) {
+ LOG(ERROR) << "Failed to bind GLImage to target";
+ return false;
+ }
+ }
+ new_state = gles2::Texture::BOUND;
+ } else {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api,
+ gl_unpack_attribs_,
+ /*upload=*/true);
+ if (!image_->CopyTexImage(target)) {
+ LOG(ERROR) << "Failed to copy GLImage to target";
+ return false;
+ }
+ new_state = gles2::Texture::COPIED;
+ }
+ if (IsPassthrough()) {
+ passthrough_texture_->set_is_bind_pending(new_state ==
+ gles2::Texture::UNBOUND);
+ } else {
+ texture_->SetLevelImage(target, 0, image_.get(), new_state);
}
- private:
- scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
- sk_sp<SkPromiseImageTexture> cached_promise_texture_;
-};
+ image_bind_or_copy_needed_ = false;
+ return true;
+}
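A sketch of the lazy bind/copy contract implemented above (illustrative; the
names are the ones introduced by this patch): Update() only records that work
is pending, and every access path funnels through BindOrCopyImageIfNeeded().

    // Update() waits on any incoming fence and marks
    // |image_bind_or_copy_needed_|, but does not touch the texture.
    backing->Update(std::move(in_fence));

    // The next access re-binds or re-copies the GLImage:
    //   OnGLTextureBeginAccess(mode)            -> BindOrCopyImageIfNeeded()
    //   OnGLTexturePassthroughBeginAccess(mode) -> BindOrCopyImageIfNeeded()
    //   OnSkiaBeginReadAccess()/WriteAccess()   -> BindOrCopyImageIfNeeded()
    // Overlay access (GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM) skips the
    // bind, since the image is scanned out directly.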
+
+void SharedImageBackingGLImage::InitializePixels(GLenum format,
+ GLenum type,
+ const uint8_t* data) {
+ DCHECK_EQ(image_->ShouldBindOrCopy(), gl::GLImage::BIND);
+ BindOrCopyImageIfNeeded();
+
+ const GLenum target = GetGLTarget();
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+ api->glBindTextureFn(target, GetGLServiceId());
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(
+ api, gl_unpack_attribs_, true /* uploading_data */);
+ api->glTexSubImage2DFn(target, 0, 0, 0, size().width(), size().height(),
+ format, type, data);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingFactoryGLTexture
SharedImageBackingFactoryGLTexture::SharedImageBackingFactoryGLTexture(
const GpuPreferences& gpu_preferences,
@@ -1019,44 +1207,27 @@ SharedImageBackingFactoryGLTexture::CreateSharedImage(
image->SetColorSpace(color_space);
viz::ResourceFormat format = viz::GetResourceFormat(buffer_format);
-
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
-
const bool for_framebuffer_attachment =
(usage & (SHARED_IMAGE_USAGE_RASTER |
SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
- GLuint service_id = MakeTextureAndSetParameters(
- api, target, for_framebuffer_attachment && texture_usage_angle_);
- bool is_rgb_emulation = usage & SHARED_IMAGE_USAGE_RGB_EMULATION;
-
- gles2::Texture::ImageState image_state = gles2::Texture::UNBOUND;
- if (image->ShouldBindOrCopy() == gl::GLImage::BIND) {
- bool is_bound = false;
- if (is_rgb_emulation)
- is_bound = image->BindTexImageWithInternalformat(target, GL_RGB);
- else
- is_bound = image->BindTexImage(target);
- if (is_bound) {
- image_state = gles2::Texture::BOUND;
- } else {
- LOG(ERROR) << "Failed to bind image to target.";
- api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
- }
- } else if (use_passthrough_) {
- image->CopyTexImage(target);
- image_state = gles2::Texture::COPIED;
- }
+ const bool is_rgb_emulation = (usage & SHARED_IMAGE_USAGE_RGB_EMULATION) != 0;
- GLuint internal_format =
+ SharedImageBackingGLCommon::InitializeGLTextureParams params;
+ params.target = target;
+ params.internal_format =
is_rgb_emulation ? GL_RGB : image->GetInternalFormat();
- GLenum gl_format = is_rgb_emulation ? GL_RGB : image->GetDataFormat();
- GLenum gl_type = image->GetDataType();
-
- return MakeBacking(use_passthrough_, mailbox, target, service_id, image,
- image_state, internal_format, gl_format, gl_type, nullptr,
- true, false, format, size, color_space, usage, attribs);
+ params.format = is_rgb_emulation ? GL_RGB : image->GetDataFormat();
+ params.type = image->GetDataType();
+ params.is_cleared = true;
+ params.is_rgb_emulation = is_rgb_emulation;
+ params.framebuffer_attachment_angle =
+ for_framebuffer_attachment && texture_usage_angle_;
+ auto result = std::make_unique<SharedImageBackingGLImage>(
+ image, mailbox, format, size, color_space, usage, params, attribs,
+ use_passthrough_);
+ if (!result->InitializeGLTexture())
+ return nullptr;
+ return std::move(result);
}
std::unique_ptr<SharedImageBacking>
@@ -1068,11 +1239,17 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageForTest(
viz::ResourceFormat format,
const gfx::Size& size,
uint32_t usage) {
- return MakeBacking(false, mailbox, target, service_id, nullptr,
- gles2::Texture::UNBOUND, viz::GLInternalFormat(format),
- viz::GLDataFormat(format), viz::GLDataType(format),
- nullptr, is_cleared, false, format, size,
- gfx::ColorSpace(), usage, UnpackStateAttribs());
+ auto result = std::make_unique<SharedImageBackingGLTexture>(
+ mailbox, format, size, gfx::ColorSpace(), usage,
+ false /* is_passthrough */);
+ SharedImageBackingGLCommon::InitializeGLTextureParams params;
+ params.target = target;
+ params.internal_format = viz::GLInternalFormat(format);
+ params.format = viz::GLDataFormat(format);
+ params.type = viz::GLDataType(format);
+ params.is_cleared = is_cleared;
+ result->InitializeGLTexture(service_id, params);
+ return std::move(result);
}
scoped_refptr<gl::GLImage> SharedImageBackingFactoryGLTexture::MakeGLImage(
@@ -1109,66 +1286,6 @@ bool SharedImageBackingFactoryGLTexture::CanImportGpuMemoryBuffer(
}
std::unique_ptr<SharedImageBacking>
-SharedImageBackingFactoryGLTexture::MakeBacking(
- bool passthrough,
- const Mailbox& mailbox,
- GLenum target,
- GLuint service_id,
- scoped_refptr<gl::GLImage> image,
- gles2::Texture::ImageState image_state,
- GLuint level_info_internal_format,
- GLuint gl_format,
- GLuint gl_type,
- const gles2::Texture::CompatibilitySwizzle* swizzle,
- bool is_cleared,
- bool has_immutable_storage,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const UnpackStateAttribs& attribs) {
- if (passthrough) {
- scoped_refptr<gles2::TexturePassthrough> passthrough_texture =
- base::MakeRefCounted<gles2::TexturePassthrough>(service_id, target);
- if (image) {
- passthrough_texture->SetLevelImage(target, 0, image.get());
- passthrough_texture->set_is_bind_pending(image_state ==
- gles2::Texture::UNBOUND);
- }
-
- // Get the texture size from ANGLE and set it on the passthrough texture.
- GLint texture_memory_size = 0;
- gl::GLApi* api = gl::g_current_gl_context;
- api->glGetTexParameterivFn(target, GL_MEMORY_SIZE_ANGLE,
- &texture_memory_size);
- passthrough_texture->SetEstimatedSize(texture_memory_size);
-
- return std::make_unique<SharedImageBackingPassthroughGLTexture>(
- mailbox, format, size, color_space, usage,
- std::move(passthrough_texture));
- } else {
- gles2::Texture* texture = new gles2::Texture(service_id);
- texture->SetLightweightRef();
- texture->SetTarget(target, 1);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->SetLevelInfo(target, 0, level_info_internal_format, size.width(),
- size.height(), 1, 0, gl_format, gl_type,
- is_cleared ? gfx::Rect(size) : gfx::Rect());
- if (swizzle)
- texture->SetCompatibilitySwizzle(swizzle);
- if (image)
- texture->SetLevelImage(target, 0, image.get(), image_state);
- texture->SetImmutable(true, has_immutable_storage);
-
- return std::make_unique<SharedImageBackingGLTexture>(
- mailbox, format, size, color_space, usage, texture, attribs);
- }
-}
-
-std::unique_ptr<SharedImageBacking>
SharedImageBackingFactoryGLTexture::MakeEglImageBacking(
const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -1291,23 +1408,17 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageInternal(
}
}
- gl::GLApi* api = gl::g_current_gl_context;
- ScopedRestoreTexture scoped_restore(api, target);
-
const bool for_framebuffer_attachment =
(usage & (SHARED_IMAGE_USAGE_RASTER |
SHARED_IMAGE_USAGE_GLES2_FRAMEBUFFER_HINT)) != 0;
- GLuint service_id = MakeTextureAndSetParameters(
- api, target, for_framebuffer_attachment && texture_usage_angle_);
scoped_refptr<gl::GLImage> image;
+
// TODO(piman): We pretend the texture was created in an ES2 context, so that
// it can be used in other ES2 contexts, and so we have to pass gl_format as
// the internal format in the LevelInfo. https://crbug.com/628064
GLuint level_info_internal_format = format_info.gl_format;
bool is_cleared = false;
- bool needs_subimage_upload = false;
- bool has_immutable_storage = false;
if (use_buffer) {
image = image_factory_->CreateAnonymousImage(
size, format_info.buffer_format, gfx::BufferUsage::SCANOUT,
@@ -1322,55 +1433,79 @@ SharedImageBackingFactoryGLTexture::CreateSharedImageInternal(
surface_handle, &is_cleared);
}
// The allocated image should not require copy.
- if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND ||
- !image->BindTexImage(target)) {
- LOG(ERROR) << "CreateSharedImage: Failed to "
- << (image ? "bind" : "create") << " image";
- api->glDeleteTexturesFn(1, &service_id);
+ if (!image || image->ShouldBindOrCopy() != gl::GLImage::BIND) {
+ LOG(ERROR) << "CreateSharedImage: Failed to create bindable image";
return nullptr;
}
level_info_internal_format = image->GetInternalFormat();
if (color_space.IsValid())
image->SetColorSpace(color_space);
- needs_subimage_upload = !pixel_data.empty();
- } else if (format_info.supports_storage) {
- api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
- size.width(), size.height());
- has_immutable_storage = true;
- needs_subimage_upload = !pixel_data.empty();
- } else if (format_info.is_compressed) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
- api->glCompressedTexImage2DFn(target, 0, format_info.image_internal_format,
- size.width(), size.height(), 0,
- pixel_data.size(), pixel_data.data());
- } else {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
- api->glTexImage2DFn(target, 0, format_info.image_internal_format,
- size.width(), size.height(), 0,
- format_info.adjusted_format, format_info.gl_type,
- pixel_data.data());
}
- // If we are using a buffer or TexStorage API but have data to upload, do so
- // now via TexSubImage2D.
- if (needs_subimage_upload) {
- ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
- !pixel_data.empty());
- api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
- format_info.adjusted_format, format_info.gl_type,
- pixel_data.data());
- }
+ SharedImageBackingGLCommon::InitializeGLTextureParams params;
+ params.target = target;
+ params.internal_format = level_info_internal_format;
+ params.format = format_info.gl_format;
+ params.type = format_info.gl_type;
+ params.is_cleared = pixel_data.empty() ? is_cleared : true;
+ params.has_immutable_storage = !image && format_info.supports_storage;
+ params.framebuffer_attachment_angle =
+ for_framebuffer_attachment && texture_usage_angle_;
+
+ if (image) {
+ DCHECK(!format_info.swizzle);
+ auto result = std::make_unique<SharedImageBackingGLImage>(
+ image, mailbox, format, size, color_space, usage, params, attribs,
+ use_passthrough_);
+ if (!result->InitializeGLTexture())
+ return nullptr;
+ if (!pixel_data.empty()) {
+ result->InitializePixels(format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+ return std::move(result);
+ } else {
+ auto result = std::make_unique<SharedImageBackingGLTexture>(
+ mailbox, format, size, color_space, usage, use_passthrough_);
+ result->InitializeGLTexture(0, params);
- return MakeBacking(
- use_passthrough_, mailbox, target, service_id, image,
- gles2::Texture::BOUND, level_info_internal_format, format_info.gl_format,
- format_info.gl_type, format_info.swizzle,
- pixel_data.empty() ? is_cleared : true, has_immutable_storage, format,
- size, color_space, usage, attribs);
+ gl::GLApi* api = gl::g_current_gl_context;
+ ScopedRestoreTexture scoped_restore(api, target);
+ api->glBindTextureFn(target, result->GetGLServiceId());
+
+ if (format_info.supports_storage) {
+ api->glTexStorage2DEXTFn(target, 1, format_info.storage_internal_format,
+ size.width(), size.height());
+
+ if (!pixel_data.empty()) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(
+ api, attribs, true /* uploading_data */);
+ api->glTexSubImage2DFn(target, 0, 0, 0, size.width(), size.height(),
+ format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+ } else if (format_info.is_compressed) {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
+ api->glCompressedTexImage2DFn(
+ target, 0, format_info.image_internal_format, size.width(),
+ size.height(), 0, pixel_data.size(), pixel_data.data());
+ } else {
+ ScopedResetAndRestoreUnpackState scoped_unpack_state(api, attribs,
+ !pixel_data.empty());
+ api->glTexImage2DFn(target, 0, format_info.image_internal_format,
+ size.width(), size.height(), 0,
+ format_info.adjusted_format, format_info.gl_type,
+ pixel_data.data());
+ }
+ result->SetCompatibilitySwizzle(format_info.swizzle);
+ return std::move(result);
+ }
}
+///////////////////////////////////////////////////////////////////////////////
+// SharedImageBackingFactoryGLTexture::FormatInfo
+
SharedImageBackingFactoryGLTexture::FormatInfo::FormatInfo() = default;
SharedImageBackingFactoryGLTexture::FormatInfo::~FormatInfo() = default;
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
index 257cca42041..b73c65631d9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h
@@ -94,24 +94,6 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryGLTexture
gfx::BufferFormat format,
SurfaceHandle surface_handle,
const gfx::Size& size);
- static std::unique_ptr<SharedImageBacking> MakeBacking(
- bool passthrough,
- const Mailbox& mailbox,
- GLenum target,
- GLuint service_id,
- scoped_refptr<gl::GLImage> image,
- gles2::Texture::ImageState image_state,
- GLuint internal_format,
- GLuint gl_format,
- GLuint gl_type,
- const gles2::Texture::CompatibilitySwizzle* swizzle,
- bool is_cleared,
- bool has_immutable_storage,
- viz::ResourceFormat format,
- const gfx::Size& size,
- const gfx::ColorSpace& color_space,
- uint32_t usage,
- const UnpackStateAttribs& attribs);
// This is meant to be used only on Android. Return nullptr for other
// platforms.
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h
new file mode 100644
index 00000000000..dafdfd4a359
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_internal.h
@@ -0,0 +1,296 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_TEXTURE_INTERNAL_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_TEXTURE_INTERNAL_H_
+
+#include "gpu/command_buffer/service/shared_image_backing.h"
+#include "gpu/command_buffer/service/shared_image_backing_factory_gl_texture.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+
+namespace gpu {
+
+// Representation of a SharedImageBackingGLTexture or SharedImageBackingGLImage
+// as a GL Texture.
+class SharedImageRepresentationGLTextureImpl
+ : public SharedImageRepresentationGLTexture {
+ public:
+ class Client {
+ public:
+ virtual bool OnGLTextureBeginAccess(GLenum mode) = 0;
+ };
+ SharedImageRepresentationGLTextureImpl(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ MemoryTypeTracker* tracker,
+ gles2::Texture* texture);
+
+ private:
+ // SharedImageRepresentationGLTexture:
+ gles2::Texture* GetTexture() override;
+ bool BeginAccess(GLenum mode) override;
+
+ Client* const client_ = nullptr;
+ gles2::Texture* texture_;
+};
+
+// Representation of a SharedImageBackingGLTexture or SharedImageBackingGLImage
+// as a GL TexturePassthrough.
+class SharedImageRepresentationGLTexturePassthroughImpl
+ : public SharedImageRepresentationGLTexturePassthrough {
+ public:
+ class Client {
+ public:
+ virtual bool OnGLTexturePassthroughBeginAccess(GLenum mode) = 0;
+ };
+ SharedImageRepresentationGLTexturePassthroughImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough);
+ ~SharedImageRepresentationGLTexturePassthroughImpl() override;
+
+ private:
+ // SharedImageRepresentationGLTexturePassthrough:
+ const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
+ override;
+ bool BeginAccess(GLenum mode) override;
+
+ Client* const client_ = nullptr;
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
+};
+
+// Common helper functions for SharedImageBackingGLTexture and
+// SharedImageBackingGLImage.
+class SharedImageBackingGLCommon : public SharedImageBacking {
+ public:
+ // These parameters are used to explicitly initialize a GL texture.
+ // TODO(https://crbug.com/1092155): The goal here is to cache these parameters
+ // (which are specified at initialization), so that the GL texture can be
+ // allocated and bound lazily. In that world, |service_id| will not be a
+ // parameter, but will be allocated lazily, and |image| will be handled by the
+ // relevant sub-class.
+ struct InitializeGLTextureParams {
+ GLenum target = 0;
+ GLenum internal_format = 0;
+ GLenum format = 0;
+ GLenum type = 0;
+ bool is_cleared = false;
+ bool is_rgb_emulation = false;
+ bool framebuffer_attachment_angle = false;
+ bool has_immutable_storage = false;
+ };
+
+ // Helper function to create a GL texture.
+ static void MakeTextureAndSetParameters(
+ GLenum target,
+ GLuint service_id,
+ bool framebuffer_attachment_angle,
+ scoped_refptr<gles2::TexturePassthrough>* passthrough_texture,
+ gles2::Texture** texture);
+};
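A hedged example of filling InitializeGLTextureParams for a plain 2D RGBA
texture (the values are illustrative; in practice the factory derives them
from FormatInfo or from the wrapped GLImage):

    SharedImageBackingGLCommon::InitializeGLTextureParams params;
    params.target = GL_TEXTURE_2D;
    params.internal_format = GL_RGBA;
    params.format = GL_RGBA;
    params.type = GL_UNSIGNED_BYTE;
    params.is_cleared = false;  // No initial pixel data, so not yet cleared.
    params.framebuffer_attachment_angle = true;  // Used as an FBO attachment.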
+
+// Skia representation for both SharedImageBackingGLTexture and
+// SharedImageBackingGLImage.
+class SharedImageRepresentationSkiaImpl : public SharedImageRepresentationSkia {
+ public:
+ class Client {
+ public:
+ virtual bool OnSkiaBeginReadAccess() = 0;
+ virtual bool OnSkiaBeginWriteAccess() = 0;
+ };
+ SharedImageRepresentationSkiaImpl(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ Client* client,
+ scoped_refptr<SharedContextState> context_state,
+ sk_sp<SkPromiseImageTexture> promise_texture,
+ MemoryTypeTracker* tracker);
+ ~SharedImageRepresentationSkiaImpl() override;
+
+ void SetBeginReadAccessCallback(
+ base::RepeatingClosure begin_read_access_callback);
+
+ private:
+ // SharedImageRepresentationSkia:
+ sk_sp<SkSurface> BeginWriteAccess(
+ int final_msaa_count,
+ const SkSurfaceProps& surface_props,
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override;
+ void EndWriteAccess(sk_sp<SkSurface> surface) override;
+ sk_sp<SkPromiseImageTexture> BeginReadAccess(
+ std::vector<GrBackendSemaphore>* begin_semaphores,
+ std::vector<GrBackendSemaphore>* end_semaphores) override;
+ void EndReadAccess() override;
+ bool SupportsMultipleConcurrentReadAccess() override;
+
+ void CheckContext();
+
+ Client* const client_ = nullptr;
+ scoped_refptr<SharedContextState> context_state_;
+ sk_sp<SkPromiseImageTexture> promise_texture_;
+
+ SkSurface* write_surface_ = nullptr;
+#if DCHECK_IS_ON()
+ gl::GLContext* context_ = nullptr;
+#endif
+};
+
+// Implementation of SharedImageBacking that creates a GL Texture that is not
+// backed by a GLImage.
+class SharedImageBackingGLTexture : public SharedImageBacking {
+ public:
+ SharedImageBackingGLTexture(const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ bool is_passthrough);
+ SharedImageBackingGLTexture(const SharedImageBackingGLTexture&) = delete;
+ SharedImageBackingGLTexture& operator=(const SharedImageBackingGLTexture&) =
+ delete;
+ ~SharedImageBackingGLTexture() override;
+
+ void InitializeGLTexture(
+ GLuint service_id,
+ const SharedImageBackingGLCommon::InitializeGLTextureParams& params);
+ void SetCompatibilitySwizzle(
+ const gles2::Texture::CompatibilitySwizzle* swizzle);
+
+ GLenum GetGLTarget() const;
+ GLuint GetGLServiceId() const;
+
+ private:
+ // SharedImageBacking:
+ void OnMemoryDump(const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) override;
+ gfx::Rect ClearedRect() const final;
+ void SetClearedRect(const gfx::Rect& cleared_rect) final;
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final;
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) final;
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) override;
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
+
+ bool IsPassthrough() const { return is_passthrough_; }
+
+ const bool is_passthrough_;
+ gles2::Texture* texture_ = nullptr;
+ scoped_refptr<gles2::TexturePassthrough> passthrough_texture_;
+
+ sk_sp<SkPromiseImageTexture> cached_promise_texture_;
+};
+
+// Implementation of SharedImageBacking that creates a GL Texture that is
+// backed by a GLImage and stores it as either a gles2::Texture or a
+// gles2::TexturePassthrough. Can be used with the legacy mailbox
+// implementation.
+class SharedImageBackingGLImage
+ : public SharedImageBacking,
+ public SharedImageRepresentationGLTextureImpl::Client,
+ public SharedImageRepresentationGLTexturePassthroughImpl::Client,
+ public SharedImageRepresentationSkiaImpl::Client {
+ public:
+ SharedImageBackingGLImage(
+ scoped_refptr<gl::GLImage> image,
+ const Mailbox& mailbox,
+ viz::ResourceFormat format,
+ const gfx::Size& size,
+ const gfx::ColorSpace& color_space,
+ uint32_t usage,
+ const SharedImageBackingGLCommon::InitializeGLTextureParams& params,
+ const SharedImageBackingFactoryGLTexture::UnpackStateAttribs& attribs,
+ bool is_passthrough);
+ SharedImageBackingGLImage(const SharedImageBackingGLImage& other) = delete;
+ SharedImageBackingGLImage& operator=(const SharedImageBackingGLImage& other) =
+ delete;
+ ~SharedImageBackingGLImage() override;
+
+ bool InitializeGLTexture();
+ void InitializePixels(GLenum format, GLenum type, const uint8_t* data);
+
+ GLenum GetGLTarget() const;
+ GLuint GetGLServiceId() const;
+
+ private:
+ // SharedImageBacking:
+ scoped_refptr<gfx::NativePixmap> GetNativePixmap() override;
+ void OnMemoryDump(const std::string& dump_name,
+ base::trace_event::MemoryAllocatorDump* dump,
+ base::trace_event::ProcessMemoryDump* pmd,
+ uint64_t client_tracing_id) override;
+ gfx::Rect ClearedRect() const final;
+ void SetClearedRect(const gfx::Rect& cleared_rect) final;
+ bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final;
+ std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) final;
+ std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device) final;
+ std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<SharedContextState> context_state) override;
+ std::unique_ptr<SharedImageRepresentationGLTexture>
+ ProduceRGBEmulationGLTexture(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override;
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override;
+
+ // SharedImageRepresentationGLTextureImpl::Client:
+ bool OnGLTextureBeginAccess(GLenum mode) override;
+
+ // SharedImageRepresentationGLTexturePassthroughImpl::Client:
+ bool OnGLTexturePassthroughBeginAccess(GLenum mode) override;
+
+ // SharedImageRepresentationSkiaImpl::Client:
+ bool OnSkiaBeginReadAccess() override;
+ bool OnSkiaBeginWriteAccess() override;
+
+ bool IsPassthrough() const { return is_passthrough_; }
+
+ scoped_refptr<gl::GLImage> image_;
+
+ // If |image_bind_or_copy_needed_| is true, then either bind or copy |image_|
+ // to the GL texture, and un-set |image_bind_or_copy_needed_|.
+ bool BindOrCopyImageIfNeeded();
+ bool image_bind_or_copy_needed_ = true;
+
+ const SharedImageBackingGLCommon::InitializeGLTextureParams gl_params_;
+ const SharedImageBackingFactoryGLTexture::UnpackStateAttribs
+ gl_unpack_attribs_;
+ const bool is_passthrough_;
+
+ gles2::Texture* rgb_emulation_texture_ = nullptr;
+ gles2::Texture* texture_ = nullptr;
+ scoped_refptr<gles2::TexturePassthrough> passthrough_texture_;
+
+ sk_sp<SkPromiseImageTexture> cached_promise_texture_;
+
+ base::WeakPtrFactory<SharedImageBackingGLImage> weak_factory_;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_IMAGE_BACKING_FACTORY_GL_TEXTURE_INTERNAL_H_
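Editorial note: the |image_bind_or_copy_needed_| flag above implements lazy attachment — the GLImage is only bound (or copied) into the GL texture when a representation actually needs the pixels, and an Update() re-arms the flag for the next frame. A minimal sketch of that pattern follows; everything except gl::GLImage::BindTexImage()/CopyTexImage() is an illustrative name, not the actual Chromium implementation.

#include "base/memory/scoped_refptr.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_image.h"

// Sketch only: prefer zero-copy binding, fall back to copying into the
// texture, and remember that the work is done until the next Update().
class LazyBoundImage {
 public:
  bool BindOrCopyIfNeeded() {
    if (!bind_or_copy_needed_)
      return true;  // Nothing changed since the last bind/copy.
    if (!image_->BindTexImage(target_) && !image_->CopyTexImage(target_))
      return false;  // Neither path worked; leave the flag armed.
    bind_or_copy_needed_ = false;
    return true;
  }
  // A new frame of data invalidates whatever the texture currently holds.
  void Update() { bind_or_copy_needed_ = true; }

 private:
  scoped_refptr<gl::GLImage> image_;
  GLenum target_ = GL_TEXTURE_2D;
  bool bind_or_copy_needed_ = true;
};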
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
index fb37ea94ee4..ccbe66b99c9 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_gl_texture_unittest.cc
@@ -863,6 +863,22 @@ TEST_P(SharedImageBackingFactoryGLTextureWithGMBTest,
EXPECT_TRUE(stub_image->bound());
int update_counter = stub_image->update_counter();
ref->Update(nullptr);
+ EXPECT_EQ(stub_image->update_counter(), update_counter);
+ EXPECT_TRUE(stub_image->bound());
+
+ // TODO(https://crbug.com/1092155): When we lazily bind the GLImage, this
+ // will be needed to trigger binding the GLImage.
+ {
+ auto skia_representation =
+ shared_image_representation_factory_->ProduceSkia(mailbox,
+ context_state_);
+ std::vector<GrBackendSemaphore> begin_semaphores;
+ std::vector<GrBackendSemaphore> end_semaphores;
+ std::unique_ptr<SharedImageRepresentationSkia::ScopedReadAccess>
+ scoped_read_access;
+ scoped_read_access = skia_representation->BeginScopedReadAccess(
+ &begin_semaphores, &end_semaphores);
+ }
EXPECT_TRUE(stub_image->bound());
EXPECT_GT(stub_image->update_counter(), update_counter);
}
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
index 4d7006bc582..d0335b8a227 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.h
@@ -9,9 +9,12 @@
#include "base/macros.h"
#include "components/viz/common/resources/resource_format.h"
+#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_backing_factory.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
#include "gpu/gpu_gles2_export.h"
#include "ui/gl/gl_bindings.h"
+#include "ui/gl/gl_image.h"
namespace gfx {
class Size;
@@ -22,7 +25,6 @@ namespace gpu {
class GpuDriverBugWorkarounds;
struct GpuFeatureInfo;
struct Mailbox;
-class SharedImageBacking;
// Implementation of SharedImageBackingFactory that produces IOSurface-backed
// SharedImages. This is meant to be used on macOS only.
@@ -34,6 +36,24 @@ class GPU_GLES2_EXPORT SharedImageBackingFactoryIOSurface
bool use_gl);
~SharedImageBackingFactoryIOSurface() override;
+ // Helper functions used by SharedImageRepresentationGLImage to do
+ // IOSurface-specific sharing.
+ static sk_sp<SkPromiseImageTexture> ProduceSkiaPromiseTextureMetal(
+ SharedImageBacking* backing,
+ scoped_refptr<SharedContextState> context_state,
+ scoped_refptr<gl::GLImage> image);
+ static std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gl::GLImage> image);
+ static std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device,
+ scoped_refptr<gl::GLImage> image);
+
// SharedImageBackingFactory implementation.
std::unique_ptr<SharedImageBacking> CreateSharedImage(
const Mailbox& mailbox,
diff --git a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
index f2e9f952c4b..7e01171b0c1 100644
--- a/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
+++ b/chromium/gpu/command_buffer/service/shared_image_backing_factory_iosurface.mm
@@ -110,28 +110,11 @@ base::scoped_nsprotocol<id<MTLTexture>> API_AVAILABLE(macos(10.11))
viz::ResourceFormat format) {
TRACE_EVENT0("gpu", "SharedImageBackingFactoryIOSurface::CreateMetalTexture");
base::scoped_nsprotocol<id<MTLTexture>> mtl_texture;
- MTLPixelFormat mtl_pixel_format;
- switch (format) {
- case viz::RED_8:
- case viz::ALPHA_8:
- case viz::LUMINANCE_8:
- mtl_pixel_format = MTLPixelFormatR8Unorm;
- break;
- case viz::RG_88:
- mtl_pixel_format = MTLPixelFormatRG8Unorm;
- break;
- case viz::RGBA_8888:
- mtl_pixel_format = MTLPixelFormatRGBA8Unorm;
- break;
- case viz::BGRA_8888:
- mtl_pixel_format = MTLPixelFormatBGRA8Unorm;
- break;
- default:
- // TODO(https://crbug.com/952063): Add support for all formats supported
- // by GLImageIOSurface.
- DLOG(ERROR) << "Resource format not yet supported in Metal.";
- return mtl_texture;
- }
+ MTLPixelFormat mtl_pixel_format =
+ static_cast<MTLPixelFormat>(viz::ToMTLPixelFormat(format));
+ if (mtl_pixel_format == MTLPixelFormatInvalid)
+ return mtl_texture;
+
base::scoped_nsobject<MTLTextureDescriptor> mtl_tex_desc(
[MTLTextureDescriptor new]);
[mtl_tex_desc setTextureType:MTLTextureType2D];
@@ -186,6 +169,32 @@ class SharedImageRepresentationGLTextureIOSurface
DISALLOW_COPY_AND_ASSIGN(SharedImageRepresentationGLTextureIOSurface);
};
+class SharedImageRepresentationGLTexturePassthroughIOSurface
+ : public SharedImageRepresentationGLTexturePassthrough {
+ public:
+ SharedImageRepresentationGLTexturePassthroughIOSurface(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough)
+ : SharedImageRepresentationGLTexturePassthrough(manager,
+ backing,
+ tracker),
+ texture_passthrough_(texture_passthrough) {}
+
+ const scoped_refptr<gles2::TexturePassthrough>& GetTexturePassthrough()
+ override {
+ return texture_passthrough_;
+ }
+ bool BeginAccess(GLenum mode) override { return true; }
+ void EndAccess() override { FlushIOSurfaceGLOperations(); }
+
+ private:
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough_;
+ DISALLOW_COPY_AND_ASSIGN(
+ SharedImageRepresentationGLTexturePassthroughIOSurface);
+};
+
// Representation of a SharedImageBackingIOSurface as a Skia Texture.
class SharedImageRepresentationSkiaIOSurface
: public SharedImageRepresentationSkia {
@@ -217,7 +226,7 @@ class SharedImageRepresentationSkiaIOSurface
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
- return SkSurface::MakeFromBackendTextureAsRenderTarget(
+ return SkSurface::MakeFromBackendTexture(
context_state_->gr_context(), promise_texture_->backendTexture(),
kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
backing()->color_space().ToSkColorSpace(), &surface_props);
@@ -250,6 +259,26 @@ class SharedImageRepresentationSkiaIOSurface
gles2::Texture* const gles2_texture_;
};
+class SharedImageRepresentationOverlayIOSurface
+ : public SharedImageRepresentationOverlay {
+ public:
+ SharedImageRepresentationOverlayIOSurface(SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gl::GLImage> gl_image)
+ : SharedImageRepresentationOverlay(manager, backing, tracker),
+ gl_image_(gl_image) {}
+
+ ~SharedImageRepresentationOverlayIOSurface() override { EndReadAccess(); }
+
+ private:
+ bool BeginReadAccess() override { return true; }
+ void EndReadAccess() override {}
+ gl::GLImage* GetGLImage() override { return gl_image_.get(); }
+
+ scoped_refptr<gl::GLImage> gl_image_;
+};
+
// Representation of a SharedImageBackingIOSurface as a Dawn Texture.
#if BUILDFLAG(USE_DAWN)
class SharedImageRepresentationDawnIOSurface
@@ -414,7 +443,7 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
bool ProduceLegacyMailbox(MailboxManager* mailbox_manager) final {
DCHECK(io_surface_);
- legacy_texture_ = GenGLTexture();
+ GenGLTexture(&legacy_texture_, nullptr);
if (!legacy_texture_) {
return false;
}
@@ -432,15 +461,28 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
std::unique_ptr<SharedImageRepresentationGLTexture> ProduceGLTexture(
SharedImageManager* manager,
MemoryTypeTracker* tracker) final {
- gles2::Texture* texture = GenGLTexture();
- if (!texture) {
+ gles2::Texture* texture = nullptr;
+ GenGLTexture(&texture, nullptr);
+ if (!texture)
return nullptr;
- }
-
return std::make_unique<SharedImageRepresentationGLTextureIOSurface>(
manager, this, tracker, texture);
}
+ std::unique_ptr<SharedImageRepresentationGLTexturePassthrough>
+ ProduceGLTexturePassthrough(SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override {
+ TRACE_EVENT0("gpu",
+ "SharedImageBackingFactoryIOSurface::GenGLTexturePassthrough");
+ scoped_refptr<gles2::TexturePassthrough> texture_passthrough;
+ GenGLTexture(nullptr, &texture_passthrough);
+ if (!texture_passthrough)
+ return nullptr;
+ return std::make_unique<
+ SharedImageRepresentationGLTexturePassthroughIOSurface>(
+ manager, this, tracker, texture_passthrough);
+ }
+
std::unique_ptr<SharedImageRepresentationSkia> ProduceSkia(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
@@ -448,7 +490,7 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
gles2::Texture* gles2_texture = nullptr;
GrBackendTexture gr_backend_texture;
if (context_state->GrContextIsGL()) {
- gles2_texture = GenGLTexture();
+ GenGLTexture(&gles2_texture, nullptr);
if (!gles2_texture)
return nullptr;
GetGrBackendTexture(
@@ -475,6 +517,15 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
gles2_texture);
}
+ std::unique_ptr<SharedImageRepresentationOverlay> ProduceOverlay(
+ SharedImageManager* manager,
+ MemoryTypeTracker* tracker) override {
+ if (!EnsureGLImage())
+ return nullptr;
+ return SharedImageBackingFactoryIOSurface::ProduceOverlay(
+ manager, this, tracker, gl_image_);
+ }
+
std::unique_ptr<SharedImageRepresentationDawn> ProduceDawn(
SharedImageManager* manager,
MemoryTypeTracker* tracker,
@@ -493,19 +544,35 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
}
private:
- gles2::Texture* GenGLTexture() {
+ bool EnsureGLImage() {
+ if (!gl_image_) {
+ GLFormatInfo gl_info = GetGLFormatInfo(format());
+ scoped_refptr<gl::GLImageIOSurface> gl_image(
+ gl::GLImageIOSurface::Create(size(), gl_info.internal_format));
+ if (!gl_image->Initialize(io_surface_, gfx::GenericSharedMemoryId(),
+ viz::BufferFormat(format()))) {
+ LOG(ERROR) << "Failed to create GLImageIOSurface";
+ } else {
+ gl_image_ = gl_image;
+ }
+ }
+ return !!gl_image_;
+ }
+
+ void GenGLTexture(
+ gles2::Texture** texture,
+ scoped_refptr<gles2::TexturePassthrough>* texture_passthrough) {
TRACE_EVENT0("gpu", "SharedImageBackingFactoryIOSurface::GenGLTexture");
GLFormatInfo gl_info = GetGLFormatInfo(format());
DCHECK(gl_info.supported);
+ if (texture)
+ *texture = nullptr;
+ if (texture_passthrough)
+ *texture_passthrough = nullptr;
// Wrap the IOSurface in a GLImageIOSurface
- scoped_refptr<gl::GLImageIOSurface> image(
- gl::GLImageIOSurface::Create(size(), gl_info.internal_format));
- if (!image->Initialize(io_surface_, gfx::GenericSharedMemoryId(),
- viz::BufferFormat(format()))) {
- LOG(ERROR) << "Failed to create GLImageIOSurface";
- return nullptr;
- }
+ if (!EnsureGLImage())
+ return;
gl::GLApi* api = gl::g_current_gl_context;
@@ -527,37 +594,48 @@ class SharedImageBackingIOSurface : public ClearTrackingSharedImageBacking {
GL_CLAMP_TO_EDGE);
// Bind the GLImageIOSurface to our texture
- if (!image->BindTexImage(GL_TEXTURE_RECTANGLE)) {
+ if (!gl_image_->BindTexImage(GL_TEXTURE_RECTANGLE)) {
LOG(ERROR) << "Failed to bind GLImageIOSurface";
api->glBindTextureFn(GL_TEXTURE_RECTANGLE, old_texture_binding);
api->glDeleteTexturesFn(1, &service_id);
- return nullptr;
+ return;
}
// If the backing is already cleared, no need to clear it again.
gfx::Rect cleared_rect = ClearedRect();
// Manually create a gles2::Texture wrapping our driver texture.
- gles2::Texture* texture = new gles2::Texture(service_id);
- texture->SetLightweightRef();
- texture->SetTarget(GL_TEXTURE_RECTANGLE, 1);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- texture->SetLevelInfo(GL_TEXTURE_RECTANGLE, 0, gl_info.internal_format,
- size().width(), size().height(), 1, 0, gl_info.format,
- gl_info.type, cleared_rect);
- texture->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, image.get(),
- gles2::Texture::BOUND);
- texture->SetImmutable(true, false);
-
- DCHECK_EQ(image->GetInternalFormat(), gl_info.internal_format);
+ if (texture) {
+ *texture = new gles2::Texture(service_id);
+ (*texture)->SetLightweightRef();
+ (*texture)->SetTarget(GL_TEXTURE_RECTANGLE, 1);
+ (*texture)->set_min_filter(GL_LINEAR);
+ (*texture)->set_mag_filter(GL_LINEAR);
+ (*texture)->set_wrap_t(GL_CLAMP_TO_EDGE);
+ (*texture)->set_wrap_s(GL_CLAMP_TO_EDGE);
+ (*texture)->SetLevelInfo(GL_TEXTURE_RECTANGLE, 0, gl_info.internal_format,
+ size().width(), size().height(), 1, 0,
+ gl_info.format, gl_info.type, cleared_rect);
+ (*texture)->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, gl_image_.get(),
+ gles2::Texture::BOUND);
+ (*texture)->SetImmutable(true, false);
+ }
+ if (texture_passthrough) {
+ *texture_passthrough = scoped_refptr<gles2::TexturePassthrough>(
+ new gles2::TexturePassthrough(service_id, GL_TEXTURE_RECTANGLE,
+ gl_info.internal_format, size().width(),
+ size().height(), 1, 0, gl_info.format,
+ gl_info.type));
+ (*texture_passthrough)
+ ->SetLevelImage(GL_TEXTURE_RECTANGLE, 0, gl_image_.get());
+ }
+
+ DCHECK_EQ(gl_image_->GetInternalFormat(), gl_info.internal_format);
api->glBindTextureFn(GL_TEXTURE_RECTANGLE, old_texture_binding);
- return texture;
}
+ scoped_refptr<gl::GLImageIOSurface> gl_image_;
base::ScopedCFTypeRef<IOSurfaceRef> io_surface_;
base::Optional<WGPUTextureFormat> dawn_format_;
base::scoped_nsprotocol<id<MTLTexture>> mtl_texture_;
@@ -715,4 +793,73 @@ bool SharedImageBackingFactoryIOSurface::CanImportGpuMemoryBuffer(
return false;
}
+// static
+sk_sp<SkPromiseImageTexture>
+SharedImageBackingFactoryIOSurface::ProduceSkiaPromiseTextureMetal(
+ SharedImageBacking* backing,
+ scoped_refptr<SharedContextState> context_state,
+ scoped_refptr<gl::GLImage> image) {
+ if (@available(macOS 10.11, *)) {
+ DCHECK(context_state->GrContextIsMetal());
+
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface =
+ static_cast<gl::GLImageIOSurface*>(image.get())->io_surface();
+
+ id<MTLDevice> mtl_device =
+ context_state->metal_context_provider()->GetMTLDevice();
+ auto mtl_texture = CreateMetalTexture(mtl_device, io_surface.get(),
+ backing->size(), backing->format());
+ DCHECK(mtl_texture);
+
+ GrMtlTextureInfo info;
+ info.fTexture.retain(mtl_texture.get());
+ auto gr_backend_texture =
+ GrBackendTexture(backing->size().width(), backing->size().height(),
+ GrMipMapped::kNo, info);
+ return SkPromiseImageTexture::Make(gr_backend_texture);
+ }
+ return nullptr;
+}
+
+// static
+std::unique_ptr<SharedImageRepresentationOverlay>
+SharedImageBackingFactoryIOSurface::ProduceOverlay(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ scoped_refptr<gl::GLImage> image) {
+ return std::make_unique<SharedImageRepresentationOverlayIOSurface>(
+ manager, backing, tracker, image);
+}
+
+// static
+std::unique_ptr<SharedImageRepresentationDawn>
+SharedImageBackingFactoryIOSurface::ProduceDawn(
+ SharedImageManager* manager,
+ SharedImageBacking* backing,
+ MemoryTypeTracker* tracker,
+ WGPUDevice device,
+ scoped_refptr<gl::GLImage> image) {
+#if BUILDFLAG(USE_DAWN)
+ // See comments in SharedImageBackingFactoryIOSurface::CreateSharedImage
+ // regarding RGBA versus BGRA.
+ viz::ResourceFormat actual_format = backing->format();
+ if (actual_format == viz::RGBA_8888)
+ actual_format = viz::BGRA_8888;
+
+ base::ScopedCFTypeRef<IOSurfaceRef> io_surface =
+ static_cast<gl::GLImageIOSurface*>(image.get())->io_surface();
+
+ base::Optional<WGPUTextureFormat> wgpu_format =
+ viz::ToWGPUFormat(actual_format);
+ if (wgpu_format.value() == WGPUTextureFormat_Undefined)
+ return nullptr;
+
+ return std::make_unique<SharedImageRepresentationDawnIOSurface>(
+ manager, backing, tracker, device, io_surface, wgpu_format.value());
+#else // BUILDFLAG(USE_DAWN)
+ return nullptr;
+#endif // BUILDFLAG(USE_DAWN)
+}
+
} // namespace gpu
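Editorial note: the three static helpers added above let non-IOSurface backings (for instance a GL backing wrapping a GLImageIOSurface) reuse the IOSurface-specific interop paths. A hedged sketch of a plausible Metal call site follows; the routing function and its placement are assumptions — only the helper's signature comes from this diff.

// Illustrative only: a backing holding an IOSurface-backed GLImage asks for
// a Skia promise texture when Skia runs on top of Metal.
sk_sp<SkPromiseImageTexture> MaybeProduceMetalPromiseTexture(
    SharedImageBacking* backing,
    scoped_refptr<SharedContextState> context_state,
    scoped_refptr<gl::GLImage> image) {
  if (!context_state->GrContextIsMetal())
    return nullptr;  // GL and Vulkan paths are handled elsewhere.
  return SharedImageBackingFactoryIOSurface::ProduceSkiaPromiseTextureMetal(
      backing, std::move(context_state), std::move(image));
}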
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.cc b/chromium/gpu/command_buffer/service/shared_image_factory.cc
index 2eb65e9ba98..36d96fb7896 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.cc
@@ -86,8 +86,7 @@ SharedImageFactory::SharedImageFactory(
shared_context_state_(context_state),
memory_tracker_(std::make_unique<MemoryTypeTracker>(memory_tracker)),
using_vulkan_(context_state && context_state->GrContextIsVulkan()),
- using_metal_(context_state && context_state->GrContextIsMetal()),
- using_dawn_(context_state && context_state->GrContextIsDawn()) {
+ using_skia_dawn_(context_state && context_state->GrContextIsDawn()) {
bool use_gl = gl::GetGLImplementation() != gl::kGLImplementationNone;
if (use_gl) {
gl_backing_factory_ = std::make_unique<SharedImageBackingFactoryGLTexture>(
@@ -203,7 +202,7 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
SharedImageBackingFactory* factory = nullptr;
if (backing_factory_for_testing_) {
factory = backing_factory_for_testing_;
- } else if (!using_vulkan_ && !using_dawn_) {
+ } else if (!using_vulkan_ && !using_skia_dawn_) {
allow_legacy_mailbox = true;
factory = gl_backing_factory_.get();
} else {
@@ -213,6 +212,8 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
return false;
auto backing = factory->CreateSharedImage(mailbox, format, size, color_space,
usage, data);
+ if (backing)
+ backing->OnWriteSucceeded();
return RegisterBacking(std::move(backing), allow_legacy_mailbox);
}
@@ -235,6 +236,8 @@ bool SharedImageFactory::CreateSharedImage(const Mailbox& mailbox,
auto backing =
factory->CreateSharedImage(mailbox, client_id, std::move(handle), format,
surface_handle, size, color_space, usage);
+ if (backing)
+ backing->OnWriteSucceeded();
return RegisterBacking(std::move(backing), allow_legacy_mailbox);
}
@@ -310,7 +313,9 @@ bool SharedImageFactory::PresentSwapChain(const Mailbox& mailbox) {
#if defined(OS_FUCHSIA)
bool SharedImageFactory::RegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
decltype(buffer_collections_)::iterator it;
bool inserted;
std::tie(it, inserted) =
@@ -331,9 +336,9 @@ bool SharedImageFactory::RegisterSysmemBufferCollection(
VkDevice device =
vulkan_context_provider_->GetDeviceQueue()->GetVulkanDevice();
DCHECK(device != VK_NULL_HANDLE);
- it->second =
- vulkan_context_provider_->GetVulkanImplementation()
- ->RegisterSysmemBufferCollection(device, id, std::move(token));
+ it->second = vulkan_context_provider_->GetVulkanImplementation()
+ ->RegisterSysmemBufferCollection(
+ device, id, std::move(token), format, usage);
return true;
}
@@ -371,6 +376,25 @@ bool SharedImageFactory::IsSharedBetweenThreads(uint32_t usage) {
(usage & SHARED_IMAGE_USAGE_DISPLAY);
}
+bool SharedImageFactory::CanUseWrappedSkImage(uint32_t usage) const {
+ if (!wrapped_sk_image_factory_)
+ return false;
+
+ constexpr auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER |
+ SHARED_IMAGE_USAGE_OOP_RASTERIZATION |
+ SHARED_IMAGE_USAGE_DISPLAY;
+
+ if (using_vulkan_ || using_skia_dawn_) {
+ // For SkiaRenderer/Vulkan+Dawn use WrappedSkImage if the usage is only
+ // raster and/or display.
+ return (usage & kWrappedSkImageUsage) && !(usage & ~kWrappedSkImageUsage);
+ } else {
+ // For SkiaRenderer/GL only use WrappedSkImages for OOP-R because
+ // CopySubTexture() doesn't use Skia. https://crbug.com/984045
+ return usage == kWrappedSkImageUsage;
+ }
+}
+
SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
uint32_t usage,
viz::ResourceFormat format,
@@ -382,12 +406,9 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
bool using_dawn = usage & SHARED_IMAGE_USAGE_WEBGPU;
bool vulkan_usage = using_vulkan_ && (usage & SHARED_IMAGE_USAGE_DISPLAY);
bool gl_usage = usage & SHARED_IMAGE_USAGE_GLES2;
- bool share_between_gl_metal =
- using_metal_ && (usage & SHARED_IMAGE_USAGE_OOP_RASTERIZATION);
bool share_between_threads = IsSharedBetweenThreads(usage);
bool share_between_gl_vulkan = gl_usage && vulkan_usage;
bool using_interop_factory = share_between_gl_vulkan || using_dawn ||
- share_between_gl_metal ||
(usage & SHARED_IMAGE_USAGE_VIDEO_DECODE) ||
(share_between_threads && vulkan_usage);
@@ -397,23 +418,25 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
using_interop_factory |= usage & SHARED_IMAGE_USAGE_SCANOUT;
#endif
- // wrapped_sk_image_factory_ is only used for OOPR and supports
- // a limited number of flags (e.g. no SHARED_IMAGE_USAGE_SCANOUT).
- constexpr auto kWrappedSkImageUsage = SHARED_IMAGE_USAGE_RASTER |
- SHARED_IMAGE_USAGE_OOP_RASTERIZATION |
- SHARED_IMAGE_USAGE_DISPLAY;
- bool using_wrapped_sk_image =
- wrapped_sk_image_factory_ && (usage == kWrappedSkImageUsage) &&
- !using_interop_factory && !share_between_threads;
- using_interop_factory |= vulkan_usage && !using_wrapped_sk_image;
+ bool using_wrapped_sk_image = !using_interop_factory &&
+ !share_between_threads &&
+ CanUseWrappedSkImage(usage);
+ if (using_wrapped_sk_image) {
+ if (gmb_type == gfx::EMPTY_BUFFER ||
+ wrapped_sk_image_factory_->CanImportGpuMemoryBuffer(gmb_type)) {
+ *allow_legacy_mailbox = false;
+ return wrapped_sk_image_factory_.get();
+ }
+ }
+
+ using_interop_factory |= vulkan_usage;
if (gmb_type != gfx::EMPTY_BUFFER) {
bool interop_factory_supports_gmb =
interop_backing_factory_ &&
interop_backing_factory_->CanImportGpuMemoryBuffer(gmb_type);
- if (using_wrapped_sk_image ||
- (using_interop_factory && !interop_backing_factory_)) {
+ if (using_interop_factory && !interop_backing_factory_) {
LOG(ERROR) << "Unable to screate SharedImage backing: no support for the "
"requested GpuMemoryBufferType.";
return nullptr;
@@ -424,11 +447,8 @@ SharedImageBackingFactory* SharedImageFactory::GetFactoryByUsage(
using_interop_factory |= interop_factory_supports_gmb;
}
- *allow_legacy_mailbox = !using_wrapped_sk_image && !using_interop_factory &&
- !using_vulkan_ && !share_between_threads;
-
- if (using_wrapped_sk_image)
- return wrapped_sk_image_factory_.get();
+ *allow_legacy_mailbox =
+ !using_interop_factory && !using_vulkan_ && !share_between_threads;
if (using_interop_factory) {
// TODO(crbug.com/969114): Not all shared image factory implementations
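Editorial note: the two-sided mask test in CanUseWrappedSkImage() reads — at least one wrapped-usage bit is set, and no bit outside the mask is set. A small self-contained sketch of the Vulkan/Dawn branch's semantics, using the usage constants from shared_image_usage.h (the GL branch instead requires exact equality with the mask):

#include <cstdint>
#include "gpu/command_buffer/common/shared_image_usage.h"

constexpr uint32_t kMask = gpu::SHARED_IMAGE_USAGE_RASTER |
                           gpu::SHARED_IMAGE_USAGE_OOP_RASTERIZATION |
                           gpu::SHARED_IMAGE_USAGE_DISPLAY;

// True iff |usage| is a non-empty subset of |kMask|.
constexpr bool IsSubsetOfMask(uint32_t usage) {
  return (usage & kMask) && !(usage & ~kMask);
}

static_assert(IsSubsetOfMask(gpu::SHARED_IMAGE_USAGE_RASTER |
                             gpu::SHARED_IMAGE_USAGE_DISPLAY),
              "raster+display can use WrappedSkImage");
static_assert(!IsSubsetOfMask(gpu::SHARED_IMAGE_USAGE_RASTER |
                              gpu::SHARED_IMAGE_USAGE_SCANOUT),
              "scanout must go to another factory");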
diff --git a/chromium/gpu/command_buffer/service/shared_image_factory.h b/chromium/gpu/command_buffer/service/shared_image_factory.h
index 60cb6aa9346..9753cf95a2a 100644
--- a/chromium/gpu/command_buffer/service/shared_image_factory.h
+++ b/chromium/gpu/command_buffer/service/shared_image_factory.h
@@ -99,7 +99,9 @@ class GPU_GLES2_EXPORT SharedImageFactory {
#if defined(OS_FUCHSIA)
bool RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token);
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage);
bool ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id);
#endif // defined(OS_FUCHSIA)
@@ -117,20 +119,23 @@ class GPU_GLES2_EXPORT SharedImageFactory {
void RegisterSharedImageBackingFactoryForTesting(
SharedImageBackingFactory* factory);
+ MailboxManager* mailbox_manager() { return mailbox_manager_; }
+
private:
bool IsSharedBetweenThreads(uint32_t usage);
+ bool CanUseWrappedSkImage(uint32_t usage) const;
SharedImageBackingFactory* GetFactoryByUsage(
uint32_t usage,
viz::ResourceFormat format,
bool* allow_legacy_mailbox,
gfx::GpuMemoryBufferType gmb_type = gfx::EMPTY_BUFFER);
+
MailboxManager* mailbox_manager_;
SharedImageManager* shared_image_manager_;
SharedContextState* shared_context_state_;
std::unique_ptr<MemoryTypeTracker> memory_tracker_;
const bool using_vulkan_;
- const bool using_metal_;
- const bool using_dawn_;
+ const bool using_skia_dawn_;
// The set of SharedImages which have been created (and are being kept alive)
// by this factory.
diff --git a/chromium/gpu/command_buffer/service/shared_image_manager.cc b/chromium/gpu/command_buffer/service/shared_image_manager.cc
index ec4004578a8..578b38c7b84 100644
--- a/chromium/gpu/command_buffer/service/shared_image_manager.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_manager.cc
@@ -103,17 +103,19 @@ SharedImageManager::Register(std::unique_ptr<SharedImageBacking> backing,
DCHECK(backing->mailbox().IsSharedImage());
AutoLock autolock(this);
- const auto lower_bound = images_.lower_bound(backing->mailbox());
- if (lower_bound != images_.end() &&
- (*lower_bound)->mailbox() == backing->mailbox()) {
+ if (images_.find(backing->mailbox()) != images_.end()) {
LOG(ERROR) << "SharedImageManager::Register: Trying to register an "
"already registered mailbox.";
return nullptr;
}
+ // TODO(jonross): Determine how the direct destruction of a
+ // SharedImageRepresentationFactoryRef leads to ref-counting issues as
+ // well as thread-checking failures in tests.
auto factory_ref = std::make_unique<SharedImageRepresentationFactoryRef>(
this, backing.get(), tracker);
- images_.emplace_hint(lower_bound, std::move(backing));
+ images_.emplace(std::move(backing));
+
return factory_ref;
}
@@ -301,21 +303,32 @@ void SharedImageManager::OnRepresentationDestroyed(
CALLED_ON_VALID_THREAD();
AutoLock autolock(this);
- auto found = images_.find(mailbox);
- if (found == images_.end()) {
- LOG(ERROR) << "SharedImageManager::OnRepresentationDestroyed: Trying to "
- "destroy a non existent mailbox.";
- return;
+
+ {
+ auto found = images_.find(mailbox);
+ if (found == images_.end()) {
+ LOG(ERROR) << "SharedImageManager::OnRepresentationDestroyed: Trying to "
+ "destroy a non existent mailbox.";
+ return;
+ }
+
+ // TODO(piman): When the original (factory) representation is destroyed, we
+ // should treat the backing as pending destruction and prevent additional
+ // representations from being created. This will help avoid races due to a
+ // consumer getting lucky with timing due to a representation inadvertently
+ // extending a backing's lifetime.
+ (*found)->ReleaseRef(representation);
}
- // TODO(piman): When the original (factory) representation is destroyed, we
- // should treat the backing as pending destruction and prevent additional
- // representations from being created. This will help avoid races due to a
- // consumer getting lucky with timing due to a representation inadvertently
- // extending a backing's lifetime.
- (*found)->ReleaseRef(representation);
- if (!(*found)->HasAnyRefs())
- images_.erase(found);
+ {
+ // TODO(jonross): Once the pending destruction TODO above is addressed,
+ // this block can be removed, and the deletion can occur directly. Currently
+ // SharedImageManager::OnRepresentationDestroyed can be nested, so we need
+ // to get the iterator again.
+ auto found = images_.find(mailbox);
+ if (found != images_.end() && (!(*found)->HasAnyRefs()))
+ images_.erase(found);
+ }
}
void SharedImageManager::OnMemoryDump(const Mailbox& mailbox,
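Editorial note: the rewritten OnRepresentationDestroyed() looks the mailbox up twice on purpose — ReleaseRef() can synchronously destroy another representation of the same backing, re-entering this function and erasing from |images_|, which would invalidate any iterator held across the call (see the TODO(jonross) above). A minimal sketch of the pattern, with hypothetical types:

// Sketch only: never hold a container iterator across a call that can
// re-enter and mutate the container. |Images| and |Rep| are hypothetical.
void OnDestroyed(Images& images, const Mailbox& mailbox, Rep* rep) {
  {
    auto it = images.find(mailbox);
    if (it == images.end())
      return;
    (*it)->ReleaseRef(rep);  // May re-enter OnDestroyed() and erase entries.
  }
  // Re-acquire: the first iterator may be dangling by now.
  auto it = images.find(mailbox);
  if (it != images.end() && !(*it)->HasAnyRefs())
    images.erase(it);
}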
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation.cc b/chromium/gpu/command_buffer/service/shared_image_representation.cc
index 20196375765..fd2d31b5b2e 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation.cc
@@ -117,6 +117,8 @@ SharedImageRepresentationSkia::BeginScopedWriteAccess(
if (!surface)
return nullptr;
+ backing()->OnWriteSucceeded();
+
return std::make_unique<ScopedWriteAccess>(
util::PassKey<SharedImageRepresentationSkia>(), this, std::move(surface));
}
@@ -157,6 +159,8 @@ SharedImageRepresentationSkia::BeginScopedReadAccess(
if (!promise_image_texture)
return nullptr;
+ backing()->OnReadSucceeded();
+
return std::make_unique<ScopedReadAccess>(
util::PassKey<SharedImageRepresentationSkia>(), this,
std::move(promise_image_texture));
@@ -178,6 +182,8 @@ SharedImageRepresentationOverlay::BeginScopedReadAccess(bool needs_gl_image) {
if (!BeginReadAccess())
return nullptr;
+ backing()->OnReadSucceeded();
+
return std::make_unique<ScopedReadAccess>(
util::PassKey<SharedImageRepresentationOverlay>(), this,
needs_gl_image ? GetGLImage() : nullptr);
@@ -205,6 +211,16 @@ SharedImageRepresentationDawn::BeginScopedAccess(
WGPUTexture texture = BeginAccess(usage);
if (!texture)
return nullptr;
+
+ constexpr auto kWriteUsage =
+ WGPUTextureUsage_CopyDst | WGPUTextureUsage_OutputAttachment;
+
+ if (usage & kWriteUsage) {
+ backing()->OnWriteSucceeded();
+ } else {
+ backing()->OnReadSucceeded();
+ }
+
return std::make_unique<ScopedAccess>(
util::PassKey<SharedImageRepresentationDawn>(), this, texture);
}
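Editorial note: the Dawn scoped-access hook classifies the access for the backing by usage bits — anything that can write texels through CopyDst or OutputAttachment is reported as a write, every other usage (e.g. Sampled or CopySrc) as a read. The same test factored out as a sketch; the helper name is hypothetical, the constants mirror the diff above:

// True if the Dawn access can modify the texture's contents, per the
// classification used in BeginScopedAccess() above (note it deliberately
// counts only CopyDst and OutputAttachment as writes).
bool IsWGPUWriteUsage(uint32_t usage) {
  constexpr auto kWriteUsage =
      WGPUTextureUsage_CopyDst | WGPUTextureUsage_OutputAttachment;
  return (usage & kWriteUsage) != 0;
}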
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
index c931778902f..5645db88629 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_gl_ozone.cc
@@ -63,10 +63,10 @@ SharedImageRepresentationGLOzone::Create(
gles2::Texture* texture = new gles2::Texture(gl_texture_service_id);
texture->SetLightweightRef();
texture->SetTarget(GL_TEXTURE_2D, 1 /*max_levels=*/);
- texture->sampler_state_.min_filter = GL_LINEAR;
- texture->sampler_state_.mag_filter = GL_LINEAR;
- texture->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
- texture->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
+ texture->set_min_filter(GL_LINEAR);
+ texture->set_mag_filter(GL_LINEAR);
+ texture->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture->set_wrap_s(GL_CLAMP_TO_EDGE);
GLenum gl_format = viz::GLDataFormat(format);
GLenum gl_type = viz::GLDataType(format);
diff --git a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
index d23d5358e2f..a5d75a204f0 100644
--- a/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_representation_skia_gl.cc
@@ -96,9 +96,6 @@ sk_sp<SkSurface> SharedImageRepresentationSkiaGL::BeginWriteAccess(
SkColorType sk_color_type = viz::ResourceFormatToClosestSkColorType(
/*gpu_compositing=*/true, format());
- // TODO(https://crbug.com/1054033): Switch back to
- // MakeFromBackendTextureAsRenderTarget once we no longer use GLRendererCopier
- // with surfaceless surfaces.
auto surface = SkSurface::MakeFromBackendTexture(
context_state_->gr_context(), promise_texture_->backendTexture(),
kTopLeft_GrSurfaceOrigin, final_msaa_count, sk_color_type,
diff --git a/chromium/gpu/command_buffer/service/shared_image_video.cc b/chromium/gpu/command_buffer/service/shared_image_video.cc
index 50838310832..db9b2524073 100644
--- a/chromium/gpu/command_buffer/service/shared_image_video.cc
+++ b/chromium/gpu/command_buffer/service/shared_image_video.cc
@@ -13,6 +13,7 @@
#include "components/viz/common/resources/resource_sizes.h"
#include "gpu/command_buffer/common/shared_image_usage.h"
#include "gpu/command_buffer/service/abstract_texture.h"
+#include "gpu/command_buffer/service/ahardwarebuffer_utils.h"
#include "gpu/command_buffer/service/mailbox_manager.h"
#include "gpu/command_buffer/service/memory_tracking.h"
#include "gpu/command_buffer/service/shared_context_state.h"
@@ -145,9 +146,9 @@ class SharedImageRepresentationGLTextureVideo
gles2::Texture* GetTexture() override { return texture_; }
bool BeginAccess(GLenum mode) override {
- // This representation should only be called for read.
- DCHECK_EQ(mode,
- static_cast<GLenum>(GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM));
+ // This representation should only be called for read or overlay.
+ DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
auto* video_backing = static_cast<SharedImageVideo*>(backing());
video_backing->BeginGLReadAccess();
@@ -182,9 +183,9 @@ class SharedImageRepresentationGLTexturePassthroughVideo
}
bool BeginAccess(GLenum mode) override {
- // This representation should only be called for read.
- DCHECK_EQ(mode,
- static_cast<GLenum>(GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM));
+ // This representation should only be called for read or overlay.
+ DCHECK(mode == GL_SHARED_IMAGE_ACCESS_MODE_READ_CHROMIUM ||
+ mode == GL_SHARED_IMAGE_ACCESS_MODE_OVERLAY_CHROMIUM);
auto* video_backing = static_cast<SharedImageVideo*>(backing());
video_backing->BeginGLReadAccess();
@@ -266,13 +267,10 @@ class SharedImageRepresentationVideoSkiaVk
if (!vulkan_image_) {
DCHECK(!promise_texture_);
- gfx::GpuMemoryBufferHandle gmb_handle(
- scoped_hardware_buffer_->TakeBuffer());
- auto* device_queue =
- context_state_->vk_context_provider()->GetDeviceQueue();
- vulkan_image_ = VulkanImage::CreateFromGpuMemoryBufferHandle(
- device_queue, std::move(gmb_handle), size(), ToVkFormat(format()),
- 0 /* usage */);
+
+ vulkan_image_ =
+ CreateVkImageFromAhbHandle(scoped_hardware_buffer_->TakeBuffer(),
+ context_state_.get(), size(), format());
if (!vulkan_image_)
return nullptr;
diff --git a/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc
new file mode 100644
index 00000000000..213099665fd
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.cc
@@ -0,0 +1,127 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/command_buffer/service/shared_memory_region_wrapper.h"
+
+#include "base/logging.h"
+#include "base/numerics/checked_math.h"
+#include "base/system/sys_info.h"
+#include "components/viz/common/resources/resource_format_utils.h"
+#include "components/viz/common/resources/resource_sizes.h"
+#include "ui/gfx/gpu_memory_buffer.h"
+
+namespace gpu {
+namespace {
+
+// Validate that |stride| will work for pixels with |size| and |format|.
+bool ValidateStride(const gfx::Size size,
+ viz::ResourceFormat format,
+ int32_t stride) {
+ if (!base::IsValueInRangeForNumericType<size_t>(stride))
+ return false;
+
+ int32_t min_width_in_bytes = 0;
+ if (!viz::ResourceSizes::MaybeWidthInBytes(size.width(), format,
+ &min_width_in_bytes)) {
+ return false;
+ }
+
+ if (stride < min_width_in_bytes)
+ return false;
+
+ // Check that stride is a multiple of pixel byte size.
+ int bits_per_pixel = viz::BitsPerPixel(format);
+ switch (bits_per_pixel) {
+ case 64:
+ case 32:
+ case 16:
+ if (stride % (bits_per_pixel / 8) != 0)
+ return false;
+ break;
+ case 8:
+ case 4:
+ break;
+ default:
+ // YVU420 and YUV_420_BIPLANAR formats aren't supported.
+ NOTREACHED();
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace
+
+SharedMemoryRegionWrapper::SharedMemoryRegionWrapper() = default;
+SharedMemoryRegionWrapper::SharedMemoryRegionWrapper(
+ SharedMemoryRegionWrapper&& other) = default;
+SharedMemoryRegionWrapper& SharedMemoryRegionWrapper::operator=(
+ SharedMemoryRegionWrapper&& other) = default;
+SharedMemoryRegionWrapper::~SharedMemoryRegionWrapper() = default;
+
+bool SharedMemoryRegionWrapper::Initialize(
+ const gfx::GpuMemoryBufferHandle& handle,
+ const gfx::Size& size,
+ viz::ResourceFormat format) {
+ DCHECK(!mapping_.IsValid());
+
+ if (!handle.region.IsValid()) {
+ DLOG(ERROR) << "Invalid GMB shared memory region.";
+ return false;
+ }
+
+ if (!ValidateStride(size, format, handle.stride)) {
+ DLOG(ERROR) << "Invalid GMB stride.";
+ return false;
+ }
+
+ // Minimize the amount of address space we use but make sure offset is a
+ // multiple of page size as required by MapAt().
+ size_t allocation_granularity = base::SysInfo::VMAllocationGranularity();
+ size_t memory_offset = handle.offset % allocation_granularity;
+ size_t map_offset =
+ allocation_granularity * (handle.offset / allocation_granularity);
+
+ base::CheckedNumeric<size_t> checked_size = handle.stride;
+ checked_size *= size.height();
+ checked_size += memory_offset;
+ if (!checked_size.IsValid()) {
+ DLOG(ERROR) << "Invalid GMB size.";
+ return false;
+ }
+
+ mapping_ = handle.region.MapAt(static_cast<off_t>(map_offset),
+ checked_size.ValueOrDie());
+
+ if (!mapping_.IsValid()) {
+ DLOG(ERROR) << "Failed to map shared memory.";
+ return false;
+ }
+
+ offset_ = memory_offset;
+ stride_ = handle.stride;
+
+ return true;
+}
+
+bool SharedMemoryRegionWrapper::IsValid() const {
+ return mapping_.IsValid();
+}
+
+uint8_t* SharedMemoryRegionWrapper::GetMemory() const {
+ DCHECK(IsValid());
+ return mapping_.GetMemoryAs<uint8_t>() + offset_;
+}
+
+base::span<const uint8_t> SharedMemoryRegionWrapper::GetMemoryAsSpan() const {
+ DCHECK(IsValid());
+ return mapping_.GetMemoryAsSpan<const uint8_t>().subspan(offset_);
+}
+
+size_t SharedMemoryRegionWrapper::GetStride() const {
+ DCHECK(IsValid());
+ return stride_;
+}
+
+} // namespace gpu
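Editorial note: Initialize() splits |handle.offset| so the mapping starts on a page boundary while GetMemory() still points at the first pixel. A worked example of that math, assuming a 4096-byte allocation granularity (the real value comes from base::SysInfo::VMAllocationGranularity()):

#include <cstddef>

int main() {
  const size_t granularity = 4096;
  const size_t handle_offset = 12345;                        // arbitrary
  const size_t memory_offset = handle_offset % granularity;  // 57
  const size_t map_offset =
      granularity * (handle_offset / granularity);           // 12288
  // MapAt(12288, stride * height + 57) maps whole pages covering the data;
  // GetMemory() then returns mapping start + 57, i.e. byte 12345 of the
  // region -- exactly where the first pixel lives.
  return static_cast<int>(map_offset + memory_offset - handle_offset);  // 0
}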
diff --git a/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h
new file mode 100644
index 00000000000..280a09b840c
--- /dev/null
+++ b/chromium/gpu/command_buffer/service/shared_memory_region_wrapper.h
@@ -0,0 +1,48 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_COMMAND_BUFFER_SERVICE_SHARED_MEMORY_REGION_WRAPPER_H_
+#define GPU_COMMAND_BUFFER_SERVICE_SHARED_MEMORY_REGION_WRAPPER_H_
+
+#include "base/containers/span.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "components/viz/common/resources/resource_format.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace gfx {
+struct GpuMemoryBufferHandle;
+}
+
+namespace gpu {
+
+// Wrapper for a shared memory region from a GpuMemoryBuffer with type
+// SHARED_MEMORY_BUFFER.
+class SharedMemoryRegionWrapper {
+ public:
+ SharedMemoryRegionWrapper();
+ SharedMemoryRegionWrapper(SharedMemoryRegionWrapper&& other);
+ SharedMemoryRegionWrapper& operator=(SharedMemoryRegionWrapper&& other);
+ ~SharedMemoryRegionWrapper();
+
+ // Validates that size, stride and format parameters make sense and maps
+ // memory for shared memory owned by |handle|. Shared memory stays mapped
+ // until destruction.
+ bool Initialize(const gfx::GpuMemoryBufferHandle& handle,
+ const gfx::Size& size,
+ viz::ResourceFormat format);
+
+ bool IsValid() const;
+ uint8_t* GetMemory() const;
+ base::span<const uint8_t> GetMemoryAsSpan() const;
+ size_t GetStride() const;
+
+ private:
+ base::WritableSharedMemoryMapping mapping_;
+ size_t offset_ = 0;
+ size_t stride_ = 0;
+};
+
+} // namespace gpu
+
+#endif // GPU_COMMAND_BUFFER_SERVICE_SHARED_MEMORY_REGION_WRAPPER_H_
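Editorial note: a hedged usage sketch for the new wrapper — map a shared-memory GpuMemoryBufferHandle once, then address pixels through the stride. The caller function and the 256x256 RGBA parameters are assumptions for illustration; the API calls come from the header above.

#include <cstdint>

#include "base/containers/span.h"
#include "gpu/command_buffer/service/shared_memory_region_wrapper.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/gpu_memory_buffer.h"

// |handle| is assumed to be a valid SHARED_MEMORY_BUFFER handle, e.g.
// received over IPC alongside the size and format.
void ReadOneRow(const gfx::GpuMemoryBufferHandle& handle) {
  gpu::SharedMemoryRegionWrapper wrapper;
  if (!wrapper.Initialize(handle, gfx::Size(256, 256), viz::RGBA_8888))
    return;  // Bad region, stride, or size; Initialize() already logged.

  // Rows are GetStride() bytes apart; the stride may exceed width * 4.
  base::span<const uint8_t> pixels = wrapper.GetMemoryAsSpan();
  base::span<const uint8_t> row10 =
      pixels.subspan(10 * wrapper.GetStride(), 256 * 4 /* RGBA bytes */);
  (void)row10;
}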
diff --git a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc
index 7ee33fbf629..a99a5c4279f 100644
--- a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc
+++ b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.cc
@@ -72,16 +72,6 @@ void SurfaceTextureGLOwner::EnsureTexImageBound() {
NOTREACHED();
}
-void SurfaceTextureGLOwner::GetTransformMatrix(float mtx[]) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- // If we don't have a SurfaceTexture, then the matrix doesn't matter. We
- // still initialize it for good measure.
- if (surface_texture_)
- surface_texture_->GetTransformMatrix(mtx);
- else
- memset(mtx, 0, sizeof(mtx[0]) * 16);
-}
-
void SurfaceTextureGLOwner::ReleaseBackBuffers() {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
if (surface_texture_)
@@ -104,12 +94,7 @@ SurfaceTextureGLOwner::GetAHardwareBuffer() {
return nullptr;
}
-gfx::Rect SurfaceTextureGLOwner::GetCropRect() {
- NOTREACHED() << "Don't use GetCropRect with SurfaceTextureGLOwner";
- return gfx::Rect();
-}
-
-void SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect(
+bool SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect(
gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) {
@@ -119,7 +104,7 @@ void SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect(
if (!surface_texture_) {
*visible_rect = gfx::Rect();
*coded_size = gfx::Size();
- return;
+ return false;
}
float mtx[16];
@@ -154,6 +139,8 @@ void SurfaceTextureGLOwner::GetCodedSizeAndVisibleRect(
base::debug::DumpWithoutCrashing();
}
+
+ return true;
}
// static
diff --git a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h
index 80d23de9035..d1ecf45dab2 100644
--- a/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h
+++ b/chromium/gpu/command_buffer/service/surface_texture_gl_owner.h
@@ -33,12 +33,10 @@ class GPU_GLES2_EXPORT SurfaceTextureGLOwner : public TextureOwner {
gl::ScopedJavaSurface CreateJavaSurface() const override;
void UpdateTexImage() override;
void EnsureTexImageBound() override;
- void GetTransformMatrix(float mtx[16]) override;
void ReleaseBackBuffers() override;
std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
GetAHardwareBuffer() override;
- gfx::Rect GetCropRect() override;
- void GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
+ bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) override;
diff --git a/chromium/gpu/command_buffer/service/sync_point_manager.h b/chromium/gpu/command_buffer/service/sync_point_manager.h
index c6fe88dc0be..496840b4448 100644
--- a/chromium/gpu/command_buffer/service/sync_point_manager.h
+++ b/chromium/gpu/command_buffer/service/sync_point_manager.h
@@ -15,7 +15,7 @@
#include "base/atomic_sequence_num.h"
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/condition_variable.h"
diff --git a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
index d07cfd627d2..290bf973958 100644
--- a/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
+++ b/chromium/gpu/command_buffer/service/test_shared_image_backing.cc
@@ -146,10 +146,10 @@ TestSharedImageBacking::TestSharedImageBacking(
texture_ = new gles2::Texture(service_id_);
texture_->SetLightweightRef();
texture_->SetTarget(GL_TEXTURE_2D, 1);
- texture_->sampler_state_.min_filter = GL_LINEAR;
- texture_->sampler_state_.mag_filter = GL_LINEAR;
- texture_->sampler_state_.wrap_s = GL_CLAMP_TO_EDGE;
- texture_->sampler_state_.wrap_t = GL_CLAMP_TO_EDGE;
+ texture_->set_min_filter(GL_LINEAR);
+ texture_->set_mag_filter(GL_LINEAR);
+ texture_->set_wrap_t(GL_CLAMP_TO_EDGE);
+ texture_->set_wrap_s(GL_CLAMP_TO_EDGE);
texture_->SetLevelInfo(GL_TEXTURE_2D, 0, GLInternalFormat(format),
size.width(), size.height(), 1, 0,
GLDataFormat(format), GLDataType(format), gfx::Rect());
diff --git a/chromium/gpu/command_buffer/service/texture_manager.h b/chromium/gpu/command_buffer/service/texture_manager.h
index 0fece4ee111..56ffa63b427 100644
--- a/chromium/gpu/command_buffer/service/texture_manager.h
+++ b/chromium/gpu/command_buffer/service/texture_manager.h
@@ -35,24 +35,7 @@ class ProgressReporter;
namespace gpu {
class DecoderContext;
-class ExternalVkImageBacking;
-class ExternalVkImageGlRepresentation;
class ServiceDiscardableManager;
-class SharedImageBackingGLTexture;
-class SharedImageBackingFactoryGLTexture;
-class SharedImageBackingAHB;
-class SharedImageBackingEglImage;
-class SharedImageRepresentationGLTexture;
-class SharedImageRepresentationEglImageGLTexture;
-class SharedImageRepresentationGLTextureAHB;
-class SharedImageRepresentationSkiaGLAHB;
-class SharedImageBackingIOSurface;
-class SharedImageRepresentationGLTextureIOSurface;
-class SharedImageRepresentationSkiaIOSurface;
-class SharedImageRepresentationGLOzone;
-class SharedImageVideo;
-class StreamTexture;
-class TestSharedImageBacking;
namespace gles2 {
class GLStreamTextureImage;
@@ -187,6 +170,28 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
GLenum alpha;
};
+ struct LevelInfo {
+ LevelInfo();
+ LevelInfo(const LevelInfo& rhs);
+ ~LevelInfo();
+
+ gfx::Rect cleared_rect;
+ GLenum target = 0;
+ GLint level = -1;
+ GLenum internal_format = 0;
+ GLsizei width = 0;
+ GLsizei height = 0;
+ GLsizei depth = 0;
+ GLint border = 0;
+ GLenum format = 0;
+ GLenum type = 0;
+ scoped_refptr<gl::GLImage> image;
+ scoped_refptr<GLStreamTextureImage> stream_texture_image;
+ ImageState image_state = UNBOUND;
+ uint32_t estimated_size = 0;
+ bool internal_workaround = false;
+ };
+
explicit Texture(GLuint service_id);
// TextureBase implementation:
@@ -198,22 +203,36 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
return sampler_state_;
}
+ void set_min_filter(GLenum min_filter) {
+ sampler_state_.min_filter = min_filter;
+ }
+
GLenum min_filter() const {
return sampler_state_.min_filter;
}
+ void set_mag_filter(GLenum mag_filter) {
+ sampler_state_.mag_filter = mag_filter;
+ }
+
GLenum mag_filter() const {
return sampler_state_.mag_filter;
}
+ void set_wrap_r(GLenum wrap_r) { sampler_state_.wrap_r = wrap_r; }
+
GLenum wrap_r() const {
return sampler_state_.wrap_r;
}
+ void set_wrap_s(GLenum wrap_s) { sampler_state_.wrap_s = wrap_s; }
+
GLenum wrap_s() const {
return sampler_state_.wrap_s;
}
+ void set_wrap_t(GLenum wrap_t) { sampler_state_.wrap_t = wrap_t; }
+
GLenum wrap_t() const {
return sampler_state_.wrap_t;
}
@@ -429,26 +448,43 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// Returns GL_NONE on error.
GLenum GetInternalFormatOfBaseLevel() const;
+ void SetLightweightRef();
+
+ void RemoveLightweightRef(bool have_context);
+
+ // Set the info for a particular level.
+ void SetLevelInfo(GLenum target,
+ GLint level,
+ GLenum internal_format,
+ GLsizei width,
+ GLsizei height,
+ GLsizei depth,
+ GLint border,
+ GLenum format,
+ GLenum type,
+ const gfx::Rect& cleared_rect);
+
+ // Returns the LevelInfo for |target| and |level| if it's set, else nullptr.
+ const LevelInfo* GetLevelInfo(GLint target, GLint level) const;
+
+ // Sets the Texture's target
+ // Parameters:
+ // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or
+ // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB
+ // GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3)
+ // max_levels: The maximum levels this type of target can have.
+ void SetTarget(GLenum target, GLint max_levels);
+
+ void SetCompatibilitySwizzle(const CompatibilitySwizzle* swizzle);
+
+ bool NeedsMips() const {
+ return sampler_state_.min_filter != GL_NEAREST &&
+ sampler_state_.min_filter != GL_LINEAR;
+ }
+
private:
friend class MailboxManagerSync;
friend class MailboxManagerTest;
- friend class gpu::ExternalVkImageBacking;
- friend class gpu::ExternalVkImageGlRepresentation;
- friend class gpu::SharedImageVideo;
- friend class gpu::SharedImageBackingGLTexture;
- friend class gpu::SharedImageBackingFactoryGLTexture;
- friend class gpu::SharedImageBackingAHB;
- friend class gpu::SharedImageBackingEglImage;
- friend class gpu::SharedImageRepresentationGLTextureAHB;
- friend class gpu::SharedImageRepresentationEglImageGLTexture;
- friend class gpu::SharedImageRepresentationSkiaGLAHB;
- friend class gpu::SharedImageBackingIOSurface;
- friend class gpu::SharedImageRepresentationGLTextureIOSurface;
- friend class gpu::SharedImageRepresentationSkiaIOSurface;
- friend class gpu::SharedImageRepresentationGLOzone;
- friend class gpu::StreamTexture;
- friend class gpu::TestSharedImageBacking;
- friend class AbstractTextureImplOnSharedContext;
friend class TextureDefinition;
friend class TextureManager;
friend class TextureRef;
@@ -458,8 +494,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
~Texture() override;
void AddTextureRef(TextureRef* ref);
void RemoveTextureRef(TextureRef* ref, bool have_context);
- void SetLightweightRef();
- void RemoveLightweightRef(bool have_context);
void MaybeDeleteThis(bool have_context);
// Condition on which this texture is renderable. Can be ONLY_IF_NPOT if it
@@ -474,28 +508,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
CAN_RENDER_NEEDS_VALIDATION,
};
- struct LevelInfo {
- LevelInfo();
- LevelInfo(const LevelInfo& rhs);
- ~LevelInfo();
-
- gfx::Rect cleared_rect;
- GLenum target;
- GLint level;
- GLenum internal_format;
- GLsizei width;
- GLsizei height;
- GLsizei depth;
- GLint border;
- GLenum format;
- GLenum type;
- scoped_refptr<gl::GLImage> image;
- scoped_refptr<GLStreamTextureImage> stream_texture_image;
- ImageState image_state;
- uint32_t estimated_size;
- bool internal_workaround;
- };
-
struct FaceInfo {
FaceInfo();
FaceInfo(const FaceInfo& other);
@@ -514,23 +526,9 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
GLStreamTextureImage* stream_texture_image,
ImageState state);
- // Returns the LevelInfo for |target| and |level| if it's set, else NULL.
- const LevelInfo* GetLevelInfo(GLint target, GLint level) const;
// Returns NULL if the base level is not defined.
const LevelInfo* GetBaseLevelInfo() const;
- // Set the info for a particular level.
- void SetLevelInfo(GLenum target,
- GLint level,
- GLenum internal_format,
- GLsizei width,
- GLsizei height,
- GLsizei depth,
- GLint border,
- GLenum format,
- GLenum type,
- const gfx::Rect& cleared_rect);
-
// Causes us to report |service_id| as our service id, but does not delete
// it when we are destroyed. Will rebind any OES_EXTERNAL texture units to
// our new service id in all contexts. If |service_id| is zero, then we
@@ -573,11 +571,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
// Makes each of the mip levels as though they were generated.
void MarkMipmapsGenerated();
- bool NeedsMips() const {
- return sampler_state_.min_filter != GL_NEAREST &&
- sampler_state_.min_filter != GL_LINEAR;
- }
-
// True if this texture meets all the GLES2 criteria for rendering.
// See section 3.8.2 of the GLES2 spec.
bool CanRender(const FeatureInfo* feature_info) const;
@@ -618,14 +611,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
GLenum type,
bool immutable);
- // Sets the Texture's target
- // Parameters:
- // target: GL_TEXTURE_2D or GL_TEXTURE_CUBE_MAP or
- // GL_TEXTURE_EXTERNAL_OES or GL_TEXTURE_RECTANGLE_ARB
- // GL_TEXTURE_2D_ARRAY or GL_TEXTURE_3D (for GLES3)
- // max_levels: The maximum levels this type of target can have.
- void SetTarget(GLenum target, GLint max_levels);
-
// Update info about this texture.
void Update();
@@ -682,7 +667,6 @@ class GPU_GLES2_EXPORT Texture final : public TextureBase {
GLuint owned_service_id() const { return owned_service_id_; }
GLenum GetCompatibilitySwizzleForChannel(GLenum channel);
- void SetCompatibilitySwizzle(const CompatibilitySwizzle* swizzle);
// Info about each face and level of texture.
std::vector<FaceInfo> face_infos_;
diff --git a/chromium/gpu/command_buffer/service/texture_owner.h b/chromium/gpu/command_buffer/service/texture_owner.h
index 8f0fced6b6a..81bd88c1bda 100644
--- a/chromium/gpu/command_buffer/service/texture_owner.h
+++ b/chromium/gpu/command_buffer/service/texture_owner.h
@@ -83,7 +83,6 @@ class GPU_GLES2_EXPORT TextureOwner
virtual void EnsureTexImageBound() = 0;
// Transformation matrix if any associated with the texture image.
- virtual void GetTransformMatrix(float mtx[16]) = 0;
virtual void ReleaseBackBuffers() = 0;
// Retrieves the AHardwareBuffer from the latest available image data.
@@ -92,10 +91,6 @@ class GPU_GLES2_EXPORT TextureOwner
virtual std::unique_ptr<base::android::ScopedHardwareBufferFenceSync>
GetAHardwareBuffer() = 0;
- // Provides the crop rectangle associated with the most recent image. The
- // crop rectangle specifies the region of valid pixels in the image.
- virtual gfx::Rect GetCropRect() = 0;
-
// Retrieves backing size and visible rect associated with the most recent
// image. |rotated_visible_size| is the size of the visible region
// post-transform in pixels and is used for SurfaceTexture case. Transform
@@ -103,7 +98,8 @@ class GPU_GLES2_EXPORT TextureOwner
// expect to have rotation and MediaPlayer reports rotated size. For
// MediaCodec we don't expect rotation in ST so visible_size (i.e crop rect
// from codec) can be used.
- virtual void GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
+ // Returns whether the call was successful.
+ virtual bool GetCodedSizeAndVisibleRect(gfx::Size rotated_visible_size,
gfx::Size* coded_size,
gfx::Rect* visible_rect) = 0;
diff --git a/chromium/gpu/command_buffer/service/vertex_array_manager.h b/chromium/gpu/command_buffer/service/vertex_array_manager.h
index 2053fb2342b..cc1f84a8c79 100644
--- a/chromium/gpu/command_buffer/service/vertex_array_manager.h
+++ b/chromium/gpu/command_buffer/service/vertex_array_manager.h
@@ -9,7 +9,6 @@
#include <unordered_map>
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "gpu/command_buffer/service/gl_utils.h"
diff --git a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h
index 044d0255992..0bdb52371b7 100644
--- a/chromium/gpu/command_buffer/service/vertex_attrib_manager.h
+++ b/chromium/gpu/command_buffer/service/vertex_attrib_manager.h
@@ -10,7 +10,7 @@
#include <list>
#include <vector>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/memory/ref_counted.h"
#include "build/build_config.h"
#include "gpu/command_buffer/service/buffer_manager.h"
diff --git a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
index a4c8530dfd8..514e52a4d78 100644
--- a/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
+++ b/chromium/gpu/command_buffer/service/webgpu_decoder_impl.cc
@@ -435,10 +435,7 @@ class WebGPUDecoderImpl final : public WebGPUDecoder {
const volatile void* buffer,
int num_entries,
int* entries_processed) override;
- base::StringPiece GetLogPrefix() override {
- NOTIMPLEMENTED();
- return "";
- }
+ base::StringPiece GetLogPrefix() override { return "WebGPUDecoderImpl"; }
void BindImage(uint32_t client_texture_id,
uint32_t texture_target,
gl::GLImage* image,
diff --git a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
index 591d0923180..7cd628f1325 100644
--- a/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
+++ b/chromium/gpu/command_buffer/service/wrapped_sk_image.cc
@@ -17,6 +17,7 @@
#include "gpu/command_buffer/service/shared_context_state.h"
#include "gpu/command_buffer/service/shared_image_backing.h"
#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/command_buffer/service/shared_memory_region_wrapper.h"
#include "gpu/command_buffer/service/skia_utils.h"
#include "skia/buildflags.h"
#include "third_party/skia/include/core/SkCanvas.h"
@@ -25,6 +26,7 @@
#include "third_party/skia/include/core/SkSurfaceProps.h"
#include "third_party/skia/include/gpu/GrBackendSurface.h"
#include "third_party/skia/include/gpu/GrTypes.h"
+#include "ui/gfx/buffer_format_util.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_gl_api_implementation.h"
#include "ui/gl/trace_util.h"
@@ -39,6 +41,13 @@ namespace raster {
namespace {
+SkImageInfo MakeSkImageInfo(const gfx::Size& size, viz::ResourceFormat format) {
+ return SkImageInfo::Make(size.width(), size.height(),
+ ResourceFormatToClosestSkColorType(
+ /*gpu_compositing=*/true, format),
+ kOpaque_SkAlphaType);
+}
+
class WrappedSkImage : public ClearTrackingSharedImageBacking {
public:
~WrappedSkImage() override {
@@ -59,7 +68,24 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
return false;
}
- void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {}
+ void Update(std::unique_ptr<gfx::GpuFence> in_fence) override {
+ if (shared_memory_wrapper_.IsValid()) {
+ DCHECK(!in_fence);
+
+ if (context_state_->context_lost())
+ return;
+
+ DCHECK(context_state_->IsCurrent(nullptr));
+
+ SkImageInfo info = MakeSkImageInfo(size(), format());
+ SkPixmap pixmap(info, shared_memory_wrapper_.GetMemory(),
+ shared_memory_wrapper_.GetStride());
+ if (!context_state_->gr_context()->updateBackendTexture(
+ backend_texture_, &pixmap, /*levels=*/1, nullptr, nullptr)) {
+ DLOG(ERROR) << "Failed to update WrappedSkImage texture";
+ }
+ }
+ }
void OnMemoryDump(const std::string& dump_name,
base::trace_event::MemoryAllocatorDump* dump,
@@ -138,11 +164,28 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
DCHECK(!!context_state_);
}
- bool Initialize(const SkImageInfo& info, base::span<const uint8_t> data) {
+ bool InitializeGMB(const SkImageInfo& info,
+ SharedMemoryRegionWrapper shm_wrapper) {
+ if (Initialize(info, shm_wrapper.GetMemoryAsSpan(),
+ shm_wrapper.GetStride())) {
+ shared_memory_wrapper_ = std::move(shm_wrapper);
+ return true;
+ }
+ return false;
+ }
+
+ // |pixels| optionally contains pixel data to upload to the texture. For
+ // formats other than ETC1, a non-zero |stride| is used as the row stride;
+ // if |stride| is zero, SkImageInfo::minRowBytes() is used instead. For ETC1
+ // textures pixel data must be provided, since updating compressed textures
+ // is not supported.
+ bool Initialize(const SkImageInfo& info,
+ base::span<const uint8_t> pixels,
+ size_t stride) {
if (context_state_->context_lost())
return false;
- DCHECK(context_state_->IsCurrent(nullptr));
+ DCHECK(context_state_->IsCurrent(nullptr));
context_state_->set_need_context_state_reset(true);
#if BUILDFLAG(ENABLE_VULKAN)
@@ -156,33 +199,30 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
auto is_protected = GrProtected::kNo;
#endif
- if (!data.empty()) {
+ if (pixels.data()) {
if (format() == viz::ResourceFormat::ETC1) {
backend_texture_ =
context_state_->gr_context()->createCompressedBackendTexture(
size().width(), size().height(), SkImage::kETC1_CompressionType,
- data.data(), data.size(), GrMipMapped::kNo, is_protected);
+ pixels.data(), pixels.size(), GrMipMapped::kNo, is_protected);
} else {
- SkBitmap bitmap;
- if (!bitmap.installPixels(info, const_cast<uint8_t*>(data.data()),
- info.minRowBytes())) {
- return false;
- }
+ if (!stride)
+ stride = info.minRowBytes();
+ SkPixmap pixmap(info, pixels.data(), stride);
backend_texture_ = context_state_->gr_context()->createBackendTexture(
- bitmap.pixmap(), GrRenderable::kNo, is_protected);
+ pixmap, GrRenderable::kNo, is_protected);
}
if (!backend_texture_.isValid())
return false;
SetCleared();
- OnWriteSucceeded();
} else {
+ DCHECK_NE(format(), viz::ResourceFormat::ETC1);
+#if DCHECK_IS_ON()
// Initializing to a bright color makes it obvious if the pixels are not
// properly set before they are displayed (e.g. https://crbug.com/956555).
// We don't do this on release builds because there is a slight overhead.
-
-#if DCHECK_IS_ON()
backend_texture_ = context_state_->gr_context()->createBackendTexture(
size().width(), size().height(), GetSkColorType(), SkColors::kBlue,
GrMipMapped::kNo, GrRenderable::kYes, is_protected);
@@ -191,12 +231,12 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
size().width(), size().height(), GetSkColorType(), GrMipMapped::kNo,
GrRenderable::kYes, is_protected);
#endif
- }
- if (!backend_texture_.isValid()) {
- DLOG(ERROR) << "createBackendTexture() failed with SkColorType:"
- << GetSkColorType();
- return false;
+ if (!backend_texture_.isValid()) {
+ DLOG(ERROR) << "createBackendTexture() failed with SkColorType:"
+ << GetSkColorType();
+ return false;
+ }
}
promise_texture_ = SkPromiseImageTexture::Make(backend_texture_);
@@ -236,6 +276,9 @@ class WrappedSkImage : public ClearTrackingSharedImageBacking {
sk_sp<SkPromiseImageTexture> promise_texture_;
int surface_msaa_count_ = 0;
+ // Set for shared memory GMB.
+ SharedMemoryRegionWrapper shared_memory_wrapper_;
+
uint64_t tracing_id_ = 0;
DISALLOW_COPY_AND_ASSIGN(WrappedSkImage);
@@ -324,15 +367,12 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
const gfx::ColorSpace& color_space,
uint32_t usage,
base::span<const uint8_t> data) {
- auto info = SkImageInfo::Make(size.width(), size.height(),
- ResourceFormatToClosestSkColorType(
- /*gpu_compositing=*/true, format),
- kOpaque_SkAlphaType);
+ auto info = MakeSkImageInfo(size, format);
size_t estimated_size = info.computeMinByteSize();
std::unique_ptr<WrappedSkImage> texture(
new WrappedSkImage(mailbox, format, size, color_space, usage,
estimated_size, context_state_));
- if (!texture->Initialize(info, data))
+ if (!texture->Initialize(info, data, /*stride=*/0))
return nullptr;
return texture;
}
@@ -346,13 +386,41 @@ std::unique_ptr<SharedImageBacking> WrappedSkImageFactory::CreateSharedImage(
const gfx::Size& size,
const gfx::ColorSpace& color_space,
uint32_t usage) {
- NOTREACHED();
- return nullptr;
+ DCHECK_EQ(handle.type, gfx::SHARED_MEMORY_BUFFER);
+
+ if (!gpu::IsImageSizeValidForGpuMemoryBufferFormat(size, buffer_format)) {
+ DLOG(ERROR) << "Invalid image size for format.";
+ return nullptr;
+ }
+
+ if (gfx::NumberOfPlanesForLinearBufferFormat(buffer_format) != 1) {
+ DLOG(ERROR) << "Invalid image format.";
+ return nullptr;
+ }
+
+ viz::ResourceFormat format = viz::GetResourceFormat(buffer_format);
+
+ // The Skia API for compressed textures is limited and not compatible
+ // with updating the texture or custom strides.
+ DCHECK_NE(format, viz::ResourceFormat::ETC1);
+
+ SharedMemoryRegionWrapper shm_wrapper;
+ if (!shm_wrapper.Initialize(handle, size, format))
+ return nullptr;
+
+ auto info = MakeSkImageInfo(size, format);
+ std::unique_ptr<WrappedSkImage> texture(
+ new WrappedSkImage(mailbox, format, size, color_space, usage,
+ info.computeMinByteSize(), context_state_));
+ if (!texture->InitializeGMB(info, std::move(shm_wrapper)))
+ return nullptr;
+
+ return texture;
}
bool WrappedSkImageFactory::CanImportGpuMemoryBuffer(
gfx::GpuMemoryBufferType memory_buffer_type) {
- return false;
+ return memory_buffer_type == gfx::SHARED_MEMORY_BUFFER;
}
std::unique_ptr<SharedImageRepresentationSkia> WrappedSkImage::ProduceSkia(
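
Taken together, the wrapped_sk_image.cc changes give WrappedSkImage a
CPU-writable path: CreateSharedImage() with a gfx::GpuMemoryBufferHandle
accepts only single-plane SHARED_MEMORY_BUFFER handles, wraps the region in a
SharedMemoryRegionWrapper, and Update() then re-uploads the mapped pixels into
the existing backend texture. A condensed, annotated recap of that upload step
(a sketch, not a drop-in implementation; error handling trimmed):

    // Describe the CPU pixels: closest SkColorType for the resource format,
    // opaque alpha, and the stride recorded by the wrapper.
    SkImageInfo info = MakeSkImageInfo(size(), format());
    SkPixmap pixmap(info, shared_memory_wrapper_.GetMemory(),
                    shared_memory_wrapper_.GetStride());
    // Copy the shared-memory pixels into the live GPU texture: one mip level,
    // no finished-proc callbacks.
    gr_context->updateBackendTexture(backend_texture_, &pixmap,
                                     /*levels=*/1, nullptr, nullptr);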
diff --git a/chromium/gpu/config/gpu_blocklist_unittest.cc b/chromium/gpu/config/gpu_blocklist_unittest.cc
index 68428cee88a..4f1df7a10ea 100644
--- a/chromium/gpu/config/gpu_blocklist_unittest.cc
+++ b/chromium/gpu/config/gpu_blocklist_unittest.cc
@@ -20,7 +20,7 @@ class GpuBlocklistTest : public testing::Test {
void RunFeatureTest(GpuFeatureType feature_type) {
const int kFeatureListForEntry1[1] = {feature_type};
- const uint32_t kDeviceIDsForEntry1[1] = {0x0640};
+ const GpuControlList::Device kDevicesForEntry1[1] = {{0x0640, 0x0}};
const GpuControlList::Entry kTestEntries[1] = {{
1, // id
"Test entry", // description
@@ -38,8 +38,8 @@ class GpuBlocklistTest : public testing::Test {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x10de, // vendor_id
- 1, // DeviceIDs size
- kDeviceIDsForEntry1, // DeviceIDs
+ 1, // Devices size
+ kDevicesForEntry1, // Devices
GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
diff --git a/chromium/gpu/config/gpu_control_list.cc b/chromium/gpu/config/gpu_control_list.cc
index b59016f27ee..f2a193025d7 100644
--- a/chromium/gpu/config/gpu_control_list.cc
+++ b/chromium/gpu/config/gpu_control_list.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/logging.h"
+#include "base/notreached.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
@@ -159,6 +160,33 @@ bool GpuControlList::Version::Contains(const std::string& version_string,
if (op == kBetween)
ref_version2.erase(ref_version2.begin());
}
+ } else if (schema == kVersionSchemaNvidiaDriver) {
+ // The driver version we get from the OS is "XX.XX.XXXA.BBCC", while the
+ // workaround is of the form "ABB.CC". Drop the first two stanzas from the
+ // detected version, erase all but the last character of the third, and move
+ // "BB" to the previous stanza.
+ if (version.size() != 4)
+ return false;
+ // Remember that the detected version might not have leading zeros, so we
+ // have to be a bit careful. [2] is of the form "001A", where A > 0, so we
+ // just care that there's at least one digit. However, if there's less than
+ // that, the splitter stops anyway on that stanza, and the check for four
+ // stanzas will fail instead.
+ version.erase(version.begin(), version.begin() + 2);
+ version[0].erase(0, version[0].length() - 1);
+ // The last stanza may be missing leading zeros, so handle them.
+ if (version[1].length() < 3) {
+ // Two or more removed leading zeros, so BB are both zero.
+ version[0] += "00";
+ } else if (version[1].length() < 4) {
+ // One removed leading zero. BB is 0[1-9].
+ version[0] += "0" + version[1].substr(0, 1);
+ version[1].erase(0, 1);
+ } else {
+ // No leading zeros.
+ version[0] += version[1].substr(0, 2);
+ version[1].erase(0, 2);
+ }
}
int relation = Version::Compare(version, ref_version1, style);
switch (op) {
@@ -418,9 +446,9 @@ bool GpuControlList::Conditions::Contains(OsType target_os_type,
}
}
} else if (intel_gpu_generation.IsSpecified()) {
- for (size_t ii = 0; ii < candidates.size(); ++ii) {
- std::string candidate_generation = GetIntelGpuGeneration(
- candidates[ii].vendor_id, candidates[ii].device_id);
+ for (auto& candidate : candidates) {
+ std::string candidate_generation =
+ GetIntelGpuGeneration(candidate.vendor_id, candidate.device_id);
if (candidate_generation.empty())
continue;
if (intel_gpu_generation.Contains(candidate_generation)) {
@@ -429,24 +457,29 @@ bool GpuControlList::Conditions::Contains(OsType target_os_type,
}
}
} else {
- GPUInfo::GPUDevice gpu;
- gpu.vendor_id = vendor_id;
- if (device_id_size == 0) {
- for (size_t ii = 0; ii < candidates.size(); ++ii) {
- if (gpu.vendor_id == candidates[ii].vendor_id) {
+ if (device_size == 0) {
+ for (auto& candidate : candidates) {
+ if (vendor_id == candidate.vendor_id) {
found = true;
break;
}
}
} else {
- for (size_t ii = 0; ii < device_id_size; ++ii) {
- gpu.device_id = device_ids[ii];
- for (size_t jj = 0; jj < candidates.size(); ++jj) {
- if (gpu.vendor_id == candidates[jj].vendor_id &&
- gpu.device_id == candidates[jj].device_id) {
- found = true;
- break;
- }
+ for (size_t ii = 0; !found && ii < device_size; ++ii) {
+ uint32_t device_id = devices[ii].device_id;
+#if defined(OS_WIN)
+ uint32_t revision = devices[ii].revision;
+#endif // OS_WIN
+ for (auto& candidate : candidates) {
+ if (vendor_id != candidate.vendor_id ||
+ device_id != candidate.device_id)
+ continue;
+#if defined(OS_WIN)
+ if (revision && revision != candidate.revision)
+ continue;
+#endif // OS_WIN
+ found = true;
+ break;
}
}
}
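
A worked example of the new kVersionSchemaNvidiaDriver normalization: the
detected Windows version "26.21.0014.4575" should compare equal to the
published "445.75". The standalone sketch below mirrors the stanza edits
above for the no-leading-zeros branch (the vector literal is illustrative):

    std::vector<std::string> version = {"26", "21", "0014", "4575"};
    version.erase(version.begin(), version.begin() + 2);  // {"0014", "4575"}
    version[0].erase(0, version[0].length() - 1);         // {"4",    "4575"}
    version[0] += version[1].substr(0, 2);                // {"445",  "4575"}
    version[1].erase(0, 2);                               // {"445",  "75"}
    // {"445", "75"} now compares against the workaround value "445.75".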
diff --git a/chromium/gpu/config/gpu_control_list.h b/chromium/gpu/config/gpu_control_list.h
index 0820fa38738..9c6077b7ef4 100644
--- a/chromium/gpu/config/gpu_control_list.h
+++ b/chromium/gpu/config/gpu_control_list.h
@@ -92,6 +92,10 @@ class GPU_EXPORT GpuControlList {
// DDDD(old schema) or CCC.DDDD(new schema) is the build number.
// That is, indicates the actual driver number.
kVersionSchemaIntelDriver,
+ // The version format of Nvidia drivers is XX.XX.XXXA.AAAA where the X's
+ // can be any digits, and the A's are the actual version. The workaround
+ // list specifies them as AAA.AA to match how Nvidia publishes them.
+ kVersionSchemaNvidiaDriver,
};
enum SupportedOrNot {
@@ -181,12 +185,17 @@ class GPU_EXPORT GpuControlList {
static GLType GetDefaultGLType();
};
+ struct GPU_EXPORT Device {
+ uint32_t device_id;
+ uint32_t revision = 0u;
+ };
+
struct GPU_EXPORT Conditions {
OsType os_type;
Version os_version;
uint32_t vendor_id;
- size_t device_id_size;
- const uint32_t* device_ids;
+ size_t device_size;
+ const Device* devices;
MultiGpuCategory multi_gpu_category;
MultiGpuStyle multi_gpu_style;
const DriverInfo* driver_info;
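
With the new layout, each entry lists (device_id, revision) pairs instead of
bare device IDs; per the matching loop in gpu_control_list.cc above, a zero
revision acts as a wildcard on Windows while a non-zero revision must match
exactly. A minimal sketch of a hand-written table (names are illustrative):

    const GpuControlList::Device kExampleDevices[2] = {
        {0x15DD, 0x86},  // device 0x15DD, only at revision 0x86
        {0x15D8, 0x0},   // device 0x15D8, any revision
    };
    // Referenced from Conditions as:
    //   base::size(kExampleDevices),  // Devices size
    //   kExampleDevices,              // Devices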
diff --git a/chromium/gpu/config/gpu_control_list_entry_unittest.cc b/chromium/gpu/config/gpu_control_list_entry_unittest.cc
index 176081710c9..4ff35708d33 100644
--- a/chromium/gpu/config/gpu_control_list_entry_unittest.cc
+++ b/chromium/gpu/config/gpu_control_list_entry_unittest.cc
@@ -1156,4 +1156,38 @@ TEST_F(GpuControlListEntryTest, IntelOldDriverVersionEntry) {
EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
}
+#if defined(OS_WIN)
+TEST_F(GpuControlListEntryTest, DeviceRevisionEntry) {
+ const Entry& entry = GetEntry(kGpuControlListEntryTest_DeviceRevisionEntry);
+ GPUInfo gpu_info;
+ gpu_info.gpu.vendor_id = 0x1002;
+ gpu_info.gpu.device_id = 0x15DD;
+ gpu_info.gpu.revision = 0x86;
+ gpu_info.gpu.driver_version = "26.20.12055.1000";
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.driver_version = "26.20.15023.6032";
+ EXPECT_FALSE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.device_id = 0x15D8;
+ gpu_info.gpu.revision = 0xE1;
+ gpu_info.gpu.driver_version = "26.20.12055.1000";
+ EXPECT_FALSE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.revision = 0xE3;
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+}
+
+TEST_F(GpuControlListEntryTest, DeviceRevisionUnspecifiedEntry) {
+ const Entry& entry =
+ GetEntry(kGpuControlListEntryTest_DeviceRevisionUnspecifiedEntry);
+ GPUInfo gpu_info;
+ gpu_info.gpu.vendor_id = 0x1002;
+ gpu_info.gpu.device_id = 0x15DD;
+ gpu_info.gpu.revision = 0x86;
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.revision = 0x91;
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+ gpu_info.gpu.revision = 0x0;
+ EXPECT_TRUE(entry.Contains(kOsWin, "", gpu_info));
+}
+#endif // OS_WIN
+
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_format.txt b/chromium/gpu/config/gpu_control_list_format.txt
index 6def629c495..1ff95688f54 100644
--- a/chromium/gpu/config/gpu_control_list_format.txt
+++ b/chromium/gpu/config/gpu_control_list_format.txt
@@ -22,69 +22,75 @@
// "os".
// "version" is a VERSION structure (defined below).
// 3. "vendor_id" is a string. 0 is reserved.
-// 4. "device_id" is an array of strings. 0 is reserved.
-// 5. "multi_gpu_style" is a string, valid values include:
+// 4. "device_id" is an array of strings. 0 is reserved
+// 5. "device_revision" is an array of strings. Default is 0. This is Windows
+// only. There are three ways to specify a device on Windows:
+// a) specify device IDs only;
+// b) specify one device ID, associated with multiple revisions;
+// c) specify k device IDs, associated with k device revisions.
+// https://docs.microsoft.com/en-us/windows-hardware/drivers/install/identifiers-for-pci-devices
+// 6. "multi_gpu_style" is a string, valid values include:
// a) "optimus": NVIDIA dual GPU
// b) "amd_switchable": AMD dual GPU
// c) "amd_switchable_integrated": AMD dual GPU, integrated GPU is active
// d) "amd_switchable_discrete": AMD dual GPU, discrete GPU is active
// c) and d) are only valid on Win, as on Mac we can switch GPU on the fly.
-// 6. "multi_gpu_category" is a string, valid values include "any", "primary",
+// 7. "multi_gpu_category" is a string, valid values include "any", "primary",
// "secondary", and "active". If unspecified, the default value is "active".
// See gpu_control_list.h for more details on the meanings of the strings.
-// 7. "driver_vendor" is a string pattern. (Not available on Windows)
-// 8. "driver_version" is a VERSION structure (defined below). On Windows
+// 8. "driver_vendor" is a string pattern. (Not available on Windows)
+// 9. "driver_version" is a VERSION structure (defined below). On Windows
// this value can be retrieved by searching for the "Driver File Version" in
// dxdiag.txt
-// 9. "gl_type" is a string, valid values include "gl", "gles", and "angle".
+// 10. "gl_type" is a string, valid values include "gl", "gles", and "angle".
// If "gl_version" is specified and "gl_type" is not, use the default value.
// The default value on Android is "gles", on Windows is "angle", on other
// platforms is "gl".
-// 10. "gl_version" is a VERSION structure (defined below).
-// 11. "gl_vendor" is a string pattern.
-// 12. "gl_renderer" is a string pattern.
-// 13. "gl_extensions" is a string pattern.
-// 14. "machine_model_name" is an array of string patterns.
-// 15. "machine_model_version" is a VERSION structure (defined below).
-// 16. "gpu_count" is a INT structure (defined below).
-// 17. "cpu_info" is a string pattern.
-// 18. "exceptions" is a list of entries.
-// 19. "features" is a list of gpu control list options, which can be
+// 11. "gl_version" is a VERSION structure (defined below).
+// 12. "gl_vendor" is a string pattern.
+// 13. "gl_renderer" is a string pattern.
+// 14. "gl_extensions" is a string pattern.
+// 15. "machine_model_name" is an array of string patterns.
+// 16. "machine_model_version" is a VERSION structure (defined below).
+// 17. "gpu_count" is a INT structure (defined below).
+// 18. "cpu_info" is a string pattern.
+// 19. "exceptions" is a list of entries.
+// 20. "features" is a list of gpu control list options, which can be
// configured by a specific list. See its *_json.cc file for a list of
// supported features. This field is mandatory.
// An "exceptions" list to the features can be added for convenience instead
// of listing all features except one.
-// 20. "description" has the description of the entry.
-// 21. "webkit_bugs" is an array of associated webkit bug numbers.
-// 22. "cr_bugs" is an array of associated webkit bug numbers.
-// 23. "disabled" is a boolean. If it is present, the entry will be skipped.
+// 21. "description" has the description of the entry.
+// 22. "webkit_bugs" is an array of associated webkit bug numbers.
+// 23. "cr_bugs" is an array of associated webkit bug numbers.
+// 24. "disabled" is a boolean. If it is present, the entry will be skipped.
// This cannot be used in exceptions.
-// 24. "direct_rendering" is a boolean. If present, this will filter on whether
+// 25. "direct_rendering" is a boolean. If present, this will filter on whether
// the GL contexts are direct or indirect based on the value.
-// 25. "disabled_extensions" is a list of strings which contain the GL_EXTENSION
+// 26. "disabled_extensions" is a list of strings which contain the GL_EXTENSION
// strings which are disabled by the workaround.
-// 26. "pixel_shader_version" is a VERSION structure (defined below).
-// 27. "test_group" is an non-negative integer. If not specified, it defaults
+// 27. "pixel_shader_version" is a VERSION structure (defined below).
+// 28. "test_group" is an non-negative integer. If not specified, it defaults
// to 0, which is Chrome's blacklist. Any entries with a non-zero test_group
// ID will be appended on top of the default group entries if Chrome runs
// with --gpu-blacklist-test-group=ID.
-// 28. "intel_gpu_series" is a list of gpu series names. Currently supported
+// 29. "intel_gpu_series" is a list of gpu series names. Currently supported
// series include: "broadwater", "eaglelake", "ironlake", "sandybridge",
// "baytrail", "ivybridge", "haswell", "cherrytrail", "broadwell",
// "apollolake", "skylake", "geminilake", "kabylake", "coffeelake",
// "whiskeylake", "cometlake", "cannonlake", "icelake", "elkhartlake",
// "jasperlake", "tigerlake".
-// 29. "hardware_overlay" is either "supported" or "unsupported". Currently it
+// 30. "hardware_overlay" is either "supported" or "unsupported". Currently it
// only applies on Windows where hardware overlays may be supported on
// certain Intel GPUs. By default it's "dont_care" and there is no need to
// specify that.
-// 30. "intel_gpu_generation" is a VERSION structure. Each Intel GPU has a
+// 31. "intel_gpu_generation" is a VERSION structure. Each Intel GPU has a
// specific integer (its generation) associated with it.
-// 31. "subpixel_font_rendering" is either "supported" or "unsupported".
+// 32. "subpixel_font_rendering" is either "supported" or "unsupported".
// Currently it only applies on ChromeOS where subpixel font rendering
// causes a glitch on Mali GPUs. By default it's "dont_care" and there is
// no need to specify that.
-// 32. "driver_update_link" provides a link where affected users with older
+// 33. "driver_update_link" provides a link where affected users with older
// drivers can download a newer driver to avoid triggering this entry.
// Such a link will be displayed in chrome://gpu for affected devices.
//
@@ -92,15 +98,15 @@
// be any of the following values: "=", "<", "<=", ">", ">=", "any", "between".
// "style" is optional and can be "lexical" or "numerical"; if it's not
// specified, it defaults to "numerical". "schema" is optional and can be
-// "common" or "intel_driver"; if it's not specified, it defaults to "common";
-// it's an error to specify "intel_driver" schema for entries that are not
-// specifically for Intel GPUs on Windows. "value2" is only used if "op" is
-// "between". "between" is "value <= * <= value2". "value" is used for all "op"
-// values except "any". "value" and "value2" are in the format of x, x.x,
-// x.x.x, etc.
+// "common", "intel_driver" or "nvidia_driver"; if it's not specified, it
+// defaults to "common"; it's an error to specify "(intel|nvidia)_driver" schema
+// for entries that are not specifically for Intel|Nvidia GPUs on Windows.
+// "value2" is only used if "op" is "between". "between" is
+// "value <= * <= value2". "value" is used for all "op" values except "any".
+// "value" and "value2" are in the format of x, x.x, x.x.x, etc.
// Only "driver_version" supports lexical style if the format is major.minor;
// in that case, major is still numerical, but minor is lexical.
-// Only "driver_version" supports "intel_driver" schema.
+// Only "driver_version" supports "(intel|nvidia)_driver" schema.
//
// FLOAT includes "op" "value", and "value2". "op" can be any of the
// following values: "=", "<", "<=", ">", ">=", "any", "between". "value2" is
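
For the new "nvidia_driver" schema, the list-side value is written in
Nvidia's published form and the detected version is normalized before
comparison. A minimal sketch of the corresponding Version initializer,
following the field order used throughout the autogen tables in this patch
(the constant name and values are illustrative):

    const GpuControlList::Version kNvidiaDriverVersionLessThan44575 = {
        GpuControlList::kLT,                         // op
        GpuControlList::kVersionStyleNumerical,      // style
        GpuControlList::kVersionSchemaNvidiaDriver,  // schema
        "445.75",                                    // value (published form)
        nullptr,                                     // value2 (only for "between")
    };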
diff --git a/chromium/gpu/config/gpu_control_list_testing.json b/chromium/gpu/config/gpu_control_list_testing.json
index ac413f1f2d2..f89624c144d 100644
--- a/chromium/gpu/config/gpu_control_list_testing.json
+++ b/chromium/gpu/config/gpu_control_list_testing.json
@@ -948,6 +948,47 @@
"features": [
"test_feature_0"
]
+ },
+ {
+ "id": 78,
+ "description": "GpuControlListEntryTest.DeviceRevisionEntry",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x1002",
+ "exceptions": [
+ {
+ "device_id": ["0x15D8", "0x15DD"],
+ "device_revision": ["0x93", "0x86"],
+ "driver_version": {
+ "op": ">=",
+ "value": "26.20.15023.6032"
+ }
+ },
+ {
+ "device_id": ["0x15D8"],
+ "device_revision": ["0xE1", "0xE2"],
+ "driver_version": {
+ "op": ">=",
+ "value": "26.20.12055.1000"
+ }
+ }
+ ],
+ "features": [
+ "test_feature_0"
+ ]
+ },
+ {
+ "id": 79,
+ "description": "GpuControlListEntryTest.DeviceRevisionUnspecifiedEntry",
+ "os": {
+ "type": "win"
+ },
+ "vendor_id": "0x1002",
+ "device_id": ["0x15D8", "0x15DD"],
+ "features": [
+ "test_feature_0"
+ ]
}
]
}
diff --git a/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
index 88b02cebb2a..acac88bd71c 100644
--- a/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
+++ b/chromium/gpu/config/gpu_control_list_testing_arrays_and_structs_autogen.h
@@ -28,8 +28,8 @@ const uint32_t kCrBugsForGpuControlTestingEntry1[2] = {
678,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry1[1] = {
- 0x0640,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry1[1] = {
+ {0x0640, 0x0},
};
const GpuControlList::DriverInfo kDriverInfoForGpuControlTestingEntry1 = {
@@ -192,9 +192,9 @@ const int kFeatureListForGpuControlTestingEntry6[1] = {
TEST_FEATURE_0,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry6[2] = {
- 0x1023,
- 0x0640,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry6[2] = {
+ {0x1023, 0x0},
+ {0x0640, 0x0},
};
const GpuControlList::More kMoreForEntry6_1440601243 = {
@@ -718,8 +718,8 @@ const int kFeatureListForGpuControlTestingEntry25[1] = {
TEST_FEATURE_0,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry25[1] = {
- 0x0640,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry25[1] = {
+ {0x0640, 0x0},
};
const GpuControlList::More kMoreForEntry25_1440601243 = {
@@ -935,8 +935,8 @@ const int kFeatureListForGpuControlTestingEntry30[1] = {
TEST_FEATURE_0,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry30[1] = {
- 0x0166,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry30[1] = {
+ {0x0166, 0x0},
};
const GpuControlList::More kMoreForEntry30_1440601243 = {
@@ -962,8 +962,8 @@ const int kFeatureListForGpuControlTestingEntry31[1] = {
TEST_FEATURE_0,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry31[1] = {
- 0x0640,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry31[1] = {
+ {0x0640, 0x0},
};
const GpuControlList::More kMoreForEntry31_1440601243 = {
@@ -989,8 +989,8 @@ const int kFeatureListForGpuControlTestingEntry32[1] = {
TEST_FEATURE_0,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry32[1] = {
- 0x0166,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry32[1] = {
+ {0x0166, 0x0},
};
const GpuControlList::More kMoreForEntry32_1440601243 = {
@@ -1016,8 +1016,8 @@ const int kFeatureListForGpuControlTestingEntry33[1] = {
TEST_FEATURE_0,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry33[1] = {
- 0x0166,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry33[1] = {
+ {0x0166, 0x0},
};
const GpuControlList::More kMoreForEntry33_1440601243 = {
@@ -1043,8 +1043,8 @@ const int kFeatureListForGpuControlTestingEntry34[1] = {
TEST_FEATURE_0,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry34[1] = {
- 0x0166,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry34[1] = {
+ {0x0166, 0x0},
};
const GpuControlList::More kMoreForEntry34_1440601243 = {
@@ -1070,9 +1070,9 @@ const int kFeatureListForGpuControlTestingEntry35[1] = {
TEST_FEATURE_0,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry35[2] = {
- 0x0166,
- 0x0168,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry35[2] = {
+ {0x0166, 0x0},
+ {0x0168, 0x0},
};
const GpuControlList::More kMoreForEntry35_1440601243 = {
@@ -1121,8 +1121,8 @@ const int kFeatureListForGpuControlTestingEntry37[1] = {
TEST_FEATURE_0,
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry37[1] = {
- 0x0640,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry37[1] = {
+ {0x0640, 0x0},
};
const GpuControlList::More kMoreForEntry37_1440601243 = {
@@ -1305,8 +1305,9 @@ const GpuControlList::More kMoreForEntry44_1440601243 = {
GpuControlList::kDontCare, // subpixel_font_rendering
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry44Exception0[1] = {
- 0x2a06,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry44Exception0[1] =
+ {
+ {0x2a06, 0x0},
};
const GpuControlList::DriverInfo
@@ -1336,8 +1337,9 @@ const GpuControlList::More kMoreForEntry44_1440601243Exception0 = {
GpuControlList::kDontCare, // subpixel_font_rendering
};
-const uint32_t kDeviceIDsForGpuControlTestingEntry44Exception1[1] = {
- 0x2a02,
+const GpuControlList::Device kDevicesForGpuControlTestingEntry44Exception1[1] =
+ {
+ {0x2a02, 0x0},
};
const GpuControlList::DriverInfo
@@ -2334,6 +2336,123 @@ const GpuControlList::More kMoreForEntry77_1440601243 = {
GpuControlList::kDontCare, // subpixel_font_rendering
};
+const int kFeatureListForGpuControlTestingEntry78[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::More kMoreForEntry78_1440601243 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // direct_rendering_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
+ GpuControlList::kDontCare, // subpixel_font_rendering
+};
+
+const GpuControlList::Device kDevicesForGpuControlTestingEntry78Exception0[2] =
+ {
+ {0x15D8, 0x93},
+ {0x15DD, 0x86},
+};
+
+const GpuControlList::DriverInfo
+ kDriverInfoForGpuControlTestingEntry78Exception0 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kGE, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, "26.20.15023.6032",
+ nullptr}, // driver_version
+};
+
+const GpuControlList::More kMoreForEntry78_1440601243Exception0 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // direct_rendering_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
+ GpuControlList::kDontCare, // subpixel_font_rendering
+};
+
+const GpuControlList::Device kDevicesForGpuControlTestingEntry78Exception1[2] =
+ {
+ {0x15D8, 0xE1},
+ {0x15D8, 0xE2},
+};
+
+const GpuControlList::DriverInfo
+ kDriverInfoForGpuControlTestingEntry78Exception1 = {
+ nullptr, // driver_vendor
+ {GpuControlList::kGE, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, "26.20.12055.1000",
+ nullptr}, // driver_version
+};
+
+const GpuControlList::More kMoreForEntry78_1440601243Exception1 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // direct_rendering_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
+ GpuControlList::kDontCare, // subpixel_font_rendering
+};
+
+const int kFeatureListForGpuControlTestingEntry79[1] = {
+ TEST_FEATURE_0,
+};
+
+const GpuControlList::Device kDevicesForGpuControlTestingEntry79[2] = {
+ {0x15D8, 0x0},
+ {0x15DD, 0x0},
+};
+
+const GpuControlList::More kMoreForEntry79_1440601243 = {
+ GpuControlList::kGLTypeNone, // gl_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // gl_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // pixel_shader_version
+ false, // in_process_gpu
+ 0, // gl_reset_notification_strategy
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // direct_rendering_version
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // gpu_count
+ GpuControlList::kDontCare, // hardware_overlay
+ 0, // test_group
+ GpuControlList::kDontCare, // subpixel_font_rendering
+};
+
} // namespace gpu
#endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_ARRAYS_AND_STRUCTS_AUTOGEN_H_
diff --git a/chromium/gpu/config/gpu_control_list_testing_autogen.cc b/chromium/gpu/config/gpu_control_list_testing_autogen.cc
index 64b9129fac4..e356b79cd81 100644
--- a/chromium/gpu/config/gpu_control_list_testing_autogen.cc
+++ b/chromium/gpu/config/gpu_control_list_testing_autogen.cc
@@ -31,10 +31,10 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsMacosx, // os_type
{GpuControlList::kEQ, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, "10.6.4",
- nullptr}, // os_version
- 0x10de, // vendor_id
- base::size(kDeviceIDsForGpuControlTestingEntry1), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry1, // DeviceIDs
+ nullptr}, // os_version
+ 0x10de, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry1), // Devices size
+ kDevicesForGpuControlTestingEntry1, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry1, // driver info
@@ -67,8 +67,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x10de, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -101,8 +101,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x10de, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -135,8 +135,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -169,8 +169,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -201,10 +201,10 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsAny, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
- nullptr}, // os_version
- 0x10de, // vendor_id
- base::size(kDeviceIDsForGpuControlTestingEntry6), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry6, // DeviceIDs
+ nullptr}, // os_version
+ 0x10de, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry6), // Devices size
+ kDevicesForGpuControlTestingEntry6, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -237,8 +237,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -271,8 +271,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -305,8 +305,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -339,8 +339,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -373,8 +373,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -407,8 +407,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -441,8 +441,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -475,8 +475,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -509,8 +509,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -543,8 +543,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleOptimus, // multi_gpu_style
nullptr, // driver info
@@ -577,8 +577,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleAMDSwitchable, // multi_gpu_style
nullptr, // driver info
@@ -611,8 +611,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry18, // driver info
@@ -645,8 +645,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x1002, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry19, // driver info
@@ -679,8 +679,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry20, // driver info
@@ -713,8 +713,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -747,8 +747,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -781,8 +781,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -815,8 +815,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -847,18 +847,17 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsMacosx, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
- nullptr}, // os_version
- 0x10de, // vendor_id
- base::size(
- kDeviceIDsForGpuControlTestingEntry25), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry25, // DeviceIDs
- GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
- GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
- nullptr, // driver info
- nullptr, // GL strings
- nullptr, // machine model info
- 0, // intel_gpu_series size
- nullptr, // intel_gpu_series
+ nullptr}, // os_version
+ 0x10de, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry25), // Devices size
+ kDevicesForGpuControlTestingEntry25, // Devices
+ GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // intel_gpu_generation
@@ -884,8 +883,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -918,8 +917,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -952,8 +951,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -986,8 +985,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1018,18 +1017,17 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsMacosx, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
- nullptr}, // os_version
- 0x8086, // vendor_id
- base::size(
- kDeviceIDsForGpuControlTestingEntry30), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry30, // DeviceIDs
- GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
- GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
- nullptr, // driver info
- nullptr, // GL strings
- nullptr, // machine model info
- 0, // intel_gpu_series size
- nullptr, // intel_gpu_series
+ nullptr}, // os_version
+ 0x8086, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry30), // Devices size
+ kDevicesForGpuControlTestingEntry30, // Devices
+ GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // intel_gpu_generation
@@ -1053,18 +1051,17 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsMacosx, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
- nullptr}, // os_version
- 0x10de, // vendor_id
- base::size(
- kDeviceIDsForGpuControlTestingEntry31), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry31, // DeviceIDs
- GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
- GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
- nullptr, // driver info
- nullptr, // GL strings
- nullptr, // machine model info
- 0, // intel_gpu_series size
- nullptr, // intel_gpu_series
+ nullptr}, // os_version
+ 0x10de, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry31), // Devices size
+ kDevicesForGpuControlTestingEntry31, // Devices
+ GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // intel_gpu_generation
@@ -1088,11 +1085,10 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsMacosx, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
- nullptr}, // os_version
- 0x8086, // vendor_id
- base::size(
- kDeviceIDsForGpuControlTestingEntry32), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry32, // DeviceIDs
+ nullptr}, // os_version
+ 0x8086, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry32), // Devices size
+ kDevicesForGpuControlTestingEntry32, // Devices
GpuControlList::kMultiGpuCategorySecondary, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1123,18 +1119,17 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsMacosx, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
- nullptr}, // os_version
- 0x8086, // vendor_id
- base::size(
- kDeviceIDsForGpuControlTestingEntry33), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry33, // DeviceIDs
- GpuControlList::kMultiGpuCategoryPrimary, // multi_gpu_category
- GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
- nullptr, // driver info
- nullptr, // GL strings
- nullptr, // machine model info
- 0, // intel_gpu_series size
- nullptr, // intel_gpu_series
+ nullptr}, // os_version
+ 0x8086, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry33), // Devices size
+ kDevicesForGpuControlTestingEntry33, // Devices
+ GpuControlList::kMultiGpuCategoryPrimary, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // intel_gpu_generation
@@ -1158,18 +1153,17 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsMacosx, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
- nullptr}, // os_version
- 0x8086, // vendor_id
- base::size(
- kDeviceIDsForGpuControlTestingEntry34), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry34, // DeviceIDs
- GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
- GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
- nullptr, // driver info
- nullptr, // GL strings
- nullptr, // machine model info
- 0, // intel_gpu_series size
- nullptr, // intel_gpu_series
+ nullptr}, // os_version
+ 0x8086, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry34), // Devices size
+ kDevicesForGpuControlTestingEntry34, // Devices
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // intel_gpu_generation
@@ -1193,18 +1187,17 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsMacosx, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
- nullptr}, // os_version
- 0x8086, // vendor_id
- base::size(
- kDeviceIDsForGpuControlTestingEntry35), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry35, // DeviceIDs
- GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
- GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
- nullptr, // driver info
- nullptr, // GL strings
- nullptr, // machine model info
- 0, // intel_gpu_series size
- nullptr, // intel_gpu_series
+ nullptr}, // os_version
+ 0x8086, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry35), // Devices size
+ kDevicesForGpuControlTestingEntry35, // Devices
+ GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // intel_gpu_generation
@@ -1230,8 +1223,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1262,18 +1255,17 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kOsMacosx, // os_type
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
- nullptr}, // os_version
- 0x10de, // vendor_id
- base::size(
- kDeviceIDsForGpuControlTestingEntry37), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry37, // DeviceIDs
- GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
- GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
- nullptr, // driver info
- nullptr, // GL strings
- nullptr, // machine model info
- 0, // intel_gpu_series size
- nullptr, // intel_gpu_series
+ nullptr}, // os_version
+ 0x10de, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry37), // Devices size
+ kDevicesForGpuControlTestingEntry37, // Devices
+ GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // intel_gpu_generation
@@ -1299,8 +1291,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x10de, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1333,8 +1325,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1367,8 +1359,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, "4.2",
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1401,8 +1393,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1435,8 +1427,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1468,8 +1460,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
{GpuControlList::kGE, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, "6", nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1502,8 +1494,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1536,8 +1528,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::
kMultiGpuStyleAMDSwitchableDiscrete, // multi_gpu_style
@@ -1571,8 +1563,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::
kMultiGpuStyleAMDSwitchableIntegrated, // multi_gpu_style
@@ -1606,8 +1598,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1640,8 +1632,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1674,8 +1666,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x10de, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry49, // driver info
@@ -1708,8 +1700,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x10de, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry50, // driver info
@@ -1742,8 +1734,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1776,8 +1768,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1810,8 +1802,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry53, // driver info
@@ -1844,8 +1836,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1878,8 +1870,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1912,8 +1904,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1946,8 +1938,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, "3.19.1",
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -1980,8 +1972,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2014,8 +2006,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2048,8 +2040,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2082,8 +2074,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2116,8 +2108,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2150,8 +2142,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryPrimary, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2184,8 +2176,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategorySecondary, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2218,8 +2210,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2252,8 +2244,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry66, // driver info
@@ -2286,8 +2278,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2320,8 +2312,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2354,8 +2346,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryActive, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2388,8 +2380,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryAny, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2422,8 +2414,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryPrimary, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2456,8 +2448,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategorySecondary, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2490,8 +2482,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2524,8 +2516,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -2558,8 +2550,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry75, // driver info
@@ -2592,8 +2584,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry76, // driver info
@@ -2626,8 +2618,8 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
GpuControlList::kVersionSchemaCommon, nullptr,
nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry77, // driver info
@@ -2643,6 +2635,74 @@ const GpuControlList::Entry kGpuControlListTestingEntries[] = {
0, // exceptions count
nullptr, // exceptions
},
+ {
+ 78, // id
+ "GpuControlListEntryTest.DeviceRevisionEntry",
+ base::size(kFeatureListForGpuControlTestingEntry78), // features size
+ kFeatureListForGpuControlTestingEntry78, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // DisabledWebGLExtensions size
+ nullptr, // DisabledWebGLExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // os_version
+ 0x1002, // vendor_id
+ 0, // Devices size
+ nullptr, // Devices
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // intel_gpu_generation
+ &kMoreForEntry78_1440601243, // more data
+ },
+ base::size(kExceptionsForEntry78), // exceptions count
+ kExceptionsForEntry78, // exceptions
+ },
+ {
+ 79, // id
+ "GpuControlListEntryTest.DeviceRevisionUnspecifiedEntry",
+ base::size(kFeatureListForGpuControlTestingEntry79), // features size
+ kFeatureListForGpuControlTestingEntry79, // features
+ 0, // DisabledExtensions size
+ nullptr, // DisabledExtensions
+ 0, // DisabledWebGLExtensions size
+ nullptr, // DisabledWebGLExtensions
+ 0, // CrBugs size
+ nullptr, // CrBugs
+ {
+ GpuControlList::kOsWin, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // os_version
+ 0x1002, // vendor_id
+ base::size(kDevicesForGpuControlTestingEntry79), // Devices size
+ kDevicesForGpuControlTestingEntry79, // Devices
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ nullptr, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // intel_gpu_generation
+ &kMoreForEntry79_1440601243, // more data
+ },
+ 0, // exceptions count
+ nullptr, // exceptions
+ },
};
-const size_t kGpuControlListTestingEntryCount = 77;
+const size_t kGpuControlListTestingEntryCount = 79;
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
index fc12d787732..7b1a8fecf3e 100644
--- a/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
+++ b/chromium/gpu/config/gpu_control_list_testing_entry_enums_autogen.h
@@ -90,6 +90,8 @@ enum GpuControlListTestingEntryEnum {
kGpuControlListEntryTest_IntelDriverVendorEntry = 74,
kGpuControlListEntryTest_IntelDriverVersionEntry = 75,
kGpuControlListEntryTest_IntelOldDriverVersionEntry = 76,
+ kGpuControlListEntryTest_DeviceRevisionEntry = 77,
+ kGpuControlListEntryTest_DeviceRevisionUnspecifiedEntry = 78,
};
} // namespace gpu
diff --git a/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h b/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h
index 355d7b165d0..fa66aa4aff2 100644
--- a/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h
+++ b/chromium/gpu/config/gpu_control_list_testing_exceptions_autogen.h
@@ -18,8 +18,8 @@ const GpuControlList::Conditions kExceptionsForEntry4[1] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x10de, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -40,8 +40,8 @@ const GpuControlList::Conditions kExceptionsForEntry5[1] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x8086, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -62,8 +62,8 @@ const GpuControlList::Conditions kExceptionsForEntry21[1] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -84,8 +84,8 @@ const GpuControlList::Conditions kExceptionsForEntry27[1] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -106,8 +106,8 @@ const GpuControlList::Conditions kExceptionsForEntry29[1] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -129,8 +129,8 @@ const GpuControlList::Conditions kExceptionsForEntry44[2] = {
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x8086, // vendor_id
base::size(
- kDeviceIDsForGpuControlTestingEntry44Exception0), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry44Exception0, // DeviceIDs
+ kDevicesForGpuControlTestingEntry44Exception0), // Devices size
+ kDevicesForGpuControlTestingEntry44Exception0, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry44Exception0, // driver info
@@ -149,8 +149,8 @@ const GpuControlList::Conditions kExceptionsForEntry44[2] = {
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x8086, // vendor_id
base::size(
- kDeviceIDsForGpuControlTestingEntry44Exception1), // DeviceIDs size
- kDeviceIDsForGpuControlTestingEntry44Exception1, // DeviceIDs
+ kDevicesForGpuControlTestingEntry44Exception1), // Devices size
+ kDevicesForGpuControlTestingEntry44Exception1, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
&kDriverInfoForGpuControlTestingEntry44Exception1, // driver info
@@ -171,8 +171,8 @@ const GpuControlList::Conditions kExceptionsForEntry51[1] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -193,8 +193,8 @@ const GpuControlList::Conditions kExceptionsForEntry65[1] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -216,8 +216,8 @@ const GpuControlList::Conditions kExceptionsForEntry73[3] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -235,8 +235,8 @@ const GpuControlList::Conditions kExceptionsForEntry73[3] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -254,8 +254,8 @@ const GpuControlList::Conditions kExceptionsForEntry73[3] = {
{GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
0x00, // vendor_id
- 0, // DeviceIDs size
- nullptr, // DeviceIDs
+ 0, // Devices size
+ nullptr, // Devices
GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
nullptr, // driver info
@@ -270,6 +270,49 @@ const GpuControlList::Conditions kExceptionsForEntry73[3] = {
},
};
+const GpuControlList::Conditions kExceptionsForEntry78[2] = {
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
+ 0x1002, // vendor_id
+ base::size(
+ kDevicesForGpuControlTestingEntry78Exception0), // Devices size
+ kDevicesForGpuControlTestingEntry78Exception0, // Devices
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForGpuControlTestingEntry78Exception0, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // intel_gpu_generation
+ &kMoreForEntry78_1440601243Exception0, // more data
+ },
+ {
+ GpuControlList::kOsAny, // os_type
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr, nullptr}, // os_version
+ 0x1002, // vendor_id
+ base::size(
+ kDevicesForGpuControlTestingEntry78Exception1), // Devices size
+ kDevicesForGpuControlTestingEntry78Exception1, // Devices
+ GpuControlList::kMultiGpuCategoryNone, // multi_gpu_category
+ GpuControlList::kMultiGpuStyleNone, // multi_gpu_style
+ &kDriverInfoForGpuControlTestingEntry78Exception1, // driver info
+ nullptr, // GL strings
+ nullptr, // machine model info
+ 0, // intel_gpu_series size
+ nullptr, // intel_gpu_series
+ {GpuControlList::kUnknown, GpuControlList::kVersionStyleNumerical,
+ GpuControlList::kVersionSchemaCommon, nullptr,
+ nullptr}, // intel_gpu_generation
+ &kMoreForEntry78_1440601243Exception1, // more data
+ },
+};
+
} // namespace gpu
#endif // GPU_CONFIG_GPU_CONTROL_LIST_TESTING_EXCEPTIONS_AUTOGEN_H_
diff --git a/chromium/gpu/config/gpu_control_list_version_unittest.cc b/chromium/gpu/config/gpu_control_list_version_unittest.cc
index 896b081aa1d..2c7dd2468e3 100644
--- a/chromium/gpu/config/gpu_control_list_version_unittest.cc
+++ b/chromium/gpu/config/gpu_control_list_version_unittest.cc
@@ -16,6 +16,7 @@ constexpr auto kLexical = GpuControlList::kVersionStyleLexical;
constexpr auto kCommon = GpuControlList::kVersionSchemaCommon;
constexpr auto kIntelDriver = GpuControlList::kVersionSchemaIntelDriver;
+constexpr auto kNvidiaDriver = GpuControlList::kVersionSchemaNvidiaDriver;
constexpr auto kBetween = GpuControlList::kBetween;
constexpr auto kEQ = GpuControlList::kEQ;
@@ -219,4 +220,51 @@ TEST_F(VersionTest, IntelDriverSchema) {
}
}
+TEST_F(VersionTest, NvidiaDriverSchema) {
+ {
+ // Nvidia driver versions have the form XX.XX.XXXA.AABB; only AAA.BB is
+ // considered. The version is specified as "AAA.BB" or "AAA" in the
+ // workaround file.
+ {
+ // "AAA.BB" should exactly specify one version.
+ Version info = {kLT, kNumerical, kNvidiaDriver, "234.56", nullptr};
+ EXPECT_TRUE(info.Contains("26.10.0012.3455"));
+ EXPECT_TRUE(info.Contains("00.00.0012.3455"));
+ EXPECT_TRUE(info.Contains("00.00.012.3455"));
+ EXPECT_TRUE(info.Contains("00.00.12.3455"));
+ EXPECT_FALSE(info.Contains("26.10.0012.3456"));
+ EXPECT_FALSE(info.Contains("26.10.012.3456"));
+ EXPECT_FALSE(info.Contains("26.10.12.3456"));
+ EXPECT_FALSE(info.Contains("26.10.0012.3457"));
+ EXPECT_FALSE(info.Contains("00.00.0012.3457"));
+ EXPECT_TRUE(info.Contains("26.10.0012.2457"));
+ EXPECT_TRUE(info.Contains("26.10.0011.3457"));
+
+ // Leading zeros in the third stanza are okay.
+ EXPECT_TRUE(info.Contains("26.10.0002.3455"));
+ EXPECT_FALSE(info.Contains("26.10.0002.3456"));
+ EXPECT_FALSE(info.Contains("26.10.0002.3457"));
+ EXPECT_TRUE(info.Contains("26.10.0010.3457"));
+ EXPECT_TRUE(info.Contains("26.10.0000.3457"));
+
+ // Missing leading zeros in the fourth stanza are filled in.
+ EXPECT_TRUE(info.Contains("26.10.0012.455"));
+ EXPECT_TRUE(info.Contains("26.10.0012.57"));
+ EXPECT_FALSE(info.Contains("26.10.0013.456"));
+ EXPECT_FALSE(info.Contains("26.10.0013.57"));
+
+ // Too short is rejected.
+ EXPECT_FALSE(info.Contains("26.10..57"));
+ EXPECT_FALSE(info.Contains("26.10.100"));
+ EXPECT_FALSE(info.Contains("26.10.100."));
+ }
+
+ {
+ // "AAA" should allow "AAA.*"
+ Version info = {kEQ, kNumerical, kNvidiaDriver, "234", nullptr};
+ EXPECT_FALSE(info.Contains("26.10.0012.3556"));
+ EXPECT_TRUE(info.Contains("26.10.0012.3456"));
+ }
+ }
+}
+
} // namespace gpu
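
The NvidiaDriverSchema test above encodes the mapping from a raw Windows driver string XX.XX.XXXA.AABB to the user-facing AAA.BB number: AAA is the last digit of the third stanza followed by the first two digits of the (zero-padded) fourth stanza, and BB is the last two digits of the fourth. A minimal sketch of that mapping, assuming a hypothetical helper name (NvidiaUserFacingVersion is not part of the patch; the real comparison lives in GpuControlList's version code):

  #include <cstdio>
  #include <optional>
  #include <string>
  #include <vector>

  // Hypothetical helper, not from the patch: derive the user-facing AAA.BB
  // version from an NVIDIA driver string of the form XX.XX.XXXA.AABB.
  std::optional<std::string> NvidiaUserFacingVersion(const std::string& v) {
    // Split into '.'-separated stanzas.
    std::vector<std::string> parts;
    size_t start = 0;
    while (true) {
      size_t dot = v.find('.', start);
      parts.push_back(v.substr(start, dot - start));
      if (dot == std::string::npos) break;
      start = dot + 1;
    }
    // Exactly four stanzas are required; the last two must be non-empty and
    // the fourth at most four digits ("26.10..57" and "26.10.100" fail).
    if (parts.size() != 4 || parts[2].empty() || parts[3].empty() ||
        parts[3].size() > 4)
      return std::nullopt;
    std::string fourth = parts[3];
    fourth.insert(0, 4 - fourth.size(), '0');  // missing leading zeros implied
    // AAA = last digit of the third stanza + first two digits of the fourth;
    // BB = last two digits of the fourth.
    return std::string(1, parts[2].back()) + fourth.substr(0, 2) + "." +
           fourth.substr(2);
  }

  int main() {
    // 234.55 and 204.55, matching the expectations in the test above.
    printf("%s\n", NvidiaUserFacingVersion("26.10.0012.3455")->c_str());
    printf("%s\n", NvidiaUserFacingVersion("26.10.0012.455")->c_str());
  }

Under this mapping, entry 328 below ("nvidia_driver" schema, op "<", value "451.48") would match a raw version such as 27.21.14.5048, which maps to 450.48.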
diff --git a/chromium/gpu/config/gpu_driver_bug_list.json b/chromium/gpu/config/gpu_driver_bug_list.json
index 450c929ea75..8d4d155a777 100644
--- a/chromium/gpu/config/gpu_driver_bug_list.json
+++ b/chromium/gpu/config/gpu_driver_bug_list.json
@@ -2273,12 +2273,13 @@
}
},
"features": [
- "disable_accelerated_vpx_decode"
+ "disable_accelerated_vp8_decode",
+ "disable_accelerated_vp9_decode"
]
},
{
"id": 225,
- "description": "VPx decoding is too slow on Intel Broadwell, Skylake, and CherryTrail",
+ "description": "VP9 decoding is too slow on Intel Broadwell, Skylake, and CherryTrail",
"cr_bugs": [616318],
"os": {
"type": "win"
@@ -2289,12 +2290,12 @@
"cherrytrail"
],
"features": [
- "disable_accelerated_vpx_decode"
+ "disable_accelerated_vp9_decode"
]
},
{
"id": 226,
- "description": "Accelerated VPx decoding is hanging on some videos.",
+ "description": "Accelerated VP9 decoding is hanging on some videos.",
"cr_bugs": [654111],
"os": {
"type": "win"
@@ -2305,7 +2306,7 @@
"value": "21.20.16.4542"
},
"features": [
- "disable_accelerated_vpx_decode"
+ "disable_accelerated_vp9_decode"
]
},
{
@@ -3468,11 +3469,16 @@
{
"id": 328,
"cr_bugs": [1041166],
- "description": "Disable D3D11VideoDecoder due to crashes on NVIDIA",
+ "description": "Disable D3D11VideoDecoder due to crashes on NVIDIA on older drivers",
"os": {
"type": "win"
},
"vendor_id": "0x10de",
+ "driver_version": {
+ "schema": "nvidia_driver",
+ "op": "<",
+ "value": "451.48"
+ },
"features": [
"disable_d3d11_video_decoder"
]
@@ -3534,7 +3540,7 @@
{
"id": 336,
"cr_bugs": [625785],
- "description": "DXVA video decoder crashes on some AMD GPUs",
+ "description": "DXVA video decoder crashes on some AMD GPUs.",
"os": {
"type": "win"
},
@@ -3604,7 +3610,7 @@
},
{
"id": 339,
- "description": "NV12 textures trigger crash on Intel driver 20.19.15.*",
+ "description": "Array textures trigger crash on Intel driver 20.19.15.*",
"cr_bugs": [971952],
"os": {
"type": "win",
@@ -3623,12 +3629,12 @@
"value2": "20.19.15.4380"
},
"features": [
- "disable_nv12_dxgi_video"
+ "disable_dxgi_zero_copy_video"
]
},
{
"id": 340,
- "description": "NV12 textures trigger crash on Intel driver 10.18.15.*",
+ "description": "Array textures trigger crash on Intel driver 10.18.15.*",
"cr_bugs": [971952],
"os": {
"type": "win",
@@ -3647,7 +3653,50 @@
"value2": "10.18.15.4293"
},
"features": [
- "disable_nv12_dxgi_video"
+ "disable_dxgi_zero_copy_video"
+ ]
+ },
+ {
+ "id": 341,
+ "description": "Driver crash deleting FBOs on Mac Intel Broadwell",
+ "cr_bugs": [1090584],
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x8086",
+ "intel_gpu_series": [
+ "broadwell"
+ ],
+ "features": [
+ "unbind_attachments_on_bound_render_fbo_delete"
+ ]
+ },
+ {
+ "id": 342,
+ "description": "Driver crash deleting FBOs on Mac nVidia 600/700 series",
+ "cr_bugs": [1090584],
+ "os": {
+ "type": "macosx"
+ },
+ "vendor_id": "0x10de",
+ "device_id": ["0x0fe9", "0x0fd5", "0x0fd8", "0x119e", "0x0fea", "0x11a2"],
+ "features": [
+ "unbind_attachments_on_bound_render_fbo_delete"
+ ]
+ },
+ {
+ "id": 344,
+ "description": "VP8 decoding crashes before Windows 10 Fall Creators Update.",
+ "cr_bugs": [1094840],
+ "os": {
+ "type": "win",
+ "version": {
+ "op": "<",
+ "value": "10.0.16299"
+ }
+ },
+ "features": [
+ "disable_accelerated_vp8_decode"
]
}
]
diff --git a/chromium/gpu/config/gpu_finch_features.cc b/chromium/gpu/config/gpu_finch_features.cc
index 6728228533a..c17e656898a 100644
--- a/chromium/gpu/config/gpu_finch_features.cc
+++ b/chromium/gpu/config/gpu_finch_features.cc
@@ -45,12 +45,6 @@ const base::Feature kDefaultEnableOopRasterization{
"DefaultEnableOopRasterization", base::FEATURE_DISABLED_BY_DEFAULT};
#endif
-// Allow putting a video swapchain underneath the main swapchain, so overlays
-// can be used even if there are controls on top of the video. It can be
-// enabled only when overlay is supported.
-const base::Feature kDirectCompositionUnderlays{
- "DirectCompositionUnderlays", base::FEATURE_ENABLED_BY_DEFAULT};
-
#if defined(OS_WIN)
// Use a high priority for GPU process on Windows.
const base::Feature kGpuProcessHighPriorityWin{
diff --git a/chromium/gpu/config/gpu_finch_features.h b/chromium/gpu/config/gpu_finch_features.h
index f2167ba9894..e9367d82a1d 100644
--- a/chromium/gpu/config/gpu_finch_features.h
+++ b/chromium/gpu/config/gpu_finch_features.h
@@ -26,8 +26,6 @@ GPU_EXPORT extern const base::Feature kDefaultEnableGpuRasterization;
GPU_EXPORT extern const base::Feature kDefaultEnableOopRasterization;
-GPU_EXPORT extern const base::Feature kDirectCompositionUnderlays;
-
#if defined(OS_WIN)
GPU_EXPORT extern const base::Feature kGpuProcessHighPriorityWin;
#endif
diff --git a/chromium/gpu/config/gpu_info.cc b/chromium/gpu/config/gpu_info.cc
index 4a60da24429..6e55eae87ef 100644
--- a/chromium/gpu/config/gpu_info.cc
+++ b/chromium/gpu/config/gpu_info.cc
@@ -5,6 +5,7 @@
#include <stdint.h>
#include "base/logging.h"
+#include "base/notreached.h"
#include "gpu/command_buffer/common/gpu_memory_buffer_support.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/gpu_util.h"
@@ -111,18 +112,6 @@ void EnumerateImageDecodeAcceleratorSupportedProfile(
}
#if defined(OS_WIN)
-void EnumerateDx12VulkanVersionInfo(const gpu::Dx12VulkanVersionInfo& info,
- gpu::GPUInfo::Enumerator* enumerator) {
- enumerator->BeginDx12VulkanVersionInfo();
- enumerator->AddBool("supportsDx12", info.supports_dx12);
- enumerator->AddBool("supportsVulkan", info.supports_vulkan);
- enumerator->AddString("dx12FeatureLevel",
- gpu::D3DFeatureLevelToString(info.d3d12_feature_level));
- enumerator->AddString("vulkanVersion",
- gpu::VulkanVersionToString(info.vulkan_version));
- enumerator->EndDx12VulkanVersionInfo();
-}
-
void EnumerateOverlayInfo(const gpu::OverlayInfo& info,
gpu::GPUInfo::Enumerator* enumerator) {
enumerator->BeginOverlayInfo();
@@ -280,7 +269,8 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
#endif // OS_MACOSX
#if defined(OS_WIN)
DxDiagNode dx_diagnostics;
- Dx12VulkanVersionInfo dx12_vulkan_version_info;
+ uint32_t d3d12_feature_level;
+ uint32_t vulkan_version;
OverlayInfo overlay_info;
#endif
@@ -346,7 +336,12 @@ void GPUInfo::EnumerateFields(Enumerator* enumerator) const {
// TODO(kbr): add dx_diagnostics on Windows.
#if defined(OS_WIN)
EnumerateOverlayInfo(overlay_info, enumerator);
- EnumerateDx12VulkanVersionInfo(dx12_vulkan_version_info, enumerator);
+ enumerator->AddBool("supportsDx12", d3d12_feature_level != 0);
+ enumerator->AddBool("supportsVulkan", vulkan_version != 0);
+ enumerator->AddString("dx12FeatureLevel",
+ gpu::D3DFeatureLevelToString(d3d12_feature_level));
+ enumerator->AddString("vulkanVersion",
+ gpu::VulkanVersionToString(vulkan_version));
#endif
enumerator->AddInt("videoDecodeAcceleratorFlags",
video_decode_accelerator_capabilities.flags);
diff --git a/chromium/gpu/config/gpu_info.h b/chromium/gpu/config/gpu_info.h
index c8e756a6781..42bb614d83b 100644
--- a/chromium/gpu/config/gpu_info.h
+++ b/chromium/gpu/config/gpu_info.h
@@ -23,6 +23,10 @@
#include "gpu/vulkan/buildflags.h"
#include "ui/gfx/geometry/size.h"
+#if defined(OS_WIN)
+#include <dxgi.h>
+#endif
+
#if BUILDFLAG(ENABLE_VULKAN)
#include "gpu/config/vulkan_info.h"
#endif
@@ -187,29 +191,15 @@ enum class OverlaySupport {
GPU_EXPORT const char* OverlaySupportToString(OverlaySupport support);
-struct GPU_EXPORT Dx12VulkanVersionInfo {
- bool IsEmpty() const { return !d3d12_feature_level && !vulkan_version; }
-
- // True if the GPU driver supports DX12.
- bool supports_dx12 = false;
-
- // True if the GPU driver supports Vulkan.
- bool supports_vulkan = false;
-
- // The supported d3d feature level in the gpu driver;
- uint32_t d3d12_feature_level = 0;
-
- // The support Vulkan API version in the gpu driver;
- uint32_t vulkan_version = 0;
-};
-
struct GPU_EXPORT OverlayInfo {
OverlayInfo& operator=(const OverlayInfo& other) = default;
bool operator==(const OverlayInfo& other) const {
return direct_composition == other.direct_composition &&
supports_overlays == other.supports_overlays &&
yuy2_overlay_support == other.yuy2_overlay_support &&
- nv12_overlay_support == other.nv12_overlay_support;
+ nv12_overlay_support == other.nv12_overlay_support &&
+ bgra8_overlay_support == other.bgra8_overlay_support &&
+ rgb10a2_overlay_support == other.rgb10a2_overlay_support;
}
bool operator!=(const OverlayInfo& other) const { return !(*this == other); }
@@ -220,6 +210,8 @@ struct GPU_EXPORT OverlayInfo {
bool supports_overlays = false;
OverlaySupport yuy2_overlay_support = OverlaySupport::kNone;
OverlaySupport nv12_overlay_support = OverlaySupport::kNone;
+ OverlaySupport bgra8_overlay_support = OverlaySupport::kNone;
+ OverlaySupport rgb10a2_overlay_support = OverlaySupport::kNone;
};
#endif
@@ -251,10 +243,19 @@ struct GPU_EXPORT GPUInfo {
// The graphics card revision number.
uint32_t revision = 0u;
+
+ // The graphics card LUID, an identifier guaranteed to be unique until the
+ // computer is restarted. The LUID is used in addition to the vendor and
+ // device ids because a device id is only unique relative to its vendor, not
+ // globally: multiple identical graphics cards share the same vendor and
+ // device ids but have different LUIDs.
+ LUID luid;
#endif // OS_WIN
// Whether this GPU is the currently used one.
- // Currently this field is only supported and meaningful on OS X.
+ // Currently this field is only supported and meaningful on OS X and on
+ // Windows when using ANGLE with D3D11.
bool active = false;
// The strings that describe the GPU.
@@ -378,7 +379,11 @@ struct GPU_EXPORT GPUInfo {
// The information returned by the DirectX Diagnostics Tool.
DxDiagNode dx_diagnostics;
- Dx12VulkanVersionInfo dx12_vulkan_version_info;
+ // The supported D3D feature level in the gpu driver.
+ uint32_t d3d12_feature_level = 0;
+
+ // The supported Vulkan API version in the gpu driver.
+ uint32_t vulkan_version = 0;
// The GPU hardware overlay info.
OverlayInfo overlay_info;
@@ -446,9 +451,6 @@ struct GPU_EXPORT GPUInfo {
virtual void BeginAuxAttributes() = 0;
virtual void EndAuxAttributes() = 0;
- virtual void BeginDx12VulkanVersionInfo() = 0;
- virtual void EndDx12VulkanVersionInfo() = 0;
-
virtual void BeginOverlayInfo() = 0;
virtual void EndOverlayInfo() = 0;
diff --git a/chromium/gpu/config/gpu_info_collector.cc b/chromium/gpu/config/gpu_info_collector.cc
index 66e061f6020..cfdcdcaf632 100644
--- a/chromium/gpu/config/gpu_info_collector.cc
+++ b/chromium/gpu/config/gpu_info_collector.cc
@@ -344,7 +344,14 @@ bool CollectGraphicsInfoGL(GPUInfo* gpu_info) {
gpu_info->pixel_shader_version = glsl_version;
gpu_info->vertex_shader_version = glsl_version;
- IdentifyActiveGPU(gpu_info);
+ bool active_gpu_identified = false;
+#if defined(OS_WIN)
+ active_gpu_identified = IdentifyActiveGPUWithLuid(gpu_info);
+#endif // OS_WIN
+
+ if (!active_gpu_identified)
+ IdentifyActiveGPU(gpu_info);
+
return true;
}
diff --git a/chromium/gpu/config/gpu_info_collector.h b/chromium/gpu/config/gpu_info_collector.h
index b2423096815..dfa92445ebb 100644
--- a/chromium/gpu/config/gpu_info_collector.h
+++ b/chromium/gpu/config/gpu_info_collector.h
@@ -45,8 +45,11 @@ GPU_EXPORT bool CollectContextGraphicsInfo(GPUInfo* gpu_info);
#if defined(OS_WIN)
// Collect the DirectX Diagnostics information about the attached displays.
GPU_EXPORT bool GetDxDiagnostics(DxDiagNode* output);
-GPU_EXPORT void RecordGpuSupportedRuntimeVersionHistograms(
- Dx12VulkanVersionInfo* dx12_vulkan_version_info);
+GPU_EXPORT uint32_t GetGpuSupportedD3D12Version();
+GPU_EXPORT void RecordGpuSupportedDx12VersionHistograms(
+ uint32_t d3d12_feature_level);
+GPU_EXPORT uint32_t
+GetGpuSupportedVulkanVersion(const gpu::GPUInfo::GPUDevice& gpu_device);
// Iterate through all adapters and create a hardware D3D11 device on each
// adapter. If that succeeds, query the highest feature level it supports and
@@ -59,6 +62,9 @@ GPU_EXPORT bool CollectD3D11FeatureInfo(D3D_FEATURE_LEVEL* d3d11_feature_level,
// Collect the hardware overlay support flags.
GPU_EXPORT void CollectHardwareOverlayInfo(OverlayInfo* overlay_info);
+
+// Identify the active GPU based on LUIDs.
+bool IdentifyActiveGPUWithLuid(GPUInfo* gpu_info);
#endif // OS_WIN
// Create a GL context and collect GL strings and versions.
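
The header now splits the old combined DX12/Vulkan collection into separate calls. A sketch of a plausible call sequence under the new API, assuming a made-up wrapper name (the actual call site is outside this diff):

  // Sketch, not from the patch: populate the new GPUInfo fields using the
  // split collection functions declared above.
  #include "gpu/config/gpu_info.h"
  #include "gpu/config/gpu_info_collector.h"

  void CollectDx12VulkanInfo(gpu::GPUInfo* gpu_info) {
    gpu_info->d3d12_feature_level = gpu::GetGpuSupportedD3D12Version();
    gpu::RecordGpuSupportedDx12VersionHistograms(
        gpu_info->d3d12_feature_level);
    // The Vulkan probe consults the active device's driver version so it can
    // skip blacklisted drivers before creating a VkInstance.
    gpu_info->vulkan_version = gpu::GetGpuSupportedVulkanVersion(gpu_info->gpu);
  }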
diff --git a/chromium/gpu/config/gpu_info_collector_win.cc b/chromium/gpu/config/gpu_info_collector_win.cc
index f70d2abe277..f43a7e9ad8a 100644
--- a/chromium/gpu/config/gpu_info_collector_win.cc
+++ b/chromium/gpu/config/gpu_info_collector_win.cc
@@ -35,6 +35,8 @@
#include "gpu/config/gpu_util.h"
#include "third_party/vulkan_headers/include/vulkan/vulkan.h"
#include "ui/gl/direct_composition_surface_win.h"
+#include "ui/gl/gl_angle_util_win.h"
+#include "ui/gl/gl_surface_egl.h"
namespace gpu {
@@ -83,6 +85,32 @@ OverlaySupport FlagsToOverlaySupport(bool overlays_supported, UINT flags) {
return OverlaySupport::kNone;
}
+bool GetActiveAdapterLuid(LUID* luid) {
+ Microsoft::WRL::ComPtr<ID3D11Device> d3d11_device =
+ gl::QueryD3D11DeviceObjectFromANGLE();
+ if (!d3d11_device)
+ return false;
+
+ Microsoft::WRL::ComPtr<IDXGIDevice> dxgi_device;
+ if (FAILED(d3d11_device.As(&dxgi_device)))
+ return false;
+
+ Microsoft::WRL::ComPtr<IDXGIAdapter> adapter;
+ if (FAILED(dxgi_device->GetAdapter(&adapter)))
+ return false;
+
+ DXGI_ADAPTER_DESC desc;
+ if (FAILED(adapter->GetDesc(&desc)))
+ return false;
+
+ // Zero isn't a valid LUID.
+ if (desc.AdapterLuid.HighPart == 0 && desc.AdapterLuid.LowPart == 0)
+ return false;
+
+ *luid = desc.AdapterLuid;
+ return true;
+}
+
} // namespace
#if BUILDFLAG(GOOGLE_CHROME_BRANDING) && defined(OFFICIAL_BUILD)
@@ -118,14 +146,22 @@ void CollectHardwareOverlayInfo(OverlayInfo* overlay_info) {
overlay_info->supports_overlays,
gl::DirectCompositionSurfaceWin::GetOverlaySupportFlags(
DXGI_FORMAT_YUY2));
+ overlay_info->bgra8_overlay_support = FlagsToOverlaySupport(
+ overlay_info->supports_overlays,
+ gl::DirectCompositionSurfaceWin::GetOverlaySupportFlags(
+ DXGI_FORMAT_B8G8R8A8_UNORM));
+ overlay_info->rgb10a2_overlay_support = FlagsToOverlaySupport(
+ overlay_info->supports_overlays,
+ gl::DirectCompositionSurfaceWin::GetOverlaySupportFlags(
+ DXGI_FORMAT_R10G10B10A2_UNORM));
}
}
bool CollectDriverInfoD3D(GPUInfo* gpu_info) {
TRACE_EVENT0("gpu", "CollectDriverInfoD3D");
- Microsoft::WRL::ComPtr<IDXGIFactory> dxgi_factory;
- HRESULT hr = ::CreateDXGIFactory(IID_PPV_ARGS(&dxgi_factory));
+ Microsoft::WRL::ComPtr<IDXGIFactory1> dxgi_factory;
+ HRESULT hr = ::CreateDXGIFactory1(IID_PPV_ARGS(&dxgi_factory));
if (FAILED(hr))
return false;
@@ -144,6 +180,7 @@ bool CollectDriverInfoD3D(GPUInfo* gpu_info) {
device.device_id = desc.DeviceId;
device.sub_sys_id = desc.SubSysId;
device.revision = desc.Revision;
+ device.luid = desc.AdapterLuid;
LARGE_INTEGER umd_version;
hr = dxgi_adapter->CheckInterfaceSupport(__uuidof(IDXGIDevice),
@@ -199,15 +236,13 @@ bool CollectDriverInfoD3D(GPUInfo* gpu_info) {
}
// DirectX 12 is included with Windows 10 and Server 2016.
-void GetGpuSupportedD3D12Version(Dx12VulkanVersionInfo* info) {
+uint32_t GetGpuSupportedD3D12Version() {
TRACE_EVENT0("gpu", "GetGpuSupportedD3D12Version");
- info->supports_dx12 = false;
- info->d3d12_feature_level = 0;
base::ScopedNativeLibrary d3d12_library(
base::FilePath(FILE_PATH_LITERAL("d3d12.dll")));
if (!d3d12_library.is_valid())
- return;
+ return 0;
// The order of feature levels to attempt to create in D3D CreateDevice
const D3D_FEATURE_LEVEL feature_levels[] = {
@@ -224,14 +259,19 @@ void GetGpuSupportedD3D12Version(Dx12VulkanVersionInfo* info) {
for (auto level : feature_levels) {
if (SUCCEEDED(D3D12CreateDevice(nullptr, level, _uuidof(ID3D12Device),
nullptr))) {
- info->d3d12_feature_level = level;
- info->supports_dx12 = (level >= D3D_FEATURE_LEVEL_12_0) ? true : false;
- break;
+ return level;
}
}
}
+ return 0;
}
+// Old graphics drivers are installed in the Windows system directory
+// (c:\windows\system32 or SysWOW64), so those versions can be detected without
+// specifying an absolute path. For newer versions (>= ~2018) this won't work:
+// the newer graphics drivers are located in
+// c:\windows\system32\DriverStore\FileRepository\xxx.infxxx, which contains a
+// different number for each installation.
bool BadAMDVulkanDriverVersion() {
// Both 32-bit and 64-bit dll are broken. If 64-bit doesn't exist,
// 32-bit dll will be used to detect the AMD Vulkan driver.
@@ -260,6 +300,31 @@ bool BadAMDVulkanDriverVersion() {
return false;
}
+// Vulkan 1.1 was released by the Khronos Group on March 7, 2018.
+// Blacklist all driver versions without Vulkan 1.1 support and those that cause
+// lots of crashes.
+bool BadGraphicsDriverVersions(const gpu::GPUInfo::GPUDevice& gpu_device) {
+ // GPU Device info is not available in gpu_integration_test.info-collection
+ // with --no-delay-for-dx12-vulkan-info-collection.
+ if (gpu_device.driver_version.empty())
+ return false;
+
+ base::Version driver_version(gpu_device.driver_version);
+ if (!driver_version.IsValid())
+ return true;
+
+ // AMD Vulkan drivers - amdvlk64.dll
+ constexpr uint32_t kAMDVendorId = 0x1002;
+ if (gpu_device.vendor_id == kAMDVendorId) {
+ // 26.20.12028.2 (2019) - 1,188,048 crashes as of 5/14/2020.
+ // Returns -1, 0, 1 for <, ==, >.
+ if (driver_version.CompareTo(base::Version("26.20.12028.2")) == 0)
+ return true;
+ }
+
+ return false;
+}
+
bool InitVulkan(base::NativeLibrary* vulkan_library,
PFN_vkGetInstanceProcAddr* vkGetInstanceProcAddr,
PFN_vkCreateInstance* vkCreateInstance,
@@ -340,11 +405,9 @@ bool InitVulkanInstanceProc(
return false;
}
-void GetGpuSupportedVulkanVersionAndExtensions(
- Dx12VulkanVersionInfo* info,
- const std::vector<const char*>& requested_vulkan_extensions,
- std::vector<bool>* extension_support) {
- TRACE_EVENT0("gpu", "GetGpuSupportedVulkanVersionAndExtensions");
+uint32_t GetGpuSupportedVulkanVersion(
+ const gpu::GPUInfo::GPUDevice& gpu_device) {
+ TRACE_EVENT0("gpu", "GetGpuSupportedVulkanVersion");
base::NativeLibrary vulkan_library;
PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
@@ -353,20 +416,26 @@ void GetGpuSupportedVulkanVersionAndExtensions(
PFN_vkEnumerateDeviceExtensionProperties vkEnumerateDeviceExtensionProperties;
VkInstance vk_instance = VK_NULL_HANDLE;
uint32_t physical_device_count = 0;
- info->supports_vulkan = false;
- info->vulkan_version = 0;
// Skip if the system has an older AMD Vulkan driver amdvlk64.dll or
// amdvlk32.dll which crashes when vkCreateInstance() is called. This bug has
// been fixed in the latest AMD driver.
+ // Detected by the file version of amdvlk64.dll.
if (BadAMDVulkanDriverVersion()) {
- return;
+ return 0;
}
+ // Don't collect any info if the graphics Vulkan driver is blacklisted or
+ // doesn't support Vulkan 1.1.
+ // Detected by the graphics driver version returned by DXGI.
+ if (BadGraphicsDriverVersions(gpu_device))
+ return 0;
+
// Only supports a version >= 1.1.0.
+ uint32_t vulkan_version = 0;
if (!InitVulkan(&vulkan_library, &vkGetInstanceProcAddr, &vkCreateInstance,
- &info->vulkan_version)) {
- return;
+ &vulkan_version)) {
+ return 0;
}
VkApplicationInfo app_info = {};
@@ -382,7 +451,7 @@ void GetGpuSupportedVulkanVersionAndExtensions(
create_info.ppEnabledExtensionNames = enabled_instance_extensions.data();
// Get the Vulkan API version supported in the GPU driver
- int highest_minor_version = VK_VERSION_MINOR(info->vulkan_version);
+ int highest_minor_version = VK_VERSION_MINOR(vulkan_version);
for (int minor_version = highest_minor_version; minor_version >= 1;
--minor_version) {
app_info.apiVersion = VK_MAKE_VERSION(1, minor_version, 0);
@@ -394,9 +463,7 @@ void GetGpuSupportedVulkanVersionAndExtensions(
result = vkEnumeratePhysicalDevices(vk_instance, &physical_device_count,
nullptr);
if (result == VK_SUCCESS && physical_device_count > 0) {
- info->supports_vulkan = true;
- info->vulkan_version = app_info.apiVersion;
- break;
+ return app_info.apiVersion;
} else {
// Skip destroy here. GPU process shutdown will unload all loaded DLLs.
// vkDestroyInstance(vk_instance, nullptr);
@@ -405,37 +472,6 @@ void GetGpuSupportedVulkanVersionAndExtensions(
}
}
- // Check whether the requested_vulkan_extensions are supported
- if (info->supports_vulkan) {
- std::vector<VkPhysicalDevice> physical_devices(physical_device_count);
- vkEnumeratePhysicalDevices(vk_instance, &physical_device_count,
- physical_devices.data());
-
- // physical_devices[0]: Only query the default device for now
- uint32_t property_count;
- vkEnumerateDeviceExtensionProperties(physical_devices[0], nullptr,
- &property_count, nullptr);
-
- std::vector<VkExtensionProperties> extension_properties(property_count);
- if (property_count > 0) {
- vkEnumerateDeviceExtensionProperties(physical_devices[0], nullptr,
- &property_count,
- extension_properties.data());
- }
-
- for (size_t i = 0; i < requested_vulkan_extensions.size(); ++i) {
- for (size_t p = 0; p < property_count; ++p) {
- if (strcmp(requested_vulkan_extensions[i],
- extension_properties[p].extensionName) == 0) {
- (*extension_support)[i] = true;
- break;
- }
- }
- }
- } else {
- info->vulkan_version = VK_MAKE_VERSION(1, 0, 0);
- }
-
// From the crash reports, calling the following two functions might cause a
// crash in the Vulkan loader or in the Vulkan driver. To work around it,
// don't explicitly unload the DLL. Instead, GPU process shutdown will unload
@@ -444,40 +480,22 @@ void GetGpuSupportedVulkanVersionAndExtensions(
// vkDestroyInstance(vk_instance, nullptr);
// }
// base::UnloadNativeLibrary(vulkan_library);
+ return 0;
}
-void RecordGpuSupportedRuntimeVersionHistograms(Dx12VulkanVersionInfo* info) {
- // D3D
- GetGpuSupportedD3D12Version(info);
- UMA_HISTOGRAM_BOOLEAN("GPU.SupportsDX12", info->supports_dx12);
+void RecordGpuSupportedDx12VersionHistograms(uint32_t d3d12_feature_level) {
+ bool supports_dx12 =
+ (d3d12_feature_level >= D3D_FEATURE_LEVEL_12_0) ? true : false;
+ UMA_HISTOGRAM_BOOLEAN("GPU.SupportsDX12", supports_dx12);
UMA_HISTOGRAM_ENUMERATION(
"GPU.D3D12FeatureLevel",
- ConvertToHistogramFeatureLevel(info->d3d12_feature_level));
-
- // Vulkan
- const std::vector<const char*> vulkan_extensions = {
- "VK_KHR_external_memory_win32", "VK_KHR_external_semaphore_win32",
- "VK_KHR_win32_keyed_mutex"};
- std::vector<bool> extension_support(vulkan_extensions.size(), false);
- GetGpuSupportedVulkanVersionAndExtensions(info, vulkan_extensions,
- &extension_support);
-
- UMA_HISTOGRAM_BOOLEAN("GPU.SupportsVulkan", info->supports_vulkan);
- UMA_HISTOGRAM_ENUMERATION(
- "GPU.VulkanVersion",
- ConvertToHistogramVulkanVersion(info->vulkan_version));
-
- for (size_t i = 0; i < vulkan_extensions.size(); ++i) {
- std::string name = "GPU.VulkanExtSupport.";
- name.append(vulkan_extensions[i]);
- base::UmaHistogramBoolean(name, extension_support[i]);
- }
+ ConvertToHistogramFeatureLevel(d3d12_feature_level));
}
bool CollectD3D11FeatureInfo(D3D_FEATURE_LEVEL* d3d11_feature_level,
bool* has_discrete_gpu) {
- Microsoft::WRL::ComPtr<IDXGIFactory> dxgi_factory;
- if (FAILED(::CreateDXGIFactory(IID_PPV_ARGS(&dxgi_factory))))
+ Microsoft::WRL::ComPtr<IDXGIFactory1> dxgi_factory;
+ if (FAILED(::CreateDXGIFactory1(IID_PPV_ARGS(&dxgi_factory))))
return false;
base::ScopedNativeLibrary d3d11_library(
@@ -614,4 +632,30 @@ bool CollectBasicGraphicsInfo(GPUInfo* gpu_info) {
return CollectDriverInfoD3D(gpu_info);
}
+bool IdentifyActiveGPUWithLuid(GPUInfo* gpu_info) {
+ LUID luid;
+ if (!GetActiveAdapterLuid(&luid))
+ return false;
+
+ gpu_info->gpu.active = false;
+ for (size_t i = 0; i < gpu_info->secondary_gpus.size(); i++)
+ gpu_info->secondary_gpus[i].active = false;
+
+ if (gpu_info->gpu.luid.HighPart == luid.HighPart &&
+ gpu_info->gpu.luid.LowPart == luid.LowPart) {
+ gpu_info->gpu.active = true;
+ return true;
+ }
+
+ for (size_t i = 0; i < gpu_info->secondary_gpus.size(); i++) {
+ if (gpu_info->secondary_gpus[i].luid.HighPart == luid.HighPart &&
+ gpu_info->secondary_gpus[i].luid.LowPart == luid.LowPart) {
+ gpu_info->secondary_gpus[i].active = true;
+ return true;
+ }
+ }
+
+ return false;
+}
+
} // namespace gpu
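
IdentifyActiveGPUWithLuid above repeats the same HighPart/LowPart comparison for the primary GPU and each secondary GPU. A minimal sketch of that comparison as a standalone helper, with a stand-in LUID struct so the snippet compiles without <windows.h> (neither the helper nor the struct is part of the patch):

  #include <cstdint>

  struct LUID {  // stand-in for the Windows LUID type
    uint32_t LowPart;
    int32_t HighPart;
  };

  // Two LUIDs identify the same adapter iff both halves match.
  bool LuidEquals(const LUID& a, const LUID& b) {
    return a.HighPart == b.HighPart && a.LowPart == b.LowPart;
  }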
diff --git a/chromium/gpu/config/gpu_info_unittest.cc b/chromium/gpu/config/gpu_info_unittest.cc
index 13bb5d38eff..3bd092c8fdc 100644
--- a/chromium/gpu/config/gpu_info_unittest.cc
+++ b/chromium/gpu/config/gpu_info_unittest.cc
@@ -16,7 +16,6 @@ class TestGPUInfoEnumerator : public gpu::GPUInfo::Enumerator {
video_decode_accelerator_profile_active_(false),
video_encode_accelerator_profile_active_(false),
image_decode_accelerator_profile_active_(false),
- dx12_vulkan_version_info_active_(false),
overlay_info_active_(false),
aux_attributes_active_(false) {}
@@ -63,14 +62,6 @@ class TestGPUInfoEnumerator : public gpu::GPUInfo::Enumerator {
image_decode_accelerator_profile_active_ = false;
}
- void BeginDx12VulkanVersionInfo() override {
- dx12_vulkan_version_info_active_ = true;
- }
-
- void EndDx12VulkanVersionInfo() override {
- dx12_vulkan_version_info_active_ = false;
- }
-
void BeginOverlayInfo() override { overlay_info_active_ = true; }
void EndOverlayInfo() override { overlay_info_active_ = false; }
@@ -94,10 +85,6 @@ class TestGPUInfoEnumerator : public gpu::GPUInfo::Enumerator {
return image_decode_accelerator_profile_active_;
}
- bool dx12_vulkan_version_info_active() const {
- return dx12_vulkan_version_info_active_;
- }
-
bool aux_attributes_active() const { return aux_attributes_active_; }
private:
@@ -105,7 +92,6 @@ class TestGPUInfoEnumerator : public gpu::GPUInfo::Enumerator {
bool video_decode_accelerator_profile_active_;
bool video_encode_accelerator_profile_active_;
bool image_decode_accelerator_profile_active_;
- bool dx12_vulkan_version_info_active_;
bool overlay_info_active_;
bool aux_attributes_active_;
};
@@ -121,7 +107,6 @@ TEST(GpuInfoTest, FieldEditStates) {
EXPECT_FALSE(enumerator.video_decode_accelerator_profile_active());
EXPECT_FALSE(enumerator.video_encode_accelerator_profile_active());
EXPECT_FALSE(enumerator.image_decode_accelerator_profile_active());
- EXPECT_FALSE(enumerator.dx12_vulkan_version_info_active());
EXPECT_FALSE(enumerator.aux_attributes_active());
}
diff --git a/chromium/gpu/config/gpu_lists_version.h b/chromium/gpu/config/gpu_lists_version.h
index 7fa92c79085..1dd6f587fda 100644
--- a/chromium/gpu/config/gpu_lists_version.h
+++ b/chromium/gpu/config/gpu_lists_version.h
@@ -3,6 +3,6 @@
#ifndef GPU_CONFIG_GPU_LISTS_VERSION_H_
#define GPU_CONFIG_GPU_LISTS_VERSION_H_
-#define GPU_LISTS_VERSION "80c974bf7990b9735a8e885046fc5c9b1da4796c"
+#define GPU_LISTS_VERSION "59840fa678c084e98201a428c7db996326e0c749"
#endif // GPU_CONFIG_GPU_LISTS_VERSION_H_
diff --git a/chromium/gpu/config/gpu_preferences_unittest.cc b/chromium/gpu/config/gpu_preferences_unittest.cc
index f2e0078f182..3cc5ecc9b74 100644
--- a/chromium/gpu/config/gpu_preferences_unittest.cc
+++ b/chromium/gpu/config/gpu_preferences_unittest.cc
@@ -5,8 +5,10 @@
#include <algorithm>
#include <cstring>
+#include "base/command_line.h"
#include "base/message_loop/message_pump_type.h"
#include "build/build_config.h"
+#include "gpu/config/gpu_switches.h"
#include "gpu/ipc/common/gpu_preferences.mojom.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -166,8 +168,7 @@ TEST(GpuPreferencesTest, EncodeDecode) {
GPU_PREFERENCES_FIELD(enable_oop_rasterization, true)
GPU_PREFERENCES_FIELD(disable_oop_rasterization, true)
GPU_PREFERENCES_FIELD(watchdog_starts_backgrounded, true)
- GPU_PREFERENCES_FIELD_ENUM(gr_context_type,
- GrContextType::kVulkan,
+ GPU_PREFERENCES_FIELD_ENUM(gr_context_type, GrContextType::kVulkan,
mojom::GrContextType::kVulkan)
GPU_PREFERENCES_FIELD_ENUM(use_vulkan, VulkanImplementationName::kNative,
mojom::VulkanImplementationName::kNative)
@@ -196,4 +197,84 @@ TEST(GpuPreferencesTest, EncodeDecode) {
}
}
+// Helper test for decoding GPU preferences from a crash dump string.
+TEST(GpuPreferencesTest, DISABLED_DecodePreferences) {
+ auto* command_line = base::CommandLine::ForCurrentProcess();
+ if (!command_line->HasSwitch(switches::kGpuPreferences)) {
+ LOG(ERROR) << "Please specify the preferences to decode via "
+ << switches::kGpuPreferences;
+ return;
+ }
+
+ const auto preferences =
+ command_line->GetSwitchValueASCII(switches::kGpuPreferences);
+
+ gpu::GpuPreferences gpu_preferences;
+ if (!gpu_preferences.FromSwitchValue(preferences)) {
+ LOG(ERROR) << "Failed to decode preferences: " << preferences;
+ return;
+ }
+
+ printf("GpuPreferences = {\n");
+#define PRINT_BOOL(key) \
+ printf(" %s: %s\n", #key, gpu_preferences.key ? "true" : "false")
+#define PRINT_INT(key) \
+ printf(" %s: %d\n", #key, static_cast<uint32_t>(gpu_preferences.key))
+
+ PRINT_BOOL(disable_accelerated_video_decode);
+ PRINT_BOOL(disable_accelerated_video_encode);
+ PRINT_BOOL(gpu_startup_dialog);
+ PRINT_BOOL(disable_gpu_watchdog);
+ PRINT_BOOL(gpu_sandbox_start_early);
+ PRINT_BOOL(enable_low_latency_dxva);
+ PRINT_BOOL(enable_zero_copy_dxgi_video);
+ PRINT_BOOL(enable_nv12_dxgi_video);
+ PRINT_BOOL(enable_media_foundation_vea_on_windows7);
+ PRINT_BOOL(disable_software_rasterizer);
+ PRINT_BOOL(log_gpu_control_list_decisions);
+ PRINT_BOOL(compile_shader_always_succeeds);
+ PRINT_BOOL(disable_gl_error_limit);
+ PRINT_BOOL(disable_glsl_translator);
+ PRINT_BOOL(disable_shader_name_hashing);
+ PRINT_BOOL(enable_gpu_command_logging);
+ PRINT_BOOL(enable_gpu_debugging);
+ PRINT_BOOL(enable_gpu_service_logging_gpu);
+ PRINT_BOOL(enable_gpu_driver_debug_logging);
+ PRINT_BOOL(disable_gpu_program_cache);
+ PRINT_BOOL(enforce_gl_minimums);
+ PRINT_INT(force_gpu_mem_available_bytes);
+ PRINT_INT(force_gpu_mem_discardable_limit_bytes);
+ PRINT_INT(gpu_program_cache_size);
+ PRINT_BOOL(disable_gpu_shader_disk_cache);
+ PRINT_BOOL(enable_threaded_texture_mailboxes);
+ PRINT_BOOL(gl_shader_interm_output);
+ PRINT_BOOL(emulate_shader_precision);
+ PRINT_BOOL(enable_gpu_service_logging);
+ PRINT_BOOL(enable_gpu_service_tracing);
+ PRINT_BOOL(use_passthrough_cmd_decoder);
+ PRINT_BOOL(disable_biplanar_gpu_memory_buffers_for_video_frames);
+ for (size_t i = 0; i < gpu_preferences.texture_target_exception_list.size();
+ ++i) {
+ PRINT_INT(texture_target_exception_list[i].usage);
+ PRINT_INT(texture_target_exception_list[i].format);
+ }
+ PRINT_BOOL(ignore_gpu_blacklist);
+ PRINT_BOOL(enable_oop_rasterization);
+ PRINT_BOOL(disable_oop_rasterization);
+ PRINT_BOOL(watchdog_starts_backgrounded);
+ PRINT_INT(gr_context_type);
+ PRINT_INT(use_vulkan);
+ PRINT_BOOL(enable_gpu_benchmarking_extension);
+ PRINT_BOOL(enable_webgpu);
+ PRINT_BOOL(enable_dawn_backend_validation);
+ PRINT_BOOL(enable_gpu_blocked_time_metric);
+ PRINT_BOOL(enable_perf_data_collection);
+#if defined(USE_OZONE)
+ PRINT_INT(message_pump_type);
+#endif
+ PRINT_BOOL(enable_native_gpu_memory_buffers);
+ PRINT_BOOL(force_disable_new_accelerated_video_decoder);
+ printf("}\n");
+}
+
} // namespace gpu
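Because the test is prefixed DISABLED_, it only runs when requested explicitly; a plausible invocation, assuming the usual gtest flags and that switches::kGpuPreferences spells --gpu-preferences:

  gpu_unittests --gtest_also_run_disabled_tests \
      --gtest_filter=GpuPreferencesTest.DISABLED_DecodePreferences \
      --gpu-preferences=<encoded string from the crash dump>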
diff --git a/chromium/gpu/config/gpu_switches.cc b/chromium/gpu/config/gpu_switches.cc
index c44e30735c0..301e97583d0 100644
--- a/chromium/gpu/config/gpu_switches.cc
+++ b/chromium/gpu/config/gpu_switches.cc
@@ -14,7 +14,7 @@ const char kDisableGpuRasterization[] = "disable-gpu-rasterization";
// Skia GPU backend. Only valid with GPU accelerated compositing.
const char kEnableGpuRasterization[] = "enable-gpu-rasterization";
-// Select a different set of GPU blacklist entries with the specificed
+// Select a different set of GPU blacklist entries with the specified
// test_group ID.
const char kGpuBlacklistTestGroup[] = "gpu-blacklist-test-group";
diff --git a/chromium/gpu/config/gpu_util.cc b/chromium/gpu/config/gpu_util.cc
index d5517a075ff..066cef555d2 100644
--- a/chromium/gpu/config/gpu_util.cc
+++ b/chromium/gpu/config/gpu_util.cc
@@ -20,6 +20,7 @@
#include "base/command_line.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
+#include "base/notreached.h"
#include "base/path_service.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
@@ -170,6 +171,15 @@ GpuFeatureStatus GetGpuRasterizationFeatureStatus(
if (blacklisted_features.count(GPU_FEATURE_TYPE_GPU_RASTERIZATION))
return kGpuFeatureStatusBlacklisted;
+ // Enable GPU rasterization when Vulkan is enabled, unless the feature is
+ // explicitly disabled from the command line.
+ if (base::FeatureList::IsEnabled(features::kVulkan) &&
+ !base::FeatureList::GetInstance()->IsFeatureOverriddenFromCommandLine(
+ features::kDefaultEnableGpuRasterization.name,
+ base::FeatureList::OVERRIDE_DISABLE_FEATURE)) {
+ return kGpuFeatureStatusEnabled;
+ }
+
// Gpu Rasterization on platforms that are not fully enabled is controlled by
// a finch experiment.
if (!base::FeatureList::IsEnabled(features::kDefaultEnableGpuRasterization))
@@ -207,11 +217,14 @@ GpuFeatureStatus GetOopRasterizationFeatureStatus(
else if (gpu_preferences.enable_oop_rasterization)
return kGpuFeatureStatusEnabled;
- // TODO(enne): Eventually oop rasterization will replace gpu rasterization,
- // and so we will need to address the underlying bugs or turn of GPU
- // rasterization for these cases.
- if (blacklisted_features.count(GPU_FEATURE_TYPE_OOP_RASTERIZATION))
- return kGpuFeatureStatusBlacklisted;
+ // Enable OOP rasterization when Vulkan is enabled, unless the feature is
+ // explicitly disabled from the command line.
+ if (base::FeatureList::IsEnabled(features::kVulkan) &&
+ !base::FeatureList::GetInstance()->IsFeatureOverriddenFromCommandLine(
+ features::kDefaultEnableOopRasterization.name,
+ base::FeatureList::OVERRIDE_DISABLE_FEATURE)) {
+ return kGpuFeatureStatusEnabled;
+ }
// OOP Rasterization on platforms that are not fully enabled is controlled by
// a finch experiment.
@@ -1021,21 +1034,4 @@ std::string VulkanVersionToString(uint32_t vulkan_version) {
}
}
#endif // OS_WIN
-
-VulkanVersion ConvertToHistogramVulkanVersion(uint32_t vulkan_version) {
- if (vulkan_version < VK_MAKE_VERSION(1, 0, 0))
- return VulkanVersion::kVulkanVersionUnknown;
- else if (vulkan_version < VK_MAKE_VERSION(1, 1, 0))
- return VulkanVersion::kVulkanVersion_1_0_0;
- else if (vulkan_version < VK_MAKE_VERSION(1, 2, 0))
- return VulkanVersion::kVulkanVersion_1_1_0;
- else if (vulkan_version < VK_MAKE_VERSION(1, 3, 0))
- return VulkanVersion::kVulkanVersion_1_2_0;
- else {
- // Need to add 1.3.0+ to enum VulkanVersion.
- NOTREACHED();
- return VulkanVersion::kVulkanVersion_1_2_0;
- }
-}
-
} // namespace gpu
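Both blocks above apply the same rule: when features::kVulkan is enabled, rasterization is forced on unless the matching feature was explicitly disabled on the command line (e.g. via --disable-features). Condensed into one sketch, using only names that appear in the patch:

  // Sketch of the shared pattern.
  bool VulkanForcesOn(const base::Feature& raster_feature) {
    return base::FeatureList::IsEnabled(features::kVulkan) &&
           !base::FeatureList::GetInstance()->IsFeatureOverriddenFromCommandLine(
               raster_feature.name, base::FeatureList::OVERRIDE_DISABLE_FEATURE);
  }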
diff --git a/chromium/gpu/config/gpu_util.h b/chromium/gpu/config/gpu_util.h
index b4086559520..22ba88d991f 100644
--- a/chromium/gpu/config/gpu_util.h
+++ b/chromium/gpu/config/gpu_util.h
@@ -97,20 +97,6 @@ GPU_EXPORT std::string D3DFeatureLevelToString(uint32_t d3d_feature_level);
GPU_EXPORT std::string VulkanVersionToString(uint32_t vulkan_version);
#endif // OS_WIN
-// These values are persisted to logs. Entries should not be renumbered and
-// numeric values should never be reused.
-// This should match enum VulkanVersion in \tools\metrics\histograms\enums.xml
-enum class VulkanVersion {
- kVulkanVersionUnknown = 0,
- kVulkanVersion_1_0_0 = 1,
- kVulkanVersion_1_1_0 = 2,
- kVulkanVersion_1_2_0 = 3,
- kMaxValue = kVulkanVersion_1_2_0,
-};
-
-GPU_EXPORT VulkanVersion
-ConvertToHistogramVulkanVersion(uint32_t vulkan_version);
-
} // namespace gpu
#endif // GPU_CONFIG_GPU_UTIL_H_
diff --git a/chromium/gpu/config/gpu_workaround_list.txt b/chromium/gpu/config/gpu_workaround_list.txt
index fd5b9bb5e2a..5edf0d7b0ca 100644
--- a/chromium/gpu/config/gpu_workaround_list.txt
+++ b/chromium/gpu/config/gpu_workaround_list.txt
@@ -12,7 +12,9 @@ count_all_in_varyings_packing
decode_encode_srgb_for_generatemipmap
depth_stencil_renderbuffer_resize_emulation
disable_2d_canvas_auto_flush
-disable_accelerated_vpx_decode
+disable_accelerated_av1_decode
+disable_accelerated_vp8_decode
+disable_accelerated_vp9_decode
disable_async_readpixels
disable_av_sample_buffer_display_layer
disable_blend_equation_advanced
@@ -118,6 +120,7 @@ use_es2_for_oopr
use_gpu_driver_workaround_for_testing
use_intermediary_for_copy_texture_image
use_non_zero_size_for_client_side_stream_buffers
+use_single_video_decoder_texture
use_unused_standard_shared_blocks
use_virtualized_gl_contexts
validate_multisample_buffer_allocation
diff --git a/chromium/gpu/config/process_json.py b/chromium/gpu/config/process_json.py
index 1ec57b6a483..31c53351a3d 100755
--- a/chromium/gpu/config/process_json.py
+++ b/chromium/gpu/config/process_json.py
@@ -56,6 +56,16 @@ Legal: "24.20.100.7000", "0.0.100.7000", "0.0.0.7000", "0.0.100.0"
Illegal: "24.0.0.0", "24.20.0.0", "0.0.99.0"
'''
+NVIDIA_DRIVER_VERSION_SCHEMA = '''
+The version format used by Nvidia is ABC.DE, where A-E are any digit. When
+queried, Chrome detects XX.XX.XXXA.BCDE, where each 'X' is a digit that can
+be ignored. Chrome re-formats this to ABC.DE and compares it to the version
+listed here.
+
+So, Chrome might detect 26.21.0014.4575, which would be given here as 445.75 in
+the Nvidia version schema. The 26.21.001 is ignored.
+'''
+
def check_intel_driver_version(version):
ver_list = version.split('.')
@@ -68,6 +78,20 @@ def check_intel_driver_version(version):
return False
return True
+def check_nvidia_driver_version(version):
+ ver_list = version.split('.')
+ # Allow "456" to match "456.*", so allow a single-entry list.
+ if len(ver_list) == 0 or len(ver_list) > 2:
+ return False
+ elif len(ver_list) == 2 and len(ver_list[1]) != 2:
+ return False
+ # Must start with three digits, whether it's "456.*" or "456.78".
+ if len(ver_list[0]) != 3:
+ return False
+ for ver in ver_list:
+ if not ver.isdigit():
+ return False
+ return True
def load_software_rendering_list_features(feature_type_filename):
header_file = open(feature_type_filename, 'r')
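Worked example for the schema and check above: Chrome detects 26.21.0014.4575; the leading 26.21.001 is discarded, leaving A=4 and BCDE=4575, so the list entry is 445.75, which check_nvidia_driver_version accepts (three digits, a dot, two digits). A C++ sketch of that reformatting, assuming the actual conversion lives on the C++ side of the control list:

  // Sketch: "XX.XX.XXXA.BCDE" -> "ABC.DE" (26.21.0014.4575 -> 445.75).
  #include <cctype>
  #include <string>
  std::string NvidiaSchemaVersion(const std::string& detected) {
    std::string digits;
    for (char c : detected)
      if (std::isdigit(static_cast<unsigned char>(c)))
        digits += c;
    if (digits.size() < 5)
      return std::string();  // Not enough digits to form ABC.DE.
    std::string tail = digits.substr(digits.size() - 5);  // "44575"
    return tail.substr(0, 3) + "." + tail.substr(3);      // "445.75"
  }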
@@ -236,6 +260,7 @@ def write_version(version_info, name_tag, data_file):
schema_map = {
'common': 'Common',
'intel_driver': 'IntelDriver',
+ 'nvidia_driver': 'NvidiaDriver',
'': 'Common',
}
assert schema in schema_map
@@ -299,9 +324,48 @@ def write_string_value(string, name_tag, data_file):
def write_boolean_value(value, name_tag, data_file):
data_file.write('%s, // %s\n' % (str(value).lower(), name_tag))
+
def write_integer_value(value, name_tag, data_file):
data_file.write('%s, // %s\n' % (str(value), name_tag))
+
+def write_device_list(entry_id, device_id, device_revision, is_exception,
+ exception_id, unique_symbol_id, data_file,
+ data_helper_file):
+ if device_id:
+ # Devices may be specified in one of three ways:
+ # 1) only specify device IDs
+ # 2) specify one device ID, associated with multiple revisions
+ # 3) specify k device IDs associated with k device revisions.
+ device_size = len(device_id)
+ if device_size == 1 and device_revision and len(device_revision) > 1:
+ device_size = len(device_revision)
+ for ii in range(device_size - 1):
+ device_id.append(device_id[0])
+ if device_revision is None:
+ device_revision = []
+ for ii in range(device_size):
+ device_revision.append('0x0')
+ assert len(device_id) == len(device_revision)
+ var_name = 'kDevicesFor%sEntry%d' % (unique_symbol_id, entry_id)
+ if is_exception:
+ var_name += 'Exception' + str(exception_id)
+ # define the list
+ data_helper_file.write('const GpuControlList::Device %s[%d] = {\n' %
+ (var_name, len(device_id)))
+ for ii in range(device_size):
+ data_helper_file.write('{%s, %s},\n' %
+ (device_id[ii], device_revision[ii]))
+ data_helper_file.write('};\n\n')
+ # reference the list
+ data_file.write('base::size(%s), // Devices size\n' % var_name)
+ data_file.write('%s, // Devices\n' % var_name)
+ else:
+ assert not device_revision
+ data_file.write('0, // Devices size\n')
+ data_file.write('nullptr, // Devices\n')
+
+
def write_machine_model_info(entry_id, is_exception, exception_id,
machine_model_name, machine_model_version,
data_file, data_helper_file):
@@ -348,55 +412,56 @@ def write_os_type(os_type, data_file):
def write_multi_gpu_category(multi_gpu_category, data_file):
- map = {
+ suffix_for_category = {
'primary': 'Primary',
'secondary': 'Secondary',
'active': 'Active',
'any': 'Any',
'': 'None',
}
- assert multi_gpu_category in map
+ assert multi_gpu_category in suffix_for_category
data_file.write(
'GpuControlList::kMultiGpuCategory%s, // multi_gpu_category\n' %
- map[multi_gpu_category])
+ suffix_for_category[multi_gpu_category])
def write_multi_gpu_style(multi_gpu_style, data_file):
- map = {
+ suffix_for_style = {
'optimus': 'Optimus',
'amd_switchable': 'AMDSwitchable',
'amd_switchable_discrete': 'AMDSwitchableDiscrete',
'amd_switchable_integrated': 'AMDSwitchableIntegrated',
'': 'None',
}
- assert multi_gpu_style in map
+ assert multi_gpu_style in suffix_for_style
data_file.write(
'GpuControlList::kMultiGpuStyle%s, // multi_gpu_style\n' %
- map[multi_gpu_style])
+ suffix_for_style[multi_gpu_style])
def write_gl_type(gl_type, data_file):
- map = {
+ suffix_for_type = {
'gl': 'GL',
'gles': 'GLES',
'angle': 'ANGLE',
'': 'None',
}
- assert gl_type in map
- data_file.write('GpuControlList::kGLType%s, // gl_type\n' % map[gl_type])
+ assert gl_type in suffix_for_type
+ data_file.write('GpuControlList::kGLType%s, // gl_type\n' %
+ suffix_for_type[gl_type])
def write_supported_or_not(feature_value, feature_name, data_file):
if feature_value is None:
feature_value = 'dont_care'
- map = {
+ suffix_for_value = {
'supported': 'Supported',
'unsupported': 'Unsupported',
'dont_care': 'DontCare',
}
- assert feature_value in map
+ assert feature_value in suffix_for_value
data_file.write('GpuControlList::k%s, // %s\n' %
- (map[feature_value], feature_name))
+ (suffix_for_value[feature_value], feature_name))
def write_conditions(entry_id, is_exception, exception_id, entry,
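The map -> suffix_for_* renames above (like the file -> out_file rename further down) avoid shadowing the Python builtins map() and file.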
@@ -406,6 +471,7 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
os_version = None
vendor_id = 0
device_id = None
+ device_revision = None
multi_gpu_category = ''
multi_gpu_style = ''
intel_gpu_series_list = None
@@ -467,6 +533,8 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
vendor_id = int(entry[key], 0)
elif key == 'device_id':
device_id = entry[key]
+ elif key == 'device_revision':
+ device_revision = entry[key]
elif key == 'multi_gpu_category':
multi_gpu_category = entry[key]
elif key == 'multi_gpu_style':
@@ -523,15 +591,14 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
write_version(os_version, 'os_version', data_file)
data_file.write(format(vendor_id, '#04x'))
data_file.write(', // vendor_id\n')
- write_number_list(entry_id, 'uint32_t', 'DeviceIDs', device_id, is_exception,
+ write_device_list(entry_id, device_id, device_revision, is_exception,
exception_id, unique_symbol_id, data_file,
data_helper_file)
write_multi_gpu_category(multi_gpu_category, data_file)
write_multi_gpu_style(multi_gpu_style, data_file)
# group driver info
if driver_vendor != '' or driver_version != None:
- if (driver_version and driver_version.has_key('schema') and
- driver_version['schema'] == 'intel_driver'):
+ if driver_version and driver_version.get('schema') == 'intel_driver':
assert os_type == 'win', 'Intel driver schema is only for Windows'
is_intel = (format(vendor_id, '#04x') == '0x8086' or
intel_gpu_series_list or
@@ -544,6 +611,16 @@ def write_conditions(entry_id, is_exception, exception_id, entry,
check_intel_driver_version(driver_version['value2']))
assert valid_version, INTEL_DRIVER_VERSION_SCHEMA
+ if driver_version and driver_version.get('schema') == 'nvidia_driver':
+ assert os_type == 'win', 'Nvidia driver schema is only for Windows'
+ is_nvidia = (format(vendor_id, '#04x') == '0x10de')
+ assert is_nvidia, 'Nvidia driver schema is only for Nvidia GPUs'
+ valid_version = check_nvidia_driver_version(driver_version['value'])
+ if 'value2' in driver_version:
+ valid_version = (valid_version and
+ check_nvidia_driver_version(driver_version['value2']))
+ assert valid_version, NVIDIA_DRIVER_VERSION_SCHEMA
+
write_driver_info(entry_id, is_exception, exception_id, driver_vendor,
driver_version, unique_symbol_id,
data_file, data_helper_file)
@@ -739,13 +816,13 @@ def format_files(generated_files):
call([formatter, "-i", "-style=chromium", filename])
-def write_header_file_guard(file, filename, path, begin):
+def write_header_file_guard(out_file, filename, path, begin):
token = (path.upper().replace('/', '_') + '_' +
filename.upper().replace('.', '_') + '_')
if begin:
- file.write('#ifndef %s\n#define %s\n\n' % (token, token))
+ out_file.write('#ifndef %s\n#define %s\n\n' % (token, token))
else:
- file.write('\n#endif // %s\n' % token)
+ out_file.write('\n#endif // %s\n' % token)
def process_json_file(json_filepath, list_tag,
diff --git a/chromium/gpu/config/software_rendering_list.json b/chromium/gpu/config/software_rendering_list.json
index e94ed4307c7..5b5c542dbc9 100644
--- a/chromium/gpu/config/software_rendering_list.json
+++ b/chromium/gpu/config/software_rendering_list.json
@@ -620,11 +620,12 @@
{
"id": 90,
"description": "Accelerated video decode interferes with GPU sandbox on certain NVIDIA drivers",
- "cr_bugs": [298968],
+ "cr_bugs": [298968, 1020137],
"os": {
"type": "win"
},
"vendor_id": "0x10de",
+ "multi_gpu_category": "any",
"driver_version": {
"comment": "INF_version: 8.17.12.5729, 8.17.12.8026; date: 05/22/2010, 08/03/2011",
"op": "between",
@@ -1549,21 +1550,69 @@
},
{
"id": 154,
- "description": "Protected video decoding with swap chain is for Windows and Intel only",
- "features": [
- "protected_video_decode"
- ],
+ "description": "Protected video decoding with swap chain is for certain Intel and AMD GPUs on Windows",
+ "cr_bugs": [1093625],
"exceptions": [
{
+ "vendor_id": "0x8086",
"os": {
"type": "win",
"version": {
"op": ">=",
- "value": "10.0"
+ "value": "10"
}
+ }
+ },
+ {
+ "vendor_id": "0x1002",
+ "device_id": ["0x15d8"],
+ "device_revision": ["0xe1", "0xe2"],
+ "driver_version": {
+ "op": ">=",
+ "value": "26.20.12055.1000"
},
- "vendor_id": "0x8086"
+ "os": {
+ "type": "win",
+ "version": {
+ "op": ">=",
+ "value": "10"
+ }
+ }
+ },
+ {
+ "vendor_id": "0x1002",
+ "device_id": ["0x15d8", "0x15dd"],
+ "device_revision": ["0x93", "0x86"],
+ "driver_version": {
+ "op": ">=",
+ "value": "26.20.15023.6032"
+ },
+ "os": {
+ "type": "win",
+ "version": {
+ "op": ">=",
+ "value": "10"
+ }
+ }
+ },
+ {
+ "vendor_id": "0x1002",
+ "device_id": ["0x15d8", "0x15dd"],
+ "driver_version": {
+ "op": ">=",
+ "value": "27.20.1002.34"
+ },
+ "os": {
+ "type": "win",
+ "version": {
+ "op": ">=",
+ "value": "10"
+ }
+ }
}
+ ],
+ "features": [
+ "protected_video_decode"
]
},
{
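The reworked entry 154 exercises the device-list forms handled by write_device_list in process_json.py: a single device ID fanned out across multiple revisions (0x15d8 with 0xe1/0xe2), paired ID/revision lists of equal length (0x15d8/0x93 and 0x15dd/0x86), and a final exception that omits device_revision entirely, which the generator pads with 0x0 for each ID.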
diff --git a/chromium/gpu/gles2_conform_support/generate_gles2_conform_tests.py b/chromium/gpu/gles2_conform_support/generate_gles2_conform_tests.py
index df2980a11a4..b6b49a128c3 100755
--- a/chromium/gpu/gles2_conform_support/generate_gles2_conform_tests.py
+++ b/chromium/gpu/gles2_conform_support/generate_gles2_conform_tests.py
@@ -11,9 +11,8 @@ import sys
def ReadFileAsLines(filename):
"""Reads a file, removing blank lines and lines that start with #"""
- file = open(filename, "r")
- raw_lines = file.readlines()
- file.close()
+ with open(filename, "r") as in_file:
+ raw_lines = in_file.readlines()
lines = []
for line in raw_lines:
line = line.strip()
@@ -22,19 +21,19 @@ def ReadFileAsLines(filename):
return lines
-def GenerateTests(file):
+def GenerateTests(out_file):
"""Generates gles2_conform_test_autogen.cc"""
tests = ReadFileAsLines(
"../../third_party/gles2_conform/GTF_ES/glsl/GTF/mustpass_es20.run")
- file.write("""
+ out_file.write("""
#include "gpu/gles2_conform_support/gles2_conform_test.h"
#include "testing/gtest/include/gtest/gtest.h"
""")
for test in tests:
- file.write("""
+ out_file.write("""
TEST(GLES2ConformTest, %(name)s) {
EXPECT_TRUE(RunGLES2ConformTest("%(path)s"));
}
@@ -48,13 +47,13 @@ def main(argv):
"""This is the main function."""
if len(argv) >= 1:
- dir = argv[0]
+ out_dir = argv[0]
else:
- dir = '.'
+ out_dir = '.'
- file = open(os.path.join(dir, 'gles2_conform_test_autogen.cc'), 'wb')
- GenerateTests(file)
- file.close()
+ out_filename = os.path.join(out_dir, 'gles2_conform_test_autogen.cc')
+ with open(out_filename, 'wb') as out_file:
+ GenerateTests(out_file)
return 0
diff --git a/chromium/gpu/gles2_conform_support/generate_gles2_embedded_data.py b/chromium/gpu/gles2_conform_support/generate_gles2_embedded_data.py
index 8d7330f551d..809b24a57bf 100755
--- a/chromium/gpu/gles2_conform_support/generate_gles2_embedded_data.py
+++ b/chromium/gpu/gles2_conform_support/generate_gles2_embedded_data.py
@@ -59,14 +59,14 @@ class GenerateEmbeddedFiles(object):
def AddFiles(self, scan_dir):
"""Scan a folder and embed the contents of files."""
- files = os.listdir(scan_dir)
+ files_to_embed = os.listdir(scan_dir)
sub_dirs = []
- for file in files:
- full_path = os.path.join(scan_dir, file)
- ext = os.path.splitext(file)[1]
+ for file_to_embed in files_to_embed:
+ full_path = os.path.join(scan_dir, file_to_embed)
+ ext = os.path.splitext(file_to_embed)[1]
base_path = full_path[len(self.scan_dir) + 1:]
if os.path.isdir(full_path):
- if not file in GenerateEmbeddedFiles.paths_to_ignore:
+ if not file_to_embed in GenerateEmbeddedFiles.paths_to_ignore:
sub_dirs.append(full_path)
elif ext in GenerateEmbeddedFiles.extensions_to_include:
if self.base_dir == None:
diff --git a/chromium/gpu/ipc/client/client_shared_image_interface.cc b/chromium/gpu/ipc/client/client_shared_image_interface.cc
index f9a9acbd066..dbfb3874876 100644
--- a/chromium/gpu/ipc/client/client_shared_image_interface.cc
+++ b/chromium/gpu/ipc/client/client_shared_image_interface.cc
@@ -40,8 +40,10 @@ void ClientSharedImageInterface::PresentSwapChain(const SyncToken& sync_token,
#if defined(OS_FUCHSIA)
void ClientSharedImageInterface::RegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
- proxy_->RegisterSysmemBufferCollection(id, std::move(token));
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
+ proxy_->RegisterSysmemBufferCollection(id, std::move(token), format, usage);
}
void ClientSharedImageInterface::ReleaseSysmemBufferCollection(
@@ -58,6 +60,11 @@ SyncToken ClientSharedImageInterface::GenVerifiedSyncToken() {
return proxy_->GenVerifiedSyncToken();
}
+void ClientSharedImageInterface::WaitSyncToken(
+ const gpu::SyncToken& sync_token) {
+ proxy_->WaitSyncToken(sync_token);
+}
+
void ClientSharedImageInterface::Flush() {
proxy_->Flush();
}
@@ -124,6 +131,12 @@ uint32_t ClientSharedImageInterface::UsageForMailbox(const Mailbox& mailbox) {
return proxy_->UsageForMailbox(mailbox);
}
+void ClientSharedImageInterface::NotifyMailboxAdded(const Mailbox& mailbox,
+ uint32_t usage) {
+ AddMailbox(mailbox);
+ proxy_->NotifyMailboxAdded(mailbox, usage);
+}
+
Mailbox ClientSharedImageInterface::AddMailbox(const gpu::Mailbox& mailbox) {
if (mailbox.IsZero())
return mailbox;
diff --git a/chromium/gpu/ipc/client/client_shared_image_interface.h b/chromium/gpu/ipc/client/client_shared_image_interface.h
index 78771d64ef4..64e6edca80a 100644
--- a/chromium/gpu/ipc/client/client_shared_image_interface.h
+++ b/chromium/gpu/ipc/client/client_shared_image_interface.h
@@ -32,11 +32,14 @@ class GPU_EXPORT ClientSharedImageInterface : public SharedImageInterface {
const Mailbox& mailbox) override;
#if defined(OS_FUCHSIA)
void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token) override;
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) override;
void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id) override;
#endif // defined(OS_FUCHSIA)
SyncToken GenUnverifiedSyncToken() override;
SyncToken GenVerifiedSyncToken() override;
+ void WaitSyncToken(const gpu::SyncToken& sync_token) override;
void Flush() override;
scoped_refptr<gfx::NativePixmap> GetNativePixmap(
const Mailbox& mailbox) override;
@@ -62,6 +65,7 @@ class GPU_EXPORT ClientSharedImageInterface : public SharedImageInterface {
void DestroySharedImage(const SyncToken& sync_token,
const Mailbox& mailbox) override;
uint32_t UsageForMailbox(const Mailbox& mailbox) override;
+ void NotifyMailboxAdded(const Mailbox& mailbox, uint32_t usage) override;
private:
Mailbox AddMailbox(const Mailbox& mailbox);
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
index 64b88276295..3085268e547 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.cc
@@ -260,6 +260,28 @@ SyncToken SharedImageInterfaceProxy::GenUnverifiedSyncToken() {
next_release_id_);
}
+void SharedImageInterfaceProxy::WaitSyncToken(const SyncToken& sync_token) {
+ if (!sync_token.HasData())
+ return;
+
+ std::vector<SyncToken> dependencies;
+ dependencies.push_back(sync_token);
+ SyncToken& new_token = dependencies.back();
+ if (!new_token.verified_flush()) {
+ // Only allow unverified sync tokens for the same channel.
+ DCHECK_EQ(sync_token.namespace_id(), gpu::CommandBufferNamespace::GPU_IO);
+ int sync_token_channel_id =
+ ChannelIdFromCommandBufferId(sync_token.command_buffer_id());
+ DCHECK_EQ(sync_token_channel_id, host_->channel_id());
+ new_token.SetVerifyFlush();
+ }
+ {
+ base::AutoLock lock(lock_);
+ last_flush_id_ = host_->EnqueueDeferredMessage(GpuChannelMsg_Nop(),
+ std::move(dependencies));
+ }
+}
+
void SharedImageInterfaceProxy::Flush() {
base::AutoLock lock(lock_);
host_->EnsureFlush(last_flush_id_);
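WaitSyncToken turns the wait into a deferred GpuChannelMsg_Nop carrying the token as a dependency, so later shared-image traffic on the channel is ordered after it; unverified tokens are only accepted from the same channel and are then marked verified. A usage sketch (variable names are illustrative):

  // Sketch: order mailbox teardown after a producer's release.
  gpu::SyncToken token = producer_sii->GenUnverifiedSyncToken();
  consumer_sii->WaitSyncToken(token);  // enqueued ahead of later messages
  consumer_sii->DestroySharedImage(gpu::SyncToken(), mailbox);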
@@ -390,9 +412,11 @@ void SharedImageInterfaceProxy::PresentSwapChain(const SyncToken& sync_token,
#if defined(OS_FUCHSIA)
void SharedImageInterfaceProxy::RegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
- host_->Send(
- new GpuChannelMsg_RegisterSysmemBufferCollection(route_id_, id, token));
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
+ host_->Send(new GpuChannelMsg_RegisterSysmemBufferCollection(
+ route_id_, id, token, format, usage));
}
void SharedImageInterfaceProxy::ReleaseSysmemBufferCollection(
@@ -427,4 +451,10 @@ uint32_t SharedImageInterfaceProxy::UsageForMailbox(const Mailbox& mailbox) {
return it->second;
}
+void SharedImageInterfaceProxy::NotifyMailboxAdded(const Mailbox& mailbox,
+ uint32_t usage) {
+ base::AutoLock lock(lock_);
+ AddMailbox(mailbox, usage);
+}
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/client/shared_image_interface_proxy.h b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
index b22b61d3237..0ad687fde81 100644
--- a/chromium/gpu/ipc/client/shared_image_interface_proxy.h
+++ b/chromium/gpu/ipc/client/shared_image_interface_proxy.h
@@ -41,6 +41,7 @@ class SharedImageInterfaceProxy {
void DestroySharedImage(const SyncToken& sync_token, const Mailbox& mailbox);
SyncToken GenVerifiedSyncToken();
SyncToken GenUnverifiedSyncToken();
+ void WaitSyncToken(const SyncToken& sync_token);
void Flush();
SharedImageInterface::SwapChainMailboxes CreateSwapChain(
@@ -52,13 +53,16 @@ class SharedImageInterfaceProxy {
#if defined(OS_FUCHSIA)
void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token);
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage);
void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id);
#endif // defined(OS_FUCHSIA)
scoped_refptr<gfx::NativePixmap> GetNativePixmap(const gpu::Mailbox& mailbox);
uint32_t UsageForMailbox(const Mailbox& mailbox);
+ void NotifyMailboxAdded(const Mailbox& mailbox, uint32_t usage);
private:
bool GetSHMForPixelData(base::span<const uint8_t> pixel_data,
diff --git a/chromium/gpu/ipc/common/BUILD.gn b/chromium/gpu/ipc/common/BUILD.gn
index a8f3c602db5..51c88a3817c 100644
--- a/chromium/gpu/ipc/common/BUILD.gn
+++ b/chromium/gpu/ipc/common/BUILD.gn
@@ -214,6 +214,10 @@ mojom("interfaces") {
"vulkan_ycbcr_info.mojom",
]
+ if (is_win) {
+ sources += [ "luid.mojom" ]
+ }
+
public_deps = [
":gpu_preferences_interface",
"//mojo/public/mojom/base",
@@ -269,6 +273,24 @@ mojom("interfaces") {
},
]
+ if (is_win) {
+ shared_cpp_typemaps += [
+ {
+ types = [
+ {
+ mojom = "gpu.mojom.Luid"
+ cpp = "::LUID"
+ },
+ ]
+ traits_headers = [ "luid_mojom_traits.h" ]
+ traits_public_deps = [
+ ":mojom_traits",
+ "//gpu/config",
+ ]
+ },
+ ]
+ }
+
cpp_typemaps = shared_cpp_typemaps
blink_cpp_typemaps = shared_cpp_typemaps
@@ -364,10 +386,6 @@ mojom("interfaces") {
cpp = "::gpu::CollectInfoResult"
},
{
- mojom = "gpu.mojom.Dx12VulkanVersionInfo"
- cpp = "::gpu::Dx12VulkanVersionInfo"
- },
- {
mojom = "gpu.mojom.OverlayInfo"
cpp = "::gpu::OverlayInfo"
},
@@ -580,6 +598,9 @@ source_set("mojom_traits") {
if (is_android) {
sources += [ "vulkan_ycbcr_info_mojom_traits.h" ]
}
+ if (is_win) {
+ sources += [ "luid_mojom_traits.h" ]
+ }
if (enable_vulkan) {
deps += [ ":vulkan_types_mojom_traits" ]
}
diff --git a/chromium/gpu/ipc/common/PRESUBMIT.py b/chromium/gpu/ipc/common/PRESUBMIT.py
index fa602d4a0ad..98ecf4b0a9c 100644
--- a/chromium/gpu/ipc/common/PRESUBMIT.py
+++ b/chromium/gpu/ipc/common/PRESUBMIT.py
@@ -26,8 +26,8 @@ def CommonChecks(input_api, output_api):
if generated_files and not generating_files:
long_text = 'Changed files:\n'
- for file in generated_files:
- long_text += file.LocalPath() + '\n'
+ for generated_file in generated_files:
+ long_text += generated_file.LocalPath() + '\n'
long_text += '\n'
messages.append(output_api.PresubmitError(
'Vulkan types generated files changed but the generator '
diff --git a/chromium/gpu/ipc/common/gpu_info.mojom b/chromium/gpu/ipc/common/gpu_info.mojom
index 7dc59093d2b..ad90a1ca7e9 100644
--- a/chromium/gpu/ipc/common/gpu_info.mojom
+++ b/chromium/gpu/ipc/common/gpu_info.mojom
@@ -6,6 +6,8 @@
module gpu.mojom;
import "gpu/ipc/common/dx_diag_node.mojom";
+[EnableIf=is_win]
+import "gpu/ipc/common/luid.mojom";
import "mojo/public/mojom/base/time.mojom";
import "ui/gfx/geometry/mojom/geometry.mojom";
@@ -26,6 +28,8 @@ struct GpuDevice {
string driver_vendor;
string driver_version;
int32 cuda_compute_capability_major;
+ [EnableIf=is_win]
+ Luid luid;
};
// gpu::VideoCodecProfile
@@ -116,15 +120,6 @@ enum OverlaySupport {
SOFTWARE,
};
-// gpu::Dx12VulkanVersionInfo
-[EnableIf=is_win]
-struct Dx12VulkanVersionInfo {
- bool supports_dx12;
- bool supports_vulkan;
- uint32 d3d12_feature_level;
- uint32 vulkan_version;
-};
-
// gpu::OverlayInfo
[EnableIf=is_win]
struct OverlayInfo {
@@ -132,6 +127,8 @@ struct OverlayInfo {
bool supports_overlays;
OverlaySupport yuy2_overlay_support;
OverlaySupport nv12_overlay_support;
+ OverlaySupport bgra8_overlay_support;
+ OverlaySupport rgb10a2_overlay_support;
};
// Corresponds to |gpu::GPUInfo| in gpu/config/gpu_info.h
@@ -167,7 +164,9 @@ struct GpuInfo {
[EnableIf=is_win]
DxDiagNode dx_diagnostics;
[EnableIf=is_win]
- Dx12VulkanVersionInfo dx12_vulkan_version_info;
+ uint32 d3d12_feature_level;
+ [EnableIf=is_win]
+ uint32 vulkan_version;
[EnableIf=is_win]
OverlayInfo overlay_info;
diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
index 3d7e314ba8c..1d967041854 100644
--- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
+++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.cc
@@ -28,6 +28,9 @@ bool StructTraits<gpu::mojom::GpuDeviceDataView, gpu::GPUInfo::GPUDevice>::Read(
out->cuda_compute_capability_major = data.cuda_compute_capability_major();
return data.ReadVendorString(&out->vendor_string) &&
data.ReadDeviceString(&out->device_string) &&
+#if defined(OS_WIN)
+ data.ReadLuid(&out->luid) &&
+#endif // OS_WIN
data.ReadDriverVendor(&out->driver_vendor) &&
data.ReadDriverVersion(&out->driver_version);
}
@@ -355,25 +358,15 @@ bool EnumTraits<gpu::mojom::OverlaySupport, gpu::OverlaySupport>::FromMojom(
return true;
}
-// static
-bool StructTraits<gpu::mojom::Dx12VulkanVersionInfoDataView,
- gpu::Dx12VulkanVersionInfo>::
- Read(gpu::mojom::Dx12VulkanVersionInfoDataView data,
- gpu::Dx12VulkanVersionInfo* out) {
- out->supports_dx12 = data.supports_dx12();
- out->supports_vulkan = data.supports_vulkan();
- out->d3d12_feature_level = data.d3d12_feature_level();
- out->vulkan_version = data.vulkan_version();
- return true;
-}
-
bool StructTraits<gpu::mojom::OverlayInfoDataView, gpu::OverlayInfo>::Read(
gpu::mojom::OverlayInfoDataView data,
gpu::OverlayInfo* out) {
out->direct_composition = data.direct_composition();
out->supports_overlays = data.supports_overlays();
return data.ReadYuy2OverlaySupport(&out->yuy2_overlay_support) &&
- data.ReadNv12OverlaySupport(&out->nv12_overlay_support);
+ data.ReadNv12OverlaySupport(&out->nv12_overlay_support) &&
+ data.ReadBgra8OverlaySupport(&out->bgra8_overlay_support) &&
+ data.ReadRgb10a2OverlaySupport(&out->rgb10a2_overlay_support);
}
#endif
@@ -402,6 +395,11 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
out->oop_rasterization_supported = data.oop_rasterization_supported();
out->subpixel_font_rendering = data.subpixel_font_rendering();
+#if defined(OS_WIN)
+ out->d3d12_feature_level = data.d3d12_feature_level();
+ out->vulkan_version = data.vulkan_version();
+#endif
+
return data.ReadInitializationTime(&out->initialization_time) &&
data.ReadGpu(&out->gpu) &&
data.ReadSecondaryGpus(&out->secondary_gpus) &&
@@ -421,7 +419,6 @@ bool StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo>::Read(
#if defined(OS_WIN)
data.ReadOverlayInfo(&out->overlay_info) &&
data.ReadDxDiagnostics(&out->dx_diagnostics) &&
- data.ReadDx12VulkanVersionInfo(&out->dx12_vulkan_version_info) &&
#endif
data.ReadVideoDecodeAcceleratorCapabilities(
&out->video_decode_accelerator_capabilities) &&
diff --git a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
index 5fc0b439a94..09778fffb31 100644
--- a/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
+++ b/chromium/gpu/ipc/common/gpu_info_mojom_traits.h
@@ -36,6 +36,10 @@ struct StructTraits<gpu::mojom::GpuDeviceDataView, gpu::GPUInfo::GPUDevice> {
static uint32_t revision(const gpu::GPUInfo::GPUDevice& input) {
return input.revision;
}
+
+ static const LUID luid(const gpu::GPUInfo::GPUDevice& input) {
+ return input.luid;
+ }
#endif // OS_WIN
static bool active(const gpu::GPUInfo::GPUDevice& input) {
@@ -208,29 +212,6 @@ struct EnumTraits<gpu::mojom::OverlaySupport, gpu::OverlaySupport> {
};
template <>
-struct StructTraits<gpu::mojom::Dx12VulkanVersionInfoDataView,
- gpu::Dx12VulkanVersionInfo> {
- static bool Read(gpu::mojom::Dx12VulkanVersionInfoDataView data,
- gpu::Dx12VulkanVersionInfo* out);
-
- static bool supports_dx12(const gpu::Dx12VulkanVersionInfo& input) {
- return input.supports_dx12;
- }
-
- static bool supports_vulkan(const gpu::Dx12VulkanVersionInfo& input) {
- return input.supports_vulkan;
- }
-
- static uint32_t d3d12_feature_level(const gpu::Dx12VulkanVersionInfo& input) {
- return input.d3d12_feature_level;
- }
-
- static uint32_t vulkan_version(const gpu::Dx12VulkanVersionInfo& input) {
- return input.vulkan_version;
- }
-};
-
-template <>
struct StructTraits<gpu::mojom::OverlayInfoDataView, gpu::OverlayInfo> {
static bool Read(gpu::mojom::OverlayInfoDataView data, gpu::OverlayInfo* out);
@@ -251,6 +232,16 @@ struct StructTraits<gpu::mojom::OverlayInfoDataView, gpu::OverlayInfo> {
const gpu::OverlayInfo& input) {
return input.nv12_overlay_support;
}
+
+ static gpu::OverlaySupport bgra8_overlay_support(
+ const gpu::OverlayInfo& input) {
+ return input.bgra8_overlay_support;
+ }
+
+ static gpu::OverlaySupport rgb10a2_overlay_support(
+ const gpu::OverlayInfo& input) {
+ return input.rgb10a2_overlay_support;
+ }
};
#endif
@@ -360,14 +351,16 @@ struct StructTraits<gpu::mojom::GpuInfoDataView, gpu::GPUInfo> {
#endif // OS_MACOSX
#if defined(OS_WIN)
-
static const gpu::DxDiagNode& dx_diagnostics(const gpu::GPUInfo& input) {
return input.dx_diagnostics;
}
- static const gpu::Dx12VulkanVersionInfo& dx12_vulkan_version_info(
- const gpu::GPUInfo& input) {
- return input.dx12_vulkan_version_info;
+ static uint32_t d3d12_feature_level(const gpu::GPUInfo& input) {
+ return input.d3d12_feature_level;
+ }
+
+ static uint32_t vulkan_version(const gpu::GPUInfo& input) {
+ return input.vulkan_version;
}
static const gpu::OverlayInfo& overlay_info(const gpu::GPUInfo& input) {
diff --git a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
index 500ff3cb9e4..051e8a95b75 100644
--- a/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
+++ b/chromium/gpu/ipc/common/gpu_memory_buffer_impl_dxgi.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/command_line.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "gpu/ipc/common/gpu_memory_buffer_impl_dxgi.h"
#include "gpu/ipc/common/gpu_memory_buffer_support.h"
diff --git a/chromium/gpu/ipc/common/gpu_messages.h b/chromium/gpu/ipc/common/gpu_messages.h
index ca201770ab5..db1bb624ef7 100644
--- a/chromium/gpu/ipc/common/gpu_messages.h
+++ b/chromium/gpu/ipc/common/gpu_messages.h
@@ -189,9 +189,11 @@ IPC_MESSAGE_ROUTED2(GpuChannelMsg_PresentSwapChain,
uint32_t /* release_id */)
#endif // OS_WIN
#if defined(OS_FUCHSIA)
-IPC_MESSAGE_ROUTED2(GpuChannelMsg_RegisterSysmemBufferCollection,
+IPC_MESSAGE_ROUTED4(GpuChannelMsg_RegisterSysmemBufferCollection,
gfx::SysmemBufferCollectionId /* id */,
- zx::channel /* token */)
+ zx::channel /* token */,
+ gfx::BufferFormat /* format */,
+ gfx::BufferUsage /* usage */)
IPC_MESSAGE_ROUTED1(GpuChannelMsg_ReleaseSysmemBufferCollection,
gfx::SysmemBufferCollectionId /* id */)
#endif // OS_FUCHSIA
diff --git a/chromium/gpu/ipc/common/gpu_param_traits_macros.h b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
index 9fd93a4b637..02296d13bde 100644
--- a/chromium/gpu/ipc/common/gpu_param_traits_macros.h
+++ b/chromium/gpu/ipc/common/gpu_param_traits_macros.h
@@ -12,6 +12,7 @@
#include "gpu/command_buffer/common/swap_buffers_complete_params.h"
#include "gpu/gpu_export.h"
#include "gpu/ipc/common/gpu_command_buffer_traits.h"
+#include "gpu/ipc/common/surface_handle.h"
#include "ipc/ipc_message_macros.h"
#include "ui/gfx/ipc/gfx_param_traits.h"
#include "url/ipc/url_param_traits.h"
@@ -32,4 +33,8 @@ IPC_STRUCT_TRAITS_END()
IPC_ENUM_TRAITS_MAX_VALUE(viz::ResourceFormat, viz::RESOURCE_FORMAT_MAX)
+#if defined(USE_X11)
+IPC_ENUM_TRAITS(gpu::SurfaceHandle)
+#endif
+
#endif // GPU_IPC_COMMON_GPU_PARAM_TRAITS_MACROS_H_
diff --git a/chromium/gpu/ipc/common/luid.mojom b/chromium/gpu/ipc/common/luid.mojom
new file mode 100644
index 00000000000..68da5dbda46
--- /dev/null
+++ b/chromium/gpu/ipc/common/luid.mojom
@@ -0,0 +1,12 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module gpu.mojom;
+
+// Corresponds to LUID in dxgi.h
+[EnableIf=is_win]
+struct Luid {
+ int32 high;
+ uint32 low;
+};
diff --git a/chromium/gpu/ipc/common/luid_mojom_traits.h b/chromium/gpu/ipc/common/luid_mojom_traits.h
new file mode 100644
index 00000000000..e736c53c455
--- /dev/null
+++ b/chromium/gpu/ipc/common/luid_mojom_traits.h
@@ -0,0 +1,27 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_COMMON_LUID_MOJOM_TRAITS_H_
+#define GPU_IPC_COMMON_LUID_MOJOM_TRAITS_H_
+
+#include "gpu/ipc/common/luid.mojom-shared.h"
+
+namespace mojo {
+
+template <>
+struct StructTraits<gpu::mojom::LuidDataView, LUID> {
+ static bool Read(gpu::mojom::LuidDataView data, LUID* out) {
+ out->HighPart = data.high();
+ out->LowPart = data.low();
+ return true;
+ }
+
+ static int32_t high(const LUID& input) { return input.HighPart; }
+
+ static uint32_t low(const LUID& input) { return input.LowPart; }
+};
+
+} // namespace mojo
+
+#endif // GPU_IPC_COMMON_LUID_MOJOM_TRAITS_H_
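The int32/uint32 split in gpu.mojom.Luid mirrors the Windows LUID layout (LONG HighPart, DWORD LowPart), so the traits above can copy both fields without range checks.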
diff --git a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
index 5a027baac2c..796466533fb 100644
--- a/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
+++ b/chromium/gpu/ipc/in_process_gpu_thread_holder.cc
@@ -83,17 +83,19 @@ void InProcessGpuThreadHolder::InitializeOnGpuThread(
GpuDriverBugWorkarounds gpu_driver_bug_workarounds(
gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
- bool use_virtualized_gl_context_ = false;
+ bool use_virtualized_gl_context = false;
#if defined(OS_MACOSX)
// Virtualize GpuPreference:::kLowPower contexts by default on OS X to prevent
// performance regressions when enabling FCM. https://crbug.com/180463
- use_virtualized_gl_context_ = true;
+ use_virtualized_gl_context = true;
#endif
- use_virtualized_gl_context_ |=
+ use_virtualized_gl_context |=
gpu_driver_bug_workarounds.use_virtualized_gl_contexts;
+ if (use_virtualized_gl_context)
+ share_group_->SetSharedContext(context_.get());
context_state_ = base::MakeRefCounted<SharedContextState>(
- share_group_, surface_, context_, use_virtualized_gl_context_,
+ share_group_, surface_, context_, use_virtualized_gl_context,
base::DoNothing(), gpu_preferences_.gr_context_type);
auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
gpu_driver_bug_workarounds, gpu_feature_info_);
diff --git a/chromium/gpu/ipc/scheduler_sequence.h b/chromium/gpu/ipc/scheduler_sequence.h
index d099492564c..15ae245cfe1 100644
--- a/chromium/gpu/ipc/scheduler_sequence.h
+++ b/chromium/gpu/ipc/scheduler_sequence.h
@@ -9,7 +9,7 @@
#include <vector>
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/sequence_id.h"
diff --git a/chromium/gpu/ipc/service/BUILD.gn b/chromium/gpu/ipc/service/BUILD.gn
index a6cdd1737d6..4450d8337f0 100644
--- a/chromium/gpu/ipc/service/BUILD.gn
+++ b/chromium/gpu/ipc/service/BUILD.gn
@@ -32,6 +32,8 @@ jumbo_component("service") {
"gpu_config.h",
"gpu_init.cc",
"gpu_init.h",
+ "gpu_memory_ablation_experiment.cc",
+ "gpu_memory_ablation_experiment.h",
"gpu_memory_buffer_factory.cc",
"gpu_memory_buffer_factory.h",
"gpu_watchdog_thread.cc",
diff --git a/chromium/gpu/ipc/service/context_url.cc b/chromium/gpu/ipc/service/context_url.cc
index a02b18257f5..4fd16df8412 100644
--- a/chromium/gpu/ipc/service/context_url.cc
+++ b/chromium/gpu/ipc/service/context_url.cc
@@ -20,9 +20,10 @@ void ContextUrl::SetActiveUrl(const gpu::ContextUrl& active_url) {
last_url_hash = active_url.hash();
- // Note that the url is intentionally excluded from webview crash dumps
- // using a whitelist for privacy reasons. See kWebViewCrashKeyWhiteList.
- static crash_reporter::CrashKeyString<1024> crash_key("url-chunk");
+ // Note that the url is intentionally excluded from WebView and WebLayer
+ // crash dumps using an allowlist for privacy reasons. See
+ // kWebViewCrashKeyAllowList and kWebLayerCrashKeyAllowList.
+ static crash_reporter::CrashKeyString<1024> crash_key("gpu-url-chunk");
crash_key.Set(active_url.url().possibly_invalid_spec());
}
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.cc b/chromium/gpu/ipc/service/gpu_channel_manager.cc
index 52d8cb82c4d..98f44c24c28 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.cc
@@ -33,6 +33,7 @@
#include "gpu/ipc/common/memory_stats.h"
#include "gpu/ipc/service/gpu_channel.h"
#include "gpu/ipc/service/gpu_channel_manager_delegate.h"
+#include "gpu/ipc/service/gpu_memory_ablation_experiment.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/ipc/service/gpu_watchdog_thread.h"
#include "third_party/skia/include/core/SkGraphics.h"
@@ -99,8 +100,11 @@ void FormatAllocationSourcesForTracing(
} // namespace
-GpuChannelManager::GpuPeakMemoryMonitor::GpuPeakMemoryMonitor()
- : weak_factory_(this) {}
+GpuChannelManager::GpuPeakMemoryMonitor::GpuPeakMemoryMonitor(
+ GpuChannelManager* channel_manager)
+ : ablation_experiment_(
+ std::make_unique<GpuMemoryAblationExperiment>(channel_manager)),
+ weak_factory_(this) {}
GpuChannelManager::GpuPeakMemoryMonitor::~GpuPeakMemoryMonitor() = default;
@@ -114,6 +118,12 @@ GpuChannelManager::GpuPeakMemoryMonitor::GetPeakMemoryUsage(
if (sequence != sequence_trackers_.end()) {
*out_peak_memory = sequence->second.total_memory_;
allocation_per_source = sequence->second.peak_memory_per_source_;
+
+ uint64_t ablation_memory =
+ ablation_experiment_->GetPeakMemory(sequence_num);
+ *out_peak_memory += ablation_memory;
+ allocation_per_source[GpuPeakMemoryAllocationSource::SHARED_IMAGE_STUB] +=
+ ablation_memory;
}
return allocation_per_source;
}
@@ -123,6 +133,7 @@ void GpuChannelManager::GpuPeakMemoryMonitor::StartGpuMemoryTracking(
sequence_trackers_.emplace(
sequence_num,
SequenceTracker(current_memory_, current_memory_per_source_));
+ ablation_experiment_->StartSequence(sequence_num);
TRACE_EVENT_ASYNC_BEGIN2("gpu", "PeakMemoryTracking", sequence_num, "start",
current_memory_, "start_sources",
StartTrackingTracedValue());
@@ -136,6 +147,7 @@ void GpuChannelManager::GpuPeakMemoryMonitor::StopGpuMemoryTracking(
sequence->second.total_memory_, "end_sources",
StopTrackingTracedValue(sequence->second));
sequence_trackers_.erase(sequence);
+ ablation_experiment_->StopSequence(sequence_num);
}
}
@@ -217,6 +229,8 @@ void GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange(
uint64_t diff = new_size - old_size;
current_memory_ += diff;
current_memory_per_source_[source] += diff;
+
+ ablation_experiment_->OnMemoryAllocated(old_size, new_size);
if (old_size < new_size) {
// When memory has increased, iterate over the sequences to update their
// peak.
@@ -279,11 +293,13 @@ GpuChannelManager::GpuChannelManager(
image_decode_accelerator_worker_(image_decode_accelerator_worker),
activity_flags_(std::move(activity_flags)),
memory_pressure_listener_(
+ FROM_HERE,
base::BindRepeating(&GpuChannelManager::HandleMemoryPressure,
base::Unretained(this))),
vulkan_context_provider_(vulkan_context_provider),
metal_context_provider_(metal_context_provider),
- dawn_context_provider_(dawn_context_provider) {
+ dawn_context_provider_(dawn_context_provider),
+ peak_memory_monitor_(this) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(io_task_runner);
@@ -721,7 +737,7 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
}
// TODO(penghuang): https://crbug.com/899735 Handle device lost for Vulkan.
- shared_context_state_ = base::MakeRefCounted<SharedContextState>(
+ auto shared_context_state = base::MakeRefCounted<SharedContextState>(
std::move(share_group), std::move(surface), std::move(context),
use_virtualized_gl_contexts,
base::BindOnce(&GpuChannelManager::OnContextLost, base::Unretained(this),
@@ -738,24 +754,33 @@ scoped_refptr<SharedContextState> GpuChannelManager::GetSharedContextState(
// SkiaRenderer needs GrContext to composite output surface.
need_gr_context |= features::IsUsingSkiaRenderer();
+ // GpuMemoryAblationExperiment needs a context to use Skia for GPU
+ // allocations.
+ need_gr_context |= base::FeatureList::IsEnabled(kGPUMemoryAblationFeature);
+
if (need_gr_context) {
if (gpu_preferences_.gr_context_type == gpu::GrContextType::kGL) {
auto feature_info = base::MakeRefCounted<gles2::FeatureInfo>(
gpu_driver_bug_workarounds(), gpu_feature_info());
- if (!shared_context_state_->InitializeGL(gpu_preferences_,
- feature_info.get())) {
- shared_context_state_ = nullptr;
+ if (!shared_context_state->InitializeGL(gpu_preferences_,
+ feature_info.get())) {
LOG(ERROR) << "ContextResult::kFatalFailure: Failed to Initialize GL "
"for SharedContextState";
*result = ContextResult::kFatalFailure;
return nullptr;
}
}
- shared_context_state_->InitializeGrContext(
- gpu_preferences_, gpu_driver_bug_workarounds_, gr_shader_cache(),
- &activity_flags_, watchdog_);
+ if (!shared_context_state->InitializeGrContext(
+ gpu_preferences_, gpu_driver_bug_workarounds_, gr_shader_cache(),
+ &activity_flags_, watchdog_)) {
+ LOG(ERROR) << "ContextResult::kFatalFailure: Failed to Initialize "
+ "GrContext for SharedContextState";
+ *result = ContextResult::kFatalFailure;
+ return nullptr;
+ }
}
+ shared_context_state_ = std::move(shared_context_state);
gr_cache_controller_.emplace(shared_context_state_.get(), task_runner_);
*result = ContextResult::kSuccess;
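Building the SharedContextState in a local variable and assigning shared_context_state_ only after both InitializeGL and the now-checked InitializeGrContext succeed means a failed initialization no longer leaves a half-initialized state cached on the manager.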
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager.h b/chromium/gpu/ipc/service/gpu_channel_manager.h
index 4846800441e..6413a3ed829 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager.h
@@ -56,6 +56,7 @@ struct GpuPreferences;
struct SyncToken;
class GpuChannel;
class GpuChannelManagerDelegate;
+class GpuMemoryAblationExperiment;
class GpuMemoryBufferFactory;
class GpuWatchdogThread;
class ImageDecodeAcceleratorWorker;
@@ -205,7 +206,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
class GPU_IPC_SERVICE_EXPORT GpuPeakMemoryMonitor
: public MemoryTracker::Observer {
public:
- GpuPeakMemoryMonitor();
+ explicit GpuPeakMemoryMonitor(GpuChannelManager* channel_manager);
~GpuPeakMemoryMonitor() override;
base::flat_map<GpuPeakMemoryAllocationSource, uint64_t> GetPeakMemoryUsage(
@@ -253,6 +254,7 @@ class GPU_IPC_SERVICE_EXPORT GpuChannelManager
base::flat_map<GpuPeakMemoryAllocationSource, uint64_t>
current_memory_per_source_;
+ std::unique_ptr<GpuMemoryAblationExperiment> ablation_experiment_;
base::WeakPtrFactory<GpuPeakMemoryMonitor> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(GpuPeakMemoryMonitor);
};
diff --git a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
index 98897f9ad8a..3ddc375f308 100644
--- a/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
+++ b/chromium/gpu/ipc/service/gpu_channel_manager_delegate.h
@@ -74,6 +74,9 @@ class GpuChannelManagerDelegate {
// Tells the delegate that overlay info was updated.
virtual void DidUpdateOverlayInfo(const gpu::OverlayInfo& overlay_info) = 0;
+ // Tells the delegate that HDR status was updated.
+ virtual void DidUpdateHDRStatus(bool hdr_enabled) = 0;
+
// Tells the delegate that |child_window| was created in the GPU process and
// to send an IPC to make SetParent() syscall. This syscall is blocked by the
// GPU sandbox and must be made in the browser process.
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.cc b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
index a52769db1dd..6e1c4bebd1a 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.cc
@@ -5,9 +5,11 @@
#include "gpu/ipc/service/gpu_channel_test_common.h"
#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/test/scoped_feature_list.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
+#include "components/viz/common/features.h"
#include "gpu/command_buffer/common/activity_flags.h"
#include "gpu/command_buffer/service/scheduler.h"
#include "gpu/command_buffer/service/shared_image_manager.h"
@@ -46,6 +48,7 @@ class TestGpuChannelManagerDelegate : public GpuChannelManagerDelegate {
bool IsExiting() const override { return is_exiting_; }
#if defined(OS_WIN)
void DidUpdateOverlayInfo(const gpu::OverlayInfo& overlay_info) override {}
+ void DidUpdateHDRStatus(bool hdr_enabled) override {}
void SendCreatedChildWindow(SurfaceHandle parent_window,
SurfaceHandle child_window) override {}
#endif
@@ -77,10 +80,14 @@ GpuChannelTestCommon::GpuChannelTestCommon(
channel_manager_delegate_(
new TestGpuChannelManagerDelegate(scheduler_.get())) {
// We need GL bindings to actually initialize command buffers.
- if (use_stub_bindings)
+ if (use_stub_bindings) {
gl::GLSurfaceTestSupport::InitializeOneOffWithStubBindings();
- else
+ // GrContext cannot be created with stub bindings.
+ scoped_feature_list_ = std::make_unique<base::test::ScopedFeatureList>();
+ scoped_feature_list_->InitAndDisableFeature(features::kUseSkiaRenderer);
+ } else {
gl::GLSurfaceTestSupport::InitializeOneOff();
+ }
GpuFeatureInfo feature_info;
feature_info.enabled_gpu_driver_bug_workarounds =
diff --git a/chromium/gpu/ipc/service/gpu_channel_test_common.h b/chromium/gpu/ipc/service/gpu_channel_test_common.h
index 1a15276f605..504a4720457 100644
--- a/chromium/gpu/ipc/service/gpu_channel_test_common.h
+++ b/chromium/gpu/ipc/service/gpu_channel_test_common.h
@@ -15,9 +15,15 @@
namespace base {
class TestSimpleTaskRunner;
+
+namespace test {
+class ScopedFeatureList;
+} // namespace test
+
namespace trace_event {
class MemoryDumpManager;
} // namespace trace_event
+
} // namespace base
namespace IPC {
@@ -63,6 +69,7 @@ class GpuChannelTestCommon : public testing::Test {
std::unique_ptr<SharedImageManager> shared_image_manager_;
std::unique_ptr<Scheduler> scheduler_;
std::unique_ptr<TestGpuChannelManagerDelegate> channel_manager_delegate_;
+ std::unique_ptr<base::test::ScopedFeatureList> scoped_feature_list_;
std::unique_ptr<GpuChannelManager> channel_manager_;
DISALLOW_COPY_AND_ASSIGN(GpuChannelTestCommon);
diff --git a/chromium/gpu/ipc/service/gpu_channel_unittest.cc b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
index d9eb13b7c07..4475b8c8503 100644
--- a/chromium/gpu/ipc/service/gpu_channel_unittest.cc
+++ b/chromium/gpu/ipc/service/gpu_channel_unittest.cc
@@ -21,6 +21,8 @@ class GpuChannelTest : public GpuChannelTestCommon {
#if defined(OS_WIN)
const SurfaceHandle kFakeSurfaceHandle = reinterpret_cast<SurfaceHandle>(1);
+#elif defined(USE_X11)
+const SurfaceHandle kFakeSurfaceHandle = static_cast<SurfaceHandle>(1);
#else
const SurfaceHandle kFakeSurfaceHandle = 1;
#endif
diff --git a/chromium/gpu/ipc/service/gpu_init.cc b/chromium/gpu/ipc/service/gpu_init.cc
index da3cadecbcd..fa0b192cc67 100644
--- a/chromium/gpu/ipc/service/gpu_init.cc
+++ b/chromium/gpu/ipc/service/gpu_init.cc
@@ -7,6 +7,7 @@
#include <string>
#include "base/command_line.h"
+#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/string_number_conversions.h"
#include "base/threading/scoped_blocking_call.h"
@@ -56,6 +57,7 @@
#include "gpu/vulkan/init/vulkan_factory.h"
#include "gpu/vulkan/vulkan_implementation.h"
#include "gpu/vulkan/vulkan_instance.h"
+#include "gpu/vulkan/vulkan_util.h"
#endif
namespace gpu {
@@ -67,7 +69,7 @@ bool CollectGraphicsInfo(GPUInfo* gpu_info) {
base::TimeTicks before_collect_context_graphics_info = base::TimeTicks::Now();
bool success = CollectContextGraphicsInfo(gpu_info);
if (!success)
- LOG(ERROR) << "gpu::CollectGraphicsInfo failed.";
+ LOG(ERROR) << "CollectGraphicsInfo failed.";
if (success) {
base::TimeDelta collect_context_time =
@@ -113,17 +115,33 @@ class GpuWatchdogInit {
watchdog_ptr_->OnInitComplete();
}
- void SetGpuWatchdogPtr(gpu::GpuWatchdogThread* ptr) { watchdog_ptr_ = ptr; }
+ void SetGpuWatchdogPtr(GpuWatchdogThread* ptr) { watchdog_ptr_ = ptr; }
private:
- gpu::GpuWatchdogThread* watchdog_ptr_ = nullptr;
+ GpuWatchdogThread* watchdog_ptr_ = nullptr;
};
+
+// TODO(https://crbug.com/1095744): We currently do not handle
+// VK_ERROR_DEVICE_LOST in in-process-gpu.
+void DisableInProcessGpuVulkan(GpuFeatureInfo* gpu_feature_info,
+ GpuPreferences* gpu_preferences) {
+ if (gpu_feature_info->status_values[GPU_FEATURE_TYPE_VULKAN] ==
+ kGpuFeatureStatusEnabled) {
+ LOG(ERROR) << "Vulkan not supported with in process gpu";
+ gpu_preferences->use_vulkan = VulkanImplementationName::kNone;
+ gpu_feature_info->status_values[GPU_FEATURE_TYPE_VULKAN] =
+ kGpuFeatureStatusDisabled;
+ if (gpu_preferences->gr_context_type == GrContextType::kVulkan)
+ gpu_preferences->gr_context_type = GrContextType::kGL;
+ }
+}
+
} // namespace
GpuInit::GpuInit() = default;
GpuInit::~GpuInit() {
- gpu::StopForceDiscreteGPU();
+ StopForceDiscreteGPU();
}
bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
@@ -146,7 +164,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// Set keys for crash logging based on preliminary gpu info, in case we
// crash during feature collection.
- gpu::SetKeysForCrashLogging(gpu_info_);
+ SetKeysForCrashLogging(gpu_info_);
#if defined(SUBPIXEL_FONT_RENDERING_DISABLED)
gpu_info_.subpixel_font_rendering = false;
#else
@@ -168,31 +186,31 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
if (!PopGpuFeatureInfoCache(&gpu_feature_info_)) {
// Compute blacklist and driver bug workaround decisions based on basic GPU
// info.
- gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
- gpu_info_, gpu_preferences_, command_line, &needs_more_info);
+ gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
+ command_line, &needs_more_info);
}
#endif // !OS_ANDROID && !BUILDFLAG(IS_CHROMECAST)
gpu_info_.in_process_gpu = false;
- bool use_swiftshader = false;
+ gl_use_swiftshader_ = false;
// GL bindings may have already been initialized, specifically on MacOSX.
bool gl_initialized = gl::GetGLImplementation() != gl::kGLImplementationNone;
if (!gl_initialized) {
// If GL has already been initialized, then it's too late to select GPU.
- if (gpu::SwitchableGPUsSupported(gpu_info_, *command_line)) {
- gpu::InitializeSwitchableGPUs(
+ if (SwitchableGPUsSupported(gpu_info_, *command_line)) {
+ InitializeSwitchableGPUs(
gpu_feature_info_.enabled_gpu_driver_bug_workarounds);
}
} else if (gl::GetGLImplementation() == gl::kGLImplementationSwiftShaderGL &&
command_line->GetSwitchValueASCII(switches::kUseGL) !=
gl::kGLImplementationSwiftShaderName) {
- use_swiftshader = true;
+ gl_use_swiftshader_ = true;
}
bool enable_watchdog = !gpu_preferences_.disable_gpu_watchdog &&
!command_line->HasSwitch(switches::kHeadless) &&
- !use_swiftshader;
+ !gl_use_swiftshader_;
// Disable the watchdog in debug builds because they tend to only be run by
// developers who will not appreciate the watchdog killing the GPU process.
@@ -233,11 +251,11 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// consuming has completed, otherwise the process is liable to be aborted.
if (enable_watchdog && !delayed_watchdog_enable) {
if (base::FeatureList::IsEnabled(features::kGpuWatchdogV2)) {
- watchdog_thread_ = gpu::GpuWatchdogThreadImplV2::Create(
+ watchdog_thread_ = GpuWatchdogThreadImplV2::Create(
gpu_preferences_.watchdog_starts_backgrounded);
watchdog_init.SetGpuWatchdogPtr(watchdog_thread_.get());
} else {
- watchdog_thread_ = gpu::GpuWatchdogThreadImplV1::Create(
+ watchdog_thread_ = GpuWatchdogThreadImplV1::Create(
gpu_preferences_.watchdog_starts_backgrounded);
}
@@ -282,12 +300,12 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
->GetSupportedFormatsForTexturing();
#endif
- if (!use_swiftshader) {
- use_swiftshader = EnableSwiftShaderIfNeeded(
+ if (!gl_use_swiftshader_) {
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, needs_more_info);
}
- if (gl_initialized && use_swiftshader &&
+ if (gl_initialized && gl_use_swiftshader_ &&
gl::GetGLImplementation() != gl::kGLImplementationSwiftShaderGL) {
#if defined(OS_LINUX)
VLOG(1) << "Quit GPU process launch to fallback to SwiftShader cleanly "
@@ -324,7 +342,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
#if defined(OS_LINUX)
// The ContentSandboxHelper is currently the only one implementation of
- // gpu::GpuSandboxHelper and it has no dependency. Except on Linux where
+ // GpuSandboxHelper and it has no dependency. Except on Linux where
// VaapiWrapper checks the GL implementation to determine which display
// to use. So call PreSandboxStartup after GL initialization. But make
// sure the watchdog is paused as loadLibrary may take a long time and
@@ -350,7 +368,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
#if defined(OS_MACOSX)
if (gl::GetGLImplementation() == gl::kGLImplementationEGLANGLE &&
gl::GetANGLEImplementation() == gl::ANGLEImplementation::kSwiftShader) {
- gpu::SetMacOSSpecificTextureTarget(GL_TEXTURE_2D);
+ SetMacOSSpecificTextureTarget(GL_TEXTURE_2D);
}
#endif // defined(OS_MACOSX)
@@ -363,16 +381,17 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// We need to collect GL strings (VENDOR, RENDERER) for blacklisting purposes.
if (!gl_disabled) {
- if (!use_swiftshader) {
+ if (!gl_use_swiftshader_) {
if (!CollectGraphicsInfo(&gpu_info_))
return false;
- gpu::SetKeysForCrashLogging(gpu_info_);
- gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(
- gpu_info_, gpu_preferences_, command_line, nullptr);
- use_swiftshader = EnableSwiftShaderIfNeeded(
+
+ SetKeysForCrashLogging(gpu_info_);
+ gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
+ command_line, nullptr);
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
#if defined(OS_LINUX)
VLOG(1) << "Quit GPU process launch to fallback to SwiftShader cleanly "
<< "on Linux";
@@ -389,34 +408,54 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
}
#endif // OS_LINUX
}
- } else { // use_swiftshader == true
+ } else { // gl_use_swiftshader_ == true
switch (gpu_preferences_.use_vulkan) {
- case gpu::VulkanImplementationName::kNative: {
+ case VulkanImplementationName::kNative: {
// Collect GPU info, so we can use the blacklist to disable Vulkan if
// needed.
- gpu::GPUInfo gpu_info;
+ GPUInfo gpu_info;
if (!CollectGraphicsInfo(&gpu_info))
return false;
- auto gpu_feature_info = gpu::ComputeGpuFeatureInfo(
+ auto gpu_feature_info = ComputeGpuFeatureInfo(
gpu_info, gpu_preferences_, command_line, nullptr);
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu_feature_info.status_values[gpu::GPU_FEATURE_TYPE_VULKAN];
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] =
+ gpu_feature_info.status_values[GPU_FEATURE_TYPE_VULKAN];
break;
}
- case gpu::VulkanImplementationName::kForcedNative:
- case gpu::VulkanImplementationName::kSwiftshader:
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusEnabled;
+ case VulkanImplementationName::kForcedNative:
+ case VulkanImplementationName::kSwiftshader:
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] =
+ kGpuFeatureStatusEnabled;
break;
- case gpu::VulkanImplementationName::kNone:
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusDisabled;
+ case VulkanImplementationName::kNone:
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] =
+ kGpuFeatureStatusDisabled;
break;
}
}
}
- InitializeVulkan();
+ if (gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] !=
+ kGpuFeatureStatusEnabled ||
+ !InitializeVulkan()) {
+ gpu_preferences_.use_vulkan = VulkanImplementationName::kNone;
+ gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN] =
+ kGpuFeatureStatusDisabled;
+ if (gpu_preferences_.gr_context_type == GrContextType::kVulkan) {
+#if defined(OS_FUCHSIA)
+ // Fuchsia uses ANGLE for GL which requires Vulkan, so don't fall
+ // back to GL if Vulkan init fails.
+ LOG(FATAL) << "Vulkan initialization failed";
+#endif
+ gpu_preferences_.gr_context_type = GrContextType::kGL;
+ }
+ } else {
+ // TODO(https://crbug.com/1095744): It would be better to cleanly tear
+ // down and recreate the VkDevice on VK_ERROR_DEVICE_LOST. Until that
+ // happens, we will exit_on_context_lost to ensure there are no leaks.
+ gpu_feature_info_.enabled_gpu_driver_bug_workarounds.push_back(
+ EXIT_ON_CONTEXT_LOST);
+ }
// Collect GPU process info
if (!gl_disabled) {
@@ -447,16 +486,16 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// Driver may create a compatibility profile context when collect graphics
// information on Linux platform. Try to collect graphics information
// based on core profile context after disabling platform extensions.
- if (!gl_disabled && !use_swiftshader) {
+ if (!gl_disabled && !gl_use_swiftshader_) {
if (!CollectGraphicsInfo(&gpu_info_))
return false;
- gpu::SetKeysForCrashLogging(gpu_info_);
- gpu_feature_info_ = gpu::ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
- command_line, nullptr);
- use_swiftshader = EnableSwiftShaderIfNeeded(
+ SetKeysForCrashLogging(gpu_info_);
+ gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
+ command_line, nullptr);
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
VLOG(1) << "Quit GPU process launch to fallback to SwiftShader cleanly "
<< "on Linux";
return false;
@@ -464,7 +503,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
}
#endif // defined(OS_LINUX)
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
AdjustInfoToSwiftShader();
}
@@ -482,14 +521,14 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
// Software GL is expected to run slowly, so disable the watchdog
// in that case.
// In SwiftShader case, the implementation is actually EGLGLES2.
- if (!use_swiftshader && command_line->HasSwitch(switches::kUseGL)) {
+ if (!gl_use_swiftshader_ && command_line->HasSwitch(switches::kUseGL)) {
std::string use_gl = command_line->GetSwitchValueASCII(switches::kUseGL);
if (use_gl == gl::kGLImplementationSwiftShaderName ||
use_gl == gl::kGLImplementationSwiftShaderForWebGLName) {
- use_swiftshader = true;
+ gl_use_swiftshader_ = true;
}
}
- if (use_swiftshader ||
+ if (gl_use_swiftshader_ ||
gl::GetGLImplementation() == gl::GetSoftwareGLImplementation()) {
gpu_info_.software_rendering = true;
watchdog_thread_ = nullptr;
@@ -499,11 +538,11 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandLine* command_line,
watchdog_init.SetGpuWatchdogPtr(nullptr);
} else if (enable_watchdog && delayed_watchdog_enable) {
if (base::FeatureList::IsEnabled(features::kGpuWatchdogV2)) {
- watchdog_thread_ = gpu::GpuWatchdogThreadImplV2::Create(
+ watchdog_thread_ = GpuWatchdogThreadImplV2::Create(
gpu_preferences_.watchdog_starts_backgrounded);
watchdog_init.SetGpuWatchdogPtr(watchdog_thread_.get());
} else {
- watchdog_thread_ = gpu::GpuWatchdogThreadImplV1::Create(
+ watchdog_thread_ = GpuWatchdogThreadImplV1::Create(
gpu_preferences_.watchdog_starts_backgrounded);
}
}
@@ -544,8 +583,8 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
InitializeGLThreadSafe(command_line, gpu_preferences_, &gpu_info_,
&gpu_feature_info_);
- InitializeVulkan();
+ DisableInProcessGpuVulkan(&gpu_feature_info_, &gpu_preferences_);
default_offscreen_surface_ = gl::init::CreateOffscreenGLSurface(gfx::Size());
UMA_HISTOGRAM_ENUMERATION("GPU.GLImplementation", gl::GetGLImplementation());
@@ -585,7 +624,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
}
#endif // !BUILDFLAG(IS_CHROMECAST)
- bool use_swiftshader = EnableSwiftShaderIfNeeded(
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, needs_more_info);
if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
@@ -594,14 +633,14 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
}
bool gl_disabled = gl::GetGLImplementation() == gl::kGLImplementationDisabled;
- if (!gl_disabled && !use_swiftshader) {
+ if (!gl_disabled && !gl_use_swiftshader_) {
CollectContextGraphicsInfo(&gpu_info_);
gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
command_line, nullptr);
- use_swiftshader = EnableSwiftShaderIfNeeded(
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
gl::init::ShutdownGL(true);
if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed "
@@ -632,14 +671,14 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
// Driver may create a compatibility profile context when collect graphics
// information on Linux platform. Try to collect graphics information
// based on core profile context after disabling platform extensions.
- if (!gl_disabled && !use_swiftshader) {
+ if (!gl_disabled && !gl_use_swiftshader_) {
CollectContextGraphicsInfo(&gpu_info_);
gpu_feature_info_ = ComputeGpuFeatureInfo(gpu_info_, gpu_preferences_,
command_line, nullptr);
- use_swiftshader = EnableSwiftShaderIfNeeded(
+ gl_use_swiftshader_ = EnableSwiftShaderIfNeeded(
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
gl::init::ShutdownGL(true);
if (!gl::init::InitializeGLNoExtensionsOneOff(/*init_bindings*/ true)) {
VLOG(1) << "gl::init::InitializeGLNoExtensionsOneOff failed "
@@ -650,7 +689,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
}
#endif // defined(OS_LINUX)
- if (use_swiftshader) {
+ if (gl_use_swiftshader_) {
AdjustInfoToSwiftShader();
}
@@ -659,6 +698,8 @@ void GpuInit::InitializeInProcess(base::CommandLine* command_line,
std::move(supported_buffer_formats_for_texturing);
#endif
+ DisableInProcessGpuVulkan(&gpu_feature_info_, &gpu_preferences_);
+
UMA_HISTOGRAM_ENUMERATION("GPU.GLImplementation", gl::GetGLImplementation());
}
#endif // OS_ANDROID
@@ -675,63 +716,64 @@ scoped_refptr<gl::GLSurface> GpuInit::TakeDefaultOffscreenSurface() {
return std::move(default_offscreen_surface_);
}
-void GpuInit::InitializeVulkan() {
#if BUILDFLAG(ENABLE_VULKAN)
- if (gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] ==
- gpu::kGpuFeatureStatusEnabled) {
- DCHECK_NE(gpu_preferences_.use_vulkan,
- gpu::VulkanImplementationName::kNone);
- bool vulkan_use_swiftshader = gpu_preferences_.use_vulkan ==
- gpu::VulkanImplementationName::kSwiftshader;
- const bool enforce_protected_memory =
- gpu_preferences_.enforce_vulkan_protected_memory;
- vulkan_implementation_ = gpu::CreateVulkanImplementation(
- vulkan_use_swiftshader,
- enforce_protected_memory ? true : false /* allow_protected_memory */,
- enforce_protected_memory);
- if (!vulkan_implementation_ ||
- !vulkan_implementation_->InitializeVulkanInstance(
- !gpu_preferences_.disable_vulkan_surface)) {
- DLOG(ERROR) << "Failed to create and initialize Vulkan implementation.";
- vulkan_implementation_ = nullptr;
- CHECK(!gpu_preferences_.disable_vulkan_fallback_to_gl_for_testing);
- }
- // TODO(penghuang): Remove GPU.SupportsVulkan and GPU.VulkanVersion from
- // //gpu/config/gpu_info_collector_win.cc when we are finch vulkan on
- // Windows.
- if (!vulkan_use_swiftshader) {
- const bool supports_vulkan = !!vulkan_implementation_;
- UMA_HISTOGRAM_BOOLEAN("GPU.SupportsVulkan", supports_vulkan);
- uint32_t vulkan_version = 0;
- if (supports_vulkan) {
- const auto& vulkan_info =
- vulkan_implementation_->GetVulkanInstance()->vulkan_info();
- vulkan_version = vulkan_info.used_api_version;
- }
- UMA_HISTOGRAM_ENUMERATION(
- "GPU.VulkanVersion", ConvertToHistogramVulkanVersion(vulkan_version));
+bool GpuInit::InitializeVulkan() {
+ DCHECK_EQ(gpu_feature_info_.status_values[GPU_FEATURE_TYPE_VULKAN],
+ kGpuFeatureStatusEnabled);
+ DCHECK_NE(gpu_preferences_.use_vulkan, VulkanImplementationName::kNone);
+ bool vulkan_use_swiftshader =
+ gpu_preferences_.use_vulkan == VulkanImplementationName::kSwiftshader;
+ bool forced_native =
+ gpu_preferences_.use_vulkan == VulkanImplementationName::kForcedNative;
+ bool use_swiftshader = gl_use_swiftshader_ || vulkan_use_swiftshader;
+
+ const bool enforce_protected_memory =
+ gpu_preferences_.enforce_vulkan_protected_memory;
+ vulkan_implementation_ = CreateVulkanImplementation(
+ vulkan_use_swiftshader,
+ enforce_protected_memory ? true : false /* allow_protected_memory */,
+ enforce_protected_memory);
+ if (!vulkan_implementation_ ||
+ !vulkan_implementation_->InitializeVulkanInstance(
+ !gpu_preferences_.disable_vulkan_surface)) {
+ DLOG(ERROR) << "Failed to create and initialize Vulkan implementation.";
+ vulkan_implementation_ = nullptr;
+ CHECK(!gpu_preferences_.disable_vulkan_fallback_to_gl_for_testing);
+ }
+
+ // Vulkan info is no longer collected in gpu/config/gpu_info_collector_win.cc.
+ // The histograms GPU.SupportsVulkan and GPU.VulkanVersion were marked as
+ // expired.
+ // TODO(magchen): Add these two histograms back here and re-enable them in
+ // histograms.xml when we launch the Vulkan finch experiment on Windows.
+ if (!vulkan_use_swiftshader) {
+ const bool supports_vulkan = !!vulkan_implementation_;
+ uint32_t vulkan_version = 0;
+ if (supports_vulkan) {
+ const auto& vulkan_info =
+ vulkan_implementation_->GetVulkanInstance()->vulkan_info();
+ vulkan_version = vulkan_info.used_api_version;
}
}
- if (!vulkan_implementation_) {
- if (gpu_preferences_.gr_context_type == gpu::GrContextType::kVulkan) {
-#if defined(OS_FUCHSIA)
- // Fuchsia uses ANGLE for GL which requires Vulkan, so don't fall
- // back to GL if Vulkan init fails.
- LOG(FATAL) << "Vulkan initialization failed";
-#endif
- gpu_preferences_.gr_context_type = gpu::GrContextType::kGL;
- }
- gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone;
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusDisabled;
- } else {
- gpu_info_.vulkan_info =
- vulkan_implementation_->GetVulkanInstance()->vulkan_info();
+
+ if (!vulkan_implementation_)
+ return false;
+
+ if (!use_swiftshader && !forced_native &&
+ !CheckVulkanCompabilities(
+ vulkan_implementation_->GetVulkanInstance()->vulkan_info(),
+ gpu_info_)) {
+ vulkan_implementation_.reset();
+ return false;
}
-#else
- gpu_preferences_.use_vulkan = gpu::VulkanImplementationName::kNone;
- gpu_feature_info_.status_values[gpu::GPU_FEATURE_TYPE_VULKAN] =
- gpu::kGpuFeatureStatusDisabled;
-#endif // BUILDFLAG(ENABLE_VULKAN)
+
+ gpu_info_.vulkan_info =
+ vulkan_implementation_->GetVulkanInstance()->vulkan_info();
+ return true;
}
+#else // BUILDFLAG(ENABLE_VULKAN)
+bool GpuInit::InitializeVulkan() {
+ return false;
+}
+#endif // !BUILDFLAG(ENABLE_VULKAN)
+
} // namespace gpu
diff --git a/chromium/gpu/ipc/service/gpu_init.h b/chromium/gpu/ipc/service/gpu_init.h
index 5f1d6fcdf02..3c236dbc502 100644
--- a/chromium/gpu/ipc/service/gpu_init.h
+++ b/chromium/gpu/ipc/service/gpu_init.h
@@ -83,9 +83,10 @@ class GPU_IPC_SERVICE_EXPORT GpuInit {
#endif
private:
- void InitializeVulkan();
+ bool InitializeVulkan();
GpuSandboxHelper* sandbox_helper_ = nullptr;
+ bool gl_use_swiftshader_ = false;
std::unique_ptr<GpuWatchdogThread> watchdog_thread_;
GPUInfo gpu_info_;
GpuFeatureInfo gpu_feature_info_;
diff --git a/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.cc b/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.cc
new file mode 100644
index 00000000000..4da13f07c52
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.cc
@@ -0,0 +1,227 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "gpu/ipc/service/gpu_memory_ablation_experiment.h"
+
+#include <algorithm>
+
+#include "base/bind.h"
+#include "base/time/time.h"
+#include "base/trace_event/common/trace_event_common.h"
+#include "components/viz/common/features.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/common/shared_image_usage.h"
+#include "gpu/command_buffer/service/mailbox_manager_impl.h"
+#include "gpu/command_buffer/service/shared_image_factory.h"
+#include "gpu/command_buffer/service/shared_image_representation.h"
+#include "gpu/ipc/common/surface_handle.h"
+#include "gpu/ipc/service/gpu_channel_manager.h"
+#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
+#include "third_party/skia/include/core/SkCanvas.h"
+#include "third_party/skia/include/core/SkColor.h"
+#include "ui/gl/gl_implementation.h"
+
+namespace gpu {
+
+// Main feature flag to control the entire experiment, encompassing both CPU
+// GPU ablations.
+const base::Feature kGPUMemoryAblationFeature{
+ "GPUMemoryAblation", base::FEATURE_DISABLED_BY_DEFAULT};
+
+// TODO(jonross): Replace these feature flags with Field Trial Param lookup.
+const base::Feature kGPUMemoryAblationGPUSmall{
+ "GPUMemoryAblationGPUSmall", base::FEATURE_DISABLED_BY_DEFAULT};
+
+const base::Feature kGPUMemoryAblationGPUMedium{
+ "GPUMemoryAblationGPUMedium", base::FEATURE_DISABLED_BY_DEFAULT};
+
+const base::Feature kGPUMemoryAblationGPULarge{
+ "GPUMemoryAblationGPULarge", base::FEATURE_DISABLED_BY_DEFAULT};
+
+// The size to use when allocating images. The sizes vary based on the chosen
+// experiment.
+constexpr gfx::Size kSmallSize(256, 256);
+constexpr gfx::Size kMediumSize(256 * 4, 256 * 4);
+constexpr gfx::Size kLargeSize(256 * 8, 256 * 8);
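The TODO above proposes replacing the three boolean features with field-trial params; a hedged sketch of what that lookup could look like, using base::FeatureParam from base/metrics/field_trial_params.h (kAblationChunkEdge and AblationImageSize are invented names):

    // Hypothetical field-trial-param version of the size selection above.
    #include "base/metrics/field_trial_params.h"
    #include "ui/gfx/geometry/size.h"

    const base::FeatureParam<int> kAblationChunkEdge{
        &kGPUMemoryAblationFeature, "chunk_edge", 256};

    gfx::Size AblationImageSize() {
      const int edge = kAblationChunkEdge.Get();  // e.g. 256, 1024, or 2048
      return gfx::Size(edge, edge);
    }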
+
+// Image allocation parameters.
+constexpr viz::ResourceFormat kFormat = viz::ResourceFormat::RGBA_8888;
+constexpr uint32_t kUsage = SHARED_IMAGE_USAGE_DISPLAY;
+
+GpuMemoryAblationExperiment::GpuMemoryAblationExperiment(
+ GpuChannelManager* channel_manager)
+ : enabled_(base::FeatureList::IsEnabled(kGPUMemoryAblationFeature)),
+ channel_manager_(channel_manager) {}
+
+GpuMemoryAblationExperiment::~GpuMemoryAblationExperiment() = default;
+
+void GpuMemoryAblationExperiment::OnMemoryAllocated(uint64_t old_size,
+ uint64_t new_size) {
+ if (!enabled_)
+ return;
+ if (!init_) {
+ InitGpu(channel_manager_);
+ }
+ // TODO(jonross): Investigate why there are 0 size allocations.
+ if (new_size > old_size) {
+ // TODO(jonross): Impl CPU ablation
+ if (gpu_enabled_)
+ AllocateGpuMemory();
+ } else if (old_size > new_size) {
+ // TODO(jonross): Impl CPU ablation
+ if (gpu_enabled_ && !mailboxes_.empty()) {
+ DeleteGpuMemory();
+ }
+ }
+}
+
+uint64_t GpuMemoryAblationExperiment::GetPeakMemory(
+ uint32_t sequence_num) const {
+ auto it = sequences_.find(sequence_num);
+ if (it == sequences_.end())
+ return 0u;
+
+ return it->second.peak_memory_;
+}
+
+void GpuMemoryAblationExperiment::StartSequence(uint32_t sequence_num) {
+ sequences_.emplace(sequence_num, SequenceTracker());
+}
+
+void GpuMemoryAblationExperiment::StopSequence(uint32_t sequence_num) {
+ auto it = sequences_.find(sequence_num);
+ if (it == sequences_.end())
+ return;
+
+ TRACE_EVENT_INSTANT2("gpu.memory", "Memory.GPU.PeakMemoryUsage.AblationTimes",
+ TRACE_EVENT_SCOPE_THREAD, "alloc",
+ it->second.allocs_.InMilliseconds(), "dealloc",
+ it->second.deallocs_.InMilliseconds());
+
+ sequences_.erase(it);
+}
+
+void GpuMemoryAblationExperiment::AllocateGpuMemory() {
+ // We can't successfully create an image without a context, so skip the
+ // allocation entirely.
+ if (!MakeContextCurrent())
+ return;
+ base::Time start = base::Time::Now();
+
+ auto mailbox = Mailbox::GenerateForSharedImage();
+ auto color_space = gfx::ColorSpace::CreateSRGB();
+
+ if (!factory_->CreateSharedImage(mailbox, kFormat, size_, color_space,
+ gpu::kNullSurfaceHandle, kUsage)) {
+ return;
+ }
+
+ auto skia_rep = rep_factory_->ProduceSkia(mailbox, context_state_);
+ if (!skia_rep)
+ return;
+
+ auto write_access = skia_rep->BeginScopedWriteAccess(
+ /*begin_semaphores=*/{}, /*end_semaphores=*/{},
+ SharedImageRepresentation::AllowUnclearedAccess::kYes);
+ if (!write_access)
+ return;
+
+ auto* canvas = write_access->surface()->getCanvas();
+ canvas->clear(SK_ColorWHITE);
+
+ mailboxes_.push_back(mailbox);
+
+ base::TimeDelta delta = base::Time::Now() - start;
+ for (auto& it : sequences_)
+ it.second.allocs_ += delta;
+}
+
+void GpuMemoryAblationExperiment::DeleteGpuMemory() {
+ if (mailboxes_.empty())
+ return;
+ base::Time start = base::Time::Now();
+
+ auto mailbox = mailboxes_.front();
+ // We can't successfully destroy the image if we cannot get the context;
+ // however, we still need to clean up our internal state.
+ if (MakeContextCurrent())
+ factory_->DestroySharedImage(mailbox);
+
+ mailboxes_.erase(mailboxes_.begin());
+
+ base::TimeDelta delta = base::Time::Now() - start;
+ for (auto& it : sequences_)
+ it.second.deallocs_ += delta;
+}
+
+void GpuMemoryAblationExperiment::InitGpu(GpuChannelManager* channel_manager) {
+ // The GPU info collection process can be created with no graphical output
+ // possible. Don't initialize in that case, as all future image operations
+ // would fail.
+ if (gl::GetGLImplementation() == gl::kGLImplementationDisabled)
+ return;
+
+ if (base::FeatureList::IsEnabled(kGPUMemoryAblationGPUSmall)) {
+ size_ = kSmallSize;
+ } else if (base::FeatureList::IsEnabled(kGPUMemoryAblationGPUMedium)) {
+ size_ = kMediumSize;
+ } else if (base::FeatureList::IsEnabled(kGPUMemoryAblationGPULarge)) {
+ size_ = kLargeSize;
+ }
+
+ ContextResult result;
+ context_state_ = channel_manager->GetSharedContextState(&result);
+ if (result != ContextResult::kSuccess || !MakeContextCurrent()) {
+ context_state_ = nullptr;
+ return;
+ }
+
+ gpu::GpuMemoryBufferFactory* gmb_factory =
+ channel_manager->gpu_memory_buffer_factory();
+ factory_ = std::make_unique<SharedImageFactory>(
+ channel_manager->gpu_preferences(),
+ channel_manager->gpu_driver_bug_workarounds(),
+ channel_manager->gpu_feature_info(), context_state_.get(),
+ channel_manager->mailbox_manager(),
+ channel_manager->shared_image_manager(),
+ gmb_factory ? gmb_factory->AsImageFactory() : nullptr, this,
+ features::IsUsingSkiaRenderer());
+
+ rep_factory_ = std::make_unique<SharedImageRepresentationFactory>(
+ channel_manager->shared_image_manager(), this);
+ gpu_enabled_ = true;
+ init_ = true;
+}
+
+bool GpuMemoryAblationExperiment::MakeContextCurrent() {
+ return context_state_->MakeCurrent(nullptr);
+}
+
+// MemoryTracker:
+void GpuMemoryAblationExperiment::TrackMemoryAllocatedChange(int64_t delta) {
+ DCHECK(delta >= 0 || gpu_allocation_size_ >= static_cast<uint64_t>(-delta));
+ gpu_allocation_size_ += delta;
+ for (auto& it : sequences_) {
+ if (gpu_allocation_size_ > it.second.peak_memory_)
+ it.second.peak_memory_ = gpu_allocation_size_;
+ }
+}
+
+// Unused MemoryTracker methods that form the basis of memory dumps.
+uint64_t GpuMemoryAblationExperiment::GetSize() const {
+ return 0u;
+}
+
+uint64_t GpuMemoryAblationExperiment::ClientTracingId() const {
+ return 0u;
+}
+
+int GpuMemoryAblationExperiment::ClientId() const {
+ return 0;
+}
+
+uint64_t GpuMemoryAblationExperiment::ContextGroupTracingId() const {
+ return 0u;
+}
+
+} // namespace gpu
\ No newline at end of file
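Stripped of the GPU plumbing, the bookkeeping above reduces to a small amount of state; a self-contained sketch with invented names (AblationBookkeepingSketch, kChunkBytes), showing how acting as its own tracker breaks the monitor -> ablation -> monitor recursion described in the header below:

    // Self-contained sketch of the ablation bookkeeping: each reported
    // increase allocates one chunk, each decrease frees the oldest chunk,
    // and per-sequence peaks are updated by the experiment's own tracking.
    #include <algorithm>
    #include <cstdint>
    #include <deque>
    #include <map>

    class AblationBookkeepingSketch {
     public:
      void OnMemoryAllocated(uint64_t old_size, uint64_t new_size) {
        if (new_size > old_size) {
          chunks_.push_back(kChunkBytes);
          TrackDelta(static_cast<int64_t>(kChunkBytes));
        } else if (old_size > new_size && !chunks_.empty()) {
          TrackDelta(-static_cast<int64_t>(chunks_.front()));
          chunks_.pop_front();
        }
      }

     private:
      // Plays the MemoryTracker role: only this class sees its own usage,
      // so its allocations never feed back into the peak-memory monitor.
      void TrackDelta(int64_t delta) {
        total_ += delta;
        for (auto& seq : peaks_)
          seq.second = std::max(seq.second, total_);
      }

      static constexpr uint64_t kChunkBytes = 256 * 256 * 4;  // RGBA_8888
      uint64_t total_ = 0;
      std::deque<uint64_t> chunks_;
      std::map<uint32_t, uint64_t> peaks_;  // sequence_num -> peak bytes
    };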
diff --git a/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.h b/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.h
new file mode 100644
index 00000000000..e5f2c60c25e
--- /dev/null
+++ b/chromium/gpu/ipc/service/gpu_memory_ablation_experiment.h
@@ -0,0 +1,133 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_IPC_SERVICE_GPU_MEMORY_ABLATION_EXPERIMENT_H_
+#define GPU_IPC_SERVICE_GPU_MEMORY_ABLATION_EXPERIMENT_H_
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/containers/flat_map.h"
+#include "base/feature_list.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
+#include "gpu/command_buffer/common/mailbox.h"
+#include "gpu/command_buffer/service/memory_tracking.h"
+#include "gpu/ipc/service/gpu_ipc_service_export.h"
+#include "ui/gfx/geometry/size.h"
+
+namespace gpu {
+class GpuChannelManager;
+class SharedContextState;
+class SharedImageFactory;
+class SharedImageRepresentationFactory;
+
+extern const base::Feature kGPUMemoryAblationFeature;
+
+// When enabled, this experiment allocates additional memory alongside each
+// normal allocation. This will allow a study of the correlation between
+// memory usage and performance metrics.
+//
+// Each increase reported to OnMemoryAllocated will allocate a chunk of memory.
+// Each decrease reported will release a previously allocated chunk.
+//
+// GpuMemoryAblationExperiment acts as the MemoryTracker for all of its own
+// allocations. This prevents a cycle of memory allocations:
+// - GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange
+// - GpuMemoryAblationExperiment::OnMemoryAllocated
+// - MemoryTracker::TrackMemoryAllocatedChange
+// - GpuChannelManager::GpuPeakMemoryMonitor::OnMemoryAllocatedChange
+// - etc.
+//
+// Instead this will track the memory it allocated, which can be retrieved via
+// GetPeakMemory.
+class GPU_IPC_SERVICE_EXPORT GpuMemoryAblationExperiment
+ : public MemoryTracker {
+ public:
+ explicit GpuMemoryAblationExperiment(GpuChannelManager* channel_manager);
+ ~GpuMemoryAblationExperiment() override;
+
+ // Allocates a chunk of memory in response to each reported increase; each
+ // reported decrease releases a previously allocated chunk.
+ void OnMemoryAllocated(uint64_t old_size, uint64_t new_size);
+
+ uint64_t GetPeakMemory(uint32_t sequence_num) const;
+ void StartSequence(uint32_t sequence_num);
+ void StopSequence(uint32_t sequence_num);
+
+ private:
+ // Tracks the time spent doing the allocations/deallocations in order to
+ // determine if the change in metrics was solely due to the ablation.
+ //
+ // The memory allocated for ablation is not reported directly to
+ // GpuChannelManager::GpuPeakMemoryMonitor, as GpuMemoryAblationExperiment
+ // acts as the MemoryTracker for its own allocations. This tracks the peak
+ // allocation so that it can be reported.
+ struct SequenceTracker {
+ public:
+ SequenceTracker() = default;
+ ~SequenceTracker() = default;
+
+ base::TimeDelta allocs_;
+ base::TimeDelta deallocs_;
+ uint64_t peak_memory_ = 0u;
+ };
+
+ void AllocateGpuMemory();
+ void DeleteGpuMemory();
+
+ // Sets up the GPU resources needed to allocate GPU memory. This is modeled
+ // on SharedImageStub, which is not used directly as there is no external
+ // host to pair a GpuChannel with.
+ void InitGpu(GpuChannelManager* channel_manager);
+
+ // This must be called before any actions on |factory_|. If this method
+ // fails, subsequent work on |factory_| will also fail. Also modeled on
+ // SharedImageStub.
+ bool MakeContextCurrent();
+
+ // MemoryTracker:
+ void TrackMemoryAllocatedChange(int64_t delta) override;
+ uint64_t GetSize() const override;
+ uint64_t ClientTracingId() const override;
+ int ClientId() const override;
+ uint64_t ContextGroupTracingId() const override;
+
+ // Whether or not the entire experiment is enabled.
+ bool enabled_;
+ bool init_ = false;
+ // True when a GPU ablation was requested and initialization succeeded.
+ bool gpu_enabled_ = false;
+
+ // Size of image to allocate, determined by experiment parameters.
+ gfx::Size size_;
+
+ // The Mailboxes allocated for each image.
+ std::vector<Mailbox> mailboxes_;
+
+ // Tracks the time spent doing the allocations/deallocations, along with the
+ // peak memory allocated, allowing us to determine whether a change in
+ // metrics was solely due to the ablation.
+ base::flat_map<uint32_t, SequenceTracker> sequences_;
+
+ // The memory allocated for ablation is not reported directly to
+ // GpuChannelManager::GpuPeakMemoryMonitor, as this class acts as the
+ // MemoryTracker for its own allocations. Tracks the current amount of
+ // memory allocated as a part of the ablation.
+ uint64_t gpu_allocation_size_ = 0;
+
+ scoped_refptr<SharedContextState> context_state_;
+ std::unique_ptr<SharedImageFactory> factory_;
+ std::unique_ptr<SharedImageRepresentationFactory> rep_factory_;
+ GpuChannelManager* channel_manager_;
+ base::WeakPtrFactory<GpuMemoryAblationExperiment> weak_factory_{this};
+};
+
+} // namespace gpu
+
+#endif // GPU_IPC_SERVICE_GPU_MEMORY_ABLATION_EXPERIMENT_H_
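A hypothetical usage sketch of the sequence API declared above, assuming a valid GpuChannelManager*; per the class comment, the real caller is GpuChannelManager's peak-memory monitor:

    // Hypothetical caller (illustration only).
    GpuMemoryAblationExperiment experiment(channel_manager);
    constexpr uint32_t kSequenceNum = 1;
    experiment.StartSequence(kSequenceNum);
    // Report allocations as they happen; increases trigger ablation chunks.
    experiment.OnMemoryAllocated(/*old_size=*/0, /*new_size=*/4096);
    // Peak ablation bytes observed since StartSequence().
    uint64_t peak = experiment.GetPeakMemory(kSequenceNum);
    experiment.StopSequence(kSequenceNum);  // emits a trace event, then forgets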
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
index cc5d8a9f1c9..1e4bae0022a 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.cc
@@ -9,6 +9,7 @@
#include "base/debug/alias.h"
#include "base/files/file_util.h"
#include "base/format_macros.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop_current.h"
#include "base/metrics/histogram_functions.h"
@@ -53,17 +54,6 @@ const int kNewGpuTimeout = 17000;
const int kNewGpuTimeout = 15000;
#endif
-// Histogram parameters for GPU.WatchdogThread.V1.ExtraThreadTime and
-// GPU.WatchdogThread.V1.WaitTime
-constexpr base::TimeDelta kMin = base::TimeDelta::FromSeconds(1);
-constexpr base::TimeDelta kMax = base::TimeDelta::FromSeconds(150);
-constexpr int kBuckets = 50;
-
-// Histogram recorded in OnWatchdogTimeout()
-void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.V1.Timeout", timeout_event);
-}
-
#if defined(USE_X11)
const base::FilePath::CharType kTtyFilePath[] =
FILE_PATH_LITERAL("/sys/class/tty/tty0/active");
@@ -150,11 +140,6 @@ void GpuWatchdogThreadImplV1::OnForegrounded() {
base::Unretained(this)));
}
-void GpuWatchdogThreadImplV1::GpuWatchdogHistogram(
- GpuWatchdogThreadEvent thread_event) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event);
-}
-
bool GpuWatchdogThreadImplV1::IsGpuHangDetectedForTesting() {
return false;
}
@@ -166,7 +151,6 @@ void GpuWatchdogThreadImplV1::Init() {
void GpuWatchdogThreadImplV1::CleanUp() {
weak_factory_.InvalidateWeakPtrs();
- more_gpu_thread_time_allowed_ = false;
armed_ = false;
}
@@ -258,24 +242,11 @@ GpuWatchdogThreadImplV1::~GpuWatchdogThreadImplV1() {
#endif
base::MessageLoopCurrent::Get()->RemoveTaskObserver(&task_observer_);
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogEnd);
}
void GpuWatchdogThreadImplV1::OnAcknowledge() {
CHECK(base::PlatformThread::CurrentId() == GetThreadId());
- // For metrics only
- if (more_gpu_thread_time_allowed_) {
- base::TimeDelta wait_time =
- base::TimeTicks::Now() - last_timeout_timeticks_;
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.V1.ExtraThreadTime",
- wait_time, kMin, kMax, kBuckets);
- GpuWatchdogTimeoutHistogram(
- GpuWatchdogTimeoutEvent::kProgressAfterMoreThreadTime);
-
- more_gpu_thread_time_allowed_ = false;
- }
-
// The check has already been acknowledged and another has already been
// scheduled by a previous call to OnAcknowledge. It is normal for a
// watched thread to see armed_ being true multiple times before
@@ -372,25 +343,11 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Should not get here while the system is suspended.
DCHECK(!suspension_counter_.HasRefs());
- base::TimeTicks function_start = base::TimeTicks::Now();
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeout);
-
- // If this metric is added too early (eg. watchdog creation time), it cannot
- // be persistent. The histogram data will be lost after crash or browser exit.
- // Delay the recording of kGpuWatchdogStart until the first OnCheckTimeout().
- if (!is_watchdog_start_histogram_recorded) {
- is_watchdog_start_histogram_recorded = true;
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogStart);
- }
-
// If the watchdog woke up significantly behind schedule, disarm and reset
// the watchdog check. This is to prevent the watchdog thread from terminating
// when a machine wakes up from sleep or hibernation, which would otherwise
// appear to be a hang.
if (base::Time::Now() > suspension_timeout_) {
- // Reset the timeticks after resume for metrics.
- last_timeout_timeticks_ = function_start;
-
OnAcknowledge();
return;
}
@@ -406,12 +363,6 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
base::ThreadTicks current_cpu_time = GetWatchedThreadTime();
base::TimeDelta time_since_arm = current_cpu_time - arm_cpu_time_;
if (use_thread_cpu_time_ && (time_since_arm < timeout_)) {
- // For metrics
- if (!more_gpu_thread_time_allowed_) {
- more_gpu_thread_time_allowed_ = true;
- last_timeout_timeticks_ = function_start;
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kMoreThreadTime);
- }
task_runner()->PostDelayedTask(
FROM_HERE,
@@ -421,7 +372,6 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
return;
}
#endif
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kTimeoutWait);
// For minimal developer annoyance, don't keep terminating. You need to skip
// the call to base::Process::Terminate below in a debugger for this to be
@@ -439,11 +389,6 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Don't crash if we're not on the TTY of our host X11 server.
UpdateActiveTTY();
if (host_tty_ != -1 && active_tty_ != -1 && host_tty_ != active_tty_) {
- // Only record for the time there is a change on TTY
- if (last_active_tty_ != active_tty_) {
- GpuWatchdogTimeoutHistogram(
- GpuWatchdogTimeoutEvent::kContinueOnNonHostServerTty);
- }
OnAcknowledge();
return;
}
@@ -504,23 +449,9 @@ void GpuWatchdogThreadImplV1::DeliberatelyTerminateToRecoverFromHang() {
// Check it one last time before crashing.
if (!base::subtle::NoBarrier_Load(&awaiting_acknowledge_)) {
- { // For metrics only
- base::TimeDelta wait_time;
- if (more_gpu_thread_time_allowed_) {
- more_gpu_thread_time_allowed_ = false;
- wait_time = base::TimeTicks::Now() - last_timeout_timeticks_;
- } else {
- wait_time = base::TimeTicks::Now() - function_start;
- }
- base::UmaHistogramCustomTimes("GPU.WatchdogThread.V1.WaitTime", wait_time,
- kMin, kMax, kBuckets);
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kProgressAfterWait);
- }
OnAcknowledge();
return;
}
- GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent::kKill);
- GpuWatchdogHistogram(GpuWatchdogThreadEvent::kGpuWatchdogKill);
// Deliberately crash the process to create a crash dump.
*((volatile int*)0) = 0x1337;
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread.h b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
index b39ae227318..ad26565910a 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread.h
@@ -100,8 +100,6 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThread : public base::Thread,
// Continue the watchdog after a pause.
virtual void ResumeWatchdog() = 0;
- virtual void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) = 0;
-
// For gpu testing only. Return status for the watchdog tests
virtual bool IsGpuHangDetectedForTesting() = 0;
@@ -130,7 +128,6 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
void OnGpuProcessTearDown() override {}
void ResumeWatchdog() override {}
void PauseWatchdog() override {}
- void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) override;
bool IsGpuHangDetectedForTesting() override;
// gl::ProgressReporter implementation:
@@ -264,16 +261,6 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV1
base::Time check_time_;
base::TimeTicks check_timeticks_;
- // The time in the last OnCheckTimeout()
- base::TimeTicks last_timeout_timeticks_;
-
- // After GPU hang detected, whether the GPU thread is allowed to continue due
- // to not spending enough thread time.
- bool more_gpu_thread_time_allowed_ = false;
-
- // whether GpuWatchdogThreadEvent::kGpuWatchdogStart has been recorded.
- bool is_watchdog_start_histogram_recorded = false;
-
#if defined(USE_X11)
FILE* tty_file_;
int host_tty_;
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
index 5dc976af739..6df85496cfd 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.cc
@@ -284,7 +284,8 @@ void GpuWatchdogThreadImplV2::OnAddPowerObserver() {
DCHECK(watchdog_thread_task_runner_->BelongsToCurrentThread());
DCHECK(base::PowerMonitor::IsInitialized());
- is_power_observer_added_ = base::PowerMonitor::AddObserver(this);
+ base::PowerMonitor::AddObserver(this);
+ is_power_observer_added_ = true;
}
// Running on the watchdog thread.
@@ -660,7 +661,6 @@ void GpuWatchdogThreadImplV2::DeliberatelyTerminateToRecoverFromHang() {
void GpuWatchdogThreadImplV2::GpuWatchdogHistogram(
GpuWatchdogThreadEvent thread_event) {
- base::UmaHistogramEnumeration("GPU.WatchdogThread.Event.V2", thread_event);
base::UmaHistogramEnumeration("GPU.WatchdogThread.Event", thread_event);
}
diff --git a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
index 2e0fd292ebe..4c79e535b5e 100644
--- a/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
+++ b/chromium/gpu/ipc/service/gpu_watchdog_thread_v2.h
@@ -13,7 +13,7 @@ namespace gpu {
// If the actual time the watched GPU thread spent doing actual work is less
// than the watchdog timeout, the GPU thread can continue running through
// OnGPUWatchdogTimeout at most kMaxCountOfMoreGpuThreadTimeAllowed times
// before the GPU thread is killed.
-constexpr int kMaxCountOfMoreGpuThreadTimeAllowed = 4;
+constexpr int kMaxCountOfMoreGpuThreadTimeAllowed = 3;
#endif
constexpr int kMaxExtraCyclesBeforeKill = 0;
@@ -40,8 +40,6 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
void OnGpuProcessTearDown() override;
void ResumeWatchdog() override;
void PauseWatchdog() override;
- // Records "GPU.WatchdogThread.Event.V2" and "GPU.WatchdogThread.Event".
- void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event) override;
bool IsGpuHangDetectedForTesting() override;
void WaitForPowerObserverAddedForTesting() override;
@@ -91,6 +89,9 @@ class GPU_IPC_SERVICE_EXPORT GpuWatchdogThreadImplV2
// Do not change the function name. It is used for [GPU HANG] crash reports.
void DeliberatelyTerminateToRecoverFromHang();
+ // Records "GPU.WatchdogThread.Event".
+ void GpuWatchdogHistogram(GpuWatchdogThreadEvent thread_event);
+
// Histogram recorded in OnWatchdogTimeout()
// Records "GPU.WatchdogThread.Timeout"
void GpuWatchdogTimeoutHistogram(GpuWatchdogTimeoutEvent timeout_event);
diff --git a/chromium/gpu/ipc/service/image_transport_surface_linux.cc b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
index c5c4d6ce7ed..05ce217a16d 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_linux.cc
+++ b/chromium/gpu/ipc/service/image_transport_surface_linux.cc
@@ -5,7 +5,6 @@
#include "gpu/ipc/service/image_transport_surface.h"
#include "gpu/ipc/service/pass_through_image_transport_surface.h"
-#include "ui/gl/gl_surface_glx.h"
#include "ui/gl/init/gl_factory.h"
namespace gpu {
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
index a298747e3b7..d565d2ab23d 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.h
@@ -58,13 +58,31 @@ class ImageTransportSurfaceOverlayMacBase : public BaseClass,
bool IsOffscreen() override;
gfx::SwapResult SwapBuffers(
gl::GLSurface::PresentationCallback callback) override;
+ void SwapBuffersAsync(
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) override;
gfx::SwapResult PostSubBuffer(
int x,
int y,
int width,
int height,
gl::GLSurface::PresentationCallback callback) override;
+ void PostSubBufferAsync(
+ int x,
+ int y,
+ int width,
+ int height,
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) override;
+ gfx::SwapResult CommitOverlayPlanes(
+ gl::GLSurface::PresentationCallback callback) override;
+ void CommitOverlayPlanesAsync(
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) override;
+
bool SupportsPostSubBuffer() override;
+ bool SupportsCommitOverlayPlanes() override;
+ bool SupportsAsyncSwap() override;
gfx::Size GetSize() override;
void* GetHandle() override;
gl::GLSurfaceFormat GetFormat() override;
@@ -80,6 +98,7 @@ class ImageTransportSurfaceOverlayMacBase : public BaseClass,
void ScheduleCALayerInUseQuery(
std::vector<gl::GLSurface::CALayerInUseQuery> queries) override;
bool IsSurfaceless() const override;
+ gfx::SurfaceOrigin GetOrigin() const override;
// ui::GpuSwitchingObserver implementation.
void OnGpuSwitched(gl::GpuPreference active_gpu_heuristic) override;
@@ -88,8 +107,8 @@ class ImageTransportSurfaceOverlayMacBase : public BaseClass,
~ImageTransportSurfaceOverlayMacBase() override;
gfx::SwapResult SwapBuffersInternal(
- const gfx::Rect& pixel_damage_rect,
- gl::GLSurface::PresentationCallback callback);
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback);
void ApplyBackpressure();
void BufferPresented(gl::GLSurface::PresentationCallback callback,
const gfx::PresentationFeedback& feedback);
diff --git a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
index eb46993b8f4..f0dd2928aef 100644
--- a/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
+++ b/chromium/gpu/ipc/service/image_transport_surface_overlay_mac.mm
@@ -51,6 +51,15 @@ ImageTransportSurfaceOverlayMacBase<BaseClass>::
ca_layer_tree_coordinator_.reset(new ui::CALayerTreeCoordinator(
use_remote_layer_api_, allow_av_sample_buffer_display_layer));
+
+ // Create the CAContext to send this to the GPU process, and the layer for
+ // the context.
+ if (use_remote_layer_api_) {
+ CGSConnectionID connection_id = CGSMainConnectionID();
+ ca_context_.reset([[CAContext contextWithCGSConnection:connection_id
+ options:@{}] retain]);
+ [ca_context_ setLayer:ca_layer_tree_coordinator_->GetCALayerForDisplay()];
+ }
}
template <typename BaseClass>
@@ -63,14 +72,6 @@ ImageTransportSurfaceOverlayMacBase<
template <typename BaseClass>
bool ImageTransportSurfaceOverlayMacBase<BaseClass>::Initialize(
gl::GLSurfaceFormat format) {
- // Create the CAContext to send this to the GPU process, and the layer for
- // the context.
- if (use_remote_layer_api_) {
- CGSConnectionID connection_id = CGSMainConnectionID();
- ca_context_.reset([
- [CAContext contextWithCGSConnection:connection_id options:@{}] retain]);
- [ca_context_ setLayer:ca_layer_tree_coordinator_->GetCALayerForDisplay()];
- }
return true;
}
@@ -112,8 +113,8 @@ void ImageTransportSurfaceOverlayMacBase<BaseClass>::BufferPresented(
template <typename BaseClass>
gfx::SwapResult
ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersInternal(
- const gfx::Rect& pixel_damage_rect,
- gl::GLSurface::PresentationCallback callback) {
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) {
TRACE_EVENT0("gpu", "ImageTransportSurfaceOverlayMac::SwapBuffersInternal");
// Do a GL fence for flush to apply back-pressure before drawing.
@@ -126,7 +127,7 @@ ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersInternal(
base::TimeTicks before_transaction_time = base::TimeTicks::Now();
{
TRACE_EVENT0("gpu", "CommitPendingTreesToCA");
- ca_layer_tree_coordinator_->CommitPendingTreesToCA(pixel_damage_rect);
+ ca_layer_tree_coordinator_->CommitPendingTreesToCA();
base::TimeTicks after_transaction_time = base::TimeTicks::Now();
UMA_HISTOGRAM_TIMES("GPU.IOSurface.CATransactionTime",
after_transaction_time - before_transaction_time);
@@ -173,6 +174,15 @@ ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersInternal(
}
// Send the swap parameters to the browser.
+ if (completion_callback) {
+ base::ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::BindOnce(
+ std::move(completion_callback),
+ gfx::SwapCompletionResult(
+ gfx::SwapResult::SWAP_ACK,
+ std::make_unique<gfx::CALayerParams>(params.ca_layer_params))));
+ }
delegate_->DidSwapBuffersComplete(std::move(params));
constexpr int64_t kRefreshIntervalInMicroseconds =
base::Time::kMicrosecondsPerSecond / 60;
@@ -184,7 +194,8 @@ ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersInternal(
FROM_HERE,
base::BindOnce(
&ImageTransportSurfaceOverlayMacBase<BaseClass>::BufferPresented,
- weak_ptr_factory_.GetWeakPtr(), std::move(callback), feedback));
+ weak_ptr_factory_.GetWeakPtr(), std::move(presentation_callback),
+ feedback));
return gfx::SwapResult::SWAP_ACK;
}
@@ -192,8 +203,15 @@ template <typename BaseClass>
gfx::SwapResult ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffers(
gl::GLSurface::PresentationCallback callback) {
return SwapBuffersInternal(
- gfx::Rect(0, 0, pixel_size_.width(), pixel_size_.height()),
- std::move(callback));
+ base::DoNothing(), std::move(callback));
+}
+
+template <typename BaseClass>
+void ImageTransportSurfaceOverlayMacBase<BaseClass>::SwapBuffersAsync(
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) {
+ SwapBuffersInternal(
+ std::move(completion_callback), std::move(presentation_callback));
}
template <typename BaseClass>
@@ -203,8 +221,34 @@ gfx::SwapResult ImageTransportSurfaceOverlayMacBase<BaseClass>::PostSubBuffer(
int width,
int height,
gl::GLSurface::PresentationCallback callback) {
- return SwapBuffersInternal(gfx::Rect(x, y, width, height),
- std::move(callback));
+ return SwapBuffersInternal(base::DoNothing(), std::move(callback));
+}
+
+template <typename BaseClass>
+void ImageTransportSurfaceOverlayMacBase<BaseClass>::PostSubBufferAsync(
+ int x,
+ int y,
+ int width,
+ int height,
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) {
+ SwapBuffersInternal(std::move(completion_callback),
+ std::move(presentation_callback));
+}
+
+template <typename BaseClass>
+gfx::SwapResult
+ImageTransportSurfaceOverlayMacBase<BaseClass>::CommitOverlayPlanes(
+ gl::GLSurface::PresentationCallback callback) {
+ return SwapBuffersInternal(base::DoNothing(), std::move(callback));
+}
+
+template <typename BaseClass>
+void ImageTransportSurfaceOverlayMacBase<BaseClass>::CommitOverlayPlanesAsync(
+ gl::GLSurface::SwapCompletionCallback completion_callback,
+ gl::GLSurface::PresentationCallback presentation_callback) {
+ SwapBuffersInternal(std::move(completion_callback),
+ std::move(presentation_callback));
}
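A hedged caller-side sketch of the new async entry points; the callback signatures follow gl::GLSurface's SwapCompletionCallback and PresentationCallback typedefs, and everything else here is invented for illustration:

    // Hypothetical caller of the async swap path.
    surface->SwapBuffersAsync(
        base::BindOnce([](gfx::SwapCompletionResult result) {
          // Runs after the CALayer tree transaction has been committed.
          DCHECK(result.swap_result == gfx::SwapResult::SWAP_ACK);
        }),
        base::BindOnce([](const gfx::PresentationFeedback& feedback) {
          // Runs once presentation feedback is available.
        }));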
template <typename BaseClass>
@@ -213,6 +257,17 @@ bool ImageTransportSurfaceOverlayMacBase<BaseClass>::SupportsPostSubBuffer() {
}
template <typename BaseClass>
+bool ImageTransportSurfaceOverlayMacBase<
+ BaseClass>::SupportsCommitOverlayPlanes() {
+ return true;
+}
+
+template <typename BaseClass>
+bool ImageTransportSurfaceOverlayMacBase<BaseClass>::SupportsAsyncSwap() {
+ return true;
+}
+
+template <typename BaseClass>
gfx::Size ImageTransportSurfaceOverlayMacBase<BaseClass>::GetSize() {
return gfx::Size();
}
@@ -304,6 +359,12 @@ bool ImageTransportSurfaceOverlayMacBase<BaseClass>::IsSurfaceless() const {
}
template <typename BaseClass>
+gfx::SurfaceOrigin ImageTransportSurfaceOverlayMacBase<BaseClass>::GetOrigin()
+ const {
+ return gfx::SurfaceOrigin::kTopLeft;
+}
+
+template <typename BaseClass>
bool ImageTransportSurfaceOverlayMacBase<BaseClass>::Resize(
const gfx::Size& pixel_size,
float scale_factor,
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
index fa58d426738..4d0703ea055 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.cc
@@ -272,17 +272,18 @@ void PassThroughImageTransportSurface::FinishSwapBuffersAsync(
SwapCompletionCallback callback,
gfx::SwapResponse response,
uint64_t local_swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence) {
+ gfx::SwapCompletionResult result) {
// TODO(afrantzis): It's probably not ideal to introduce a wait here.
// However, since this is a temporary step to maintain existing behavior
// until we are ready to expose the gpu_fence further, and fences are only
// enabled with a flag, this should be fine for now.
- if (gpu_fence)
- gpu_fence->Wait();
- response.result = result;
+ if (result.gpu_fence) {
+ result.gpu_fence->Wait();
+ result.gpu_fence.reset();
+ }
+ response.result = result.swap_result;
FinishSwapBuffers(std::move(response), local_swap_id);
- std::move(callback).Run(result, nullptr);
+ std::move(callback).Run(std::move(result));
}
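For reference, the assumed shape of gfx::SwapCompletionResult as exercised by this patch; this is inferred from the usage above and in the Mac surface code earlier, not copied from the authoritative ui/gfx definition:

    // Assumed member layout of the bundled result type (sketch only).
    struct SwapCompletionResult {
      gfx::SwapResult swap_result;
      std::unique_ptr<gfx::GpuFence> gpu_fence;
      std::unique_ptr<gfx::CALayerParams> ca_layer_params;
    };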
void PassThroughImageTransportSurface::BufferPresented(
diff --git a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
index e463dc1e95b..373221bc923 100644
--- a/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
+++ b/chromium/gpu/ipc/service/pass_through_image_transport_surface.h
@@ -63,8 +63,7 @@ class PassThroughImageTransportSurface : public gl::GLSurfaceAdapter {
void FinishSwapBuffersAsync(SwapCompletionCallback callback,
gfx::SwapResponse response,
uint64_t local_swap_id,
- gfx::SwapResult result,
- std::unique_ptr<gfx::GpuFence> gpu_fence);
+ gfx::SwapCompletionResult result);
void BufferPresented(PresentationCallback callback,
uint64_t local_swap_id,
diff --git a/chromium/gpu/ipc/service/shared_image_stub.cc b/chromium/gpu/ipc/service/shared_image_stub.cc
index 30789237258..4dac0b496fb 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.cc
+++ b/chromium/gpu/ipc/service/shared_image_stub.cc
@@ -354,13 +354,16 @@ void SharedImageStub::OnPresentSwapChain(const Mailbox& mailbox,
#if defined(OS_FUCHSIA)
void SharedImageStub::OnRegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
if (!id || !token) {
OnError();
return;
}
- if (!factory_->RegisterSysmemBufferCollection(id, std::move(token))) {
+ if (!factory_->RegisterSysmemBufferCollection(id, std::move(token), format,
+ usage)) {
OnError();
}
}
@@ -389,7 +392,7 @@ void SharedImageStub::OnRegisterSharedImageUploadBuffer(
}
}
-bool SharedImageStub::MakeContextCurrent() {
+bool SharedImageStub::MakeContextCurrent(bool needs_gl) {
DCHECK(context_state_);
if (context_state_->context_lost()) {
@@ -400,13 +403,9 @@ bool SharedImageStub::MakeContextCurrent() {
// |factory_| never writes to the surface, so pass nullptr to
// improve performance. https://crbug.com/457431
auto* context = context_state_->real_context();
- if (context->IsCurrent(nullptr) ||
- context->MakeCurrent(context_state_->surface())) {
- return true;
- }
- context_state_->MarkContextLost();
- LOG(ERROR) << "SharedImageStub: MakeCurrent failed";
- return false;
+ if (context->IsCurrent(nullptr))
+ return !context_state_->CheckResetStatus(needs_gl);
+ return context_state_->MakeCurrent(/*surface=*/nullptr, needs_gl);
}
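The same rewrite appears again below in SharedImageInterfaceInProcess; the shared pattern, sketched as a free function using only the SharedContextState calls exercised in this patch:

    // Sketch of the shared make-current pattern (illustrative free function).
    bool MakeCurrentChecked(SharedContextState* state, bool needs_gl) {
      if (state->context_lost())
        return false;
      // Already current: only verify the driver has not reset the context.
      if (state->real_context()->IsCurrent(nullptr))
        return !state->CheckResetStatus(needs_gl);
      // The factories never draw to a surface, so make current surfaceless.
      return state->MakeCurrent(/*surface=*/nullptr, needs_gl);
    }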
ContextResult SharedImageStub::MakeContextCurrentAndCreateFactory() {
@@ -421,7 +420,9 @@ ContextResult SharedImageStub::MakeContextCurrentAndCreateFactory() {
}
DCHECK(context_state_);
DCHECK(!context_state_->context_lost());
- if (!MakeContextCurrent()) {
+ // Some shared image backing factories use GL in their constructors, so we
+ // need GL even if Chrome is using a non-GL backing.
+ if (!MakeContextCurrent(/*needs_gl=*/true)) {
context_state_ = nullptr;
return ContextResult::kTransientFailure;
}
diff --git a/chromium/gpu/ipc/service/shared_image_stub.h b/chromium/gpu/ipc/service/shared_image_stub.h
index 1bc71f842cd..781c1dc55b7 100644
--- a/chromium/gpu/ipc/service/shared_image_stub.h
+++ b/chromium/gpu/ipc/service/shared_image_stub.h
@@ -87,11 +87,13 @@ class GPU_IPC_SERVICE_EXPORT SharedImageStub
#endif // OS_WIN
#if defined(OS_FUCHSIA)
void OnRegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token);
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage);
void OnReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id);
#endif // OS_FUCHSIA
- bool MakeContextCurrent();
+ bool MakeContextCurrent(bool needs_gl = false);
ContextResult MakeContextCurrentAndCreateFactory();
void OnError();
diff --git a/chromium/gpu/ipc/service/stream_texture_android.cc b/chromium/gpu/ipc/service/stream_texture_android.cc
index 74faadd37b9..bae8cca172f 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.cc
+++ b/chromium/gpu/ipc/service/stream_texture_android.cc
@@ -128,17 +128,6 @@ void StreamTexture::ReleaseChannel() {
channel_ = nullptr;
}
-// gpu::gles2::GLStreamTextureMatrix implementation
-void StreamTexture::GetTextureMatrix(float xform[16]) {
- static constexpr float kIdentity[16]{
- 1, 0, 0, 0, //
- 0, 1, 0, 0, //
- 0, 0, 1, 0, //
- 0, 0, 0, 1 //
- };
- memcpy(xform, kIdentity, sizeof(kIdentity));
-}
-
bool StreamTexture::IsUsingGpuMemory() const {
// Once the image is bound during the first update, we just replace/update the
  // same image every time in the future and hence the image is always bound to a
@@ -236,8 +225,12 @@ void StreamTexture::OnFrameAvailable() {
gfx::Rect visible_rect;
gfx::Size coded_size;
- texture_owner_->GetCodedSizeAndVisibleRect(rotated_visible_size_, &coded_size,
- &visible_rect);
+ if (!texture_owner_->GetCodedSizeAndVisibleRect(rotated_visible_size_,
+ &coded_size, &visible_rect)) {
+    // If we failed to get the right size, fall back to the visible size.
+ coded_size = rotated_visible_size_;
+ visible_rect = gfx::Rect(coded_size);
+ }
if (coded_size != coded_size_ || visible_rect != visible_rect_) {
coded_size_ = coded_size;
diff --git a/chromium/gpu/ipc/service/stream_texture_android.h b/chromium/gpu/ipc/service/stream_texture_android.h
index d99197dac47..6fedde43727 100644
--- a/chromium/gpu/ipc/service/stream_texture_android.h
+++ b/chromium/gpu/ipc/service/stream_texture_android.h
@@ -81,7 +81,6 @@ class StreamTexture : public StreamTextureSharedImageInterface,
GetAHardwareBuffer() override;
// gpu::gles2::GLStreamTextureMatrix implementation
- void GetTextureMatrix(float xform[16]) override;
void NotifyPromotionHint(bool promotion_hint,
int display_x,
int display_y,
diff --git a/chromium/gpu/ipc/shared_image_interface_in_process.cc b/chromium/gpu/ipc/shared_image_interface_in_process.cc
index 715ea24292b..744162df245 100644
--- a/chromium/gpu/ipc/shared_image_interface_in_process.cc
+++ b/chromium/gpu/ipc/shared_image_interface_in_process.cc
@@ -94,7 +94,7 @@ void SharedImageInterfaceInProcess::DestroyOnGpu(
completion->Signal();
}
-bool SharedImageInterfaceInProcess::MakeContextCurrent() {
+bool SharedImageInterfaceInProcess::MakeContextCurrent(bool needs_gl) {
if (!context_state_)
return false;
@@ -104,12 +104,9 @@ bool SharedImageInterfaceInProcess::MakeContextCurrent() {
// |shared_image_factory_| never writes to the surface, so skip unnecessary
// MakeCurrent to improve performance. https://crbug.com/457431
auto* context = context_state_->real_context();
- if (context->IsCurrent(nullptr) ||
- context->MakeCurrent(context_state_->surface()))
- return true;
-
- context_state_->MarkContextLost();
- return false;
+ if (context->IsCurrent(nullptr))
+ return !context_state_->CheckResetStatus(needs_gl);
+ return context_state_->MakeCurrent(/*surface=*/nullptr, needs_gl);
}
void SharedImageInterfaceInProcess::LazyCreateSharedImageFactory() {
@@ -117,6 +114,11 @@ void SharedImageInterfaceInProcess::LazyCreateSharedImageFactory() {
if (shared_image_factory_)
return;
+  // Some shared image backing factories use GL in their constructors, so GL
+  // is needed even if Chrome is using a non-GL backing.
+ if (!MakeContextCurrent(/*needs_gl=*/true))
+ return;
+
// We need WrappedSkImage to support creating a SharedImage with pixel data
// when GL is unavailable. This is used in various unit tests.
const bool enable_wrapped_sk_image =
@@ -308,7 +310,9 @@ void SharedImageInterfaceInProcess::PresentSwapChain(
#if defined(OS_FUCHSIA)
void SharedImageInterfaceInProcess::RegisterSysmemBufferCollection(
gfx::SysmemBufferCollectionId id,
- zx::channel token) {
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) {
NOTREACHED();
}
void SharedImageInterfaceInProcess::ReleaseSysmemBufferCollection(
@@ -383,6 +387,16 @@ void SharedImageInterfaceInProcess::DestroySharedImageOnGpuThread(
}
}
+void SharedImageInterfaceInProcess::WaitSyncTokenOnGpuThread(
+ const SyncToken& sync_token) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(gpu_sequence_checker_);
+ if (!MakeContextCurrent())
+ return;
+
+ mailbox_manager_->PushTextureUpdates(sync_token);
+ sync_point_client_state_->ReleaseFenceSync(sync_token.release_count());
+}
+
SyncToken SharedImageInterfaceInProcess::GenUnverifiedSyncToken() {
base::AutoLock lock(lock_);
return MakeSyncToken(next_fence_sync_release_ - 1);
@@ -395,6 +409,16 @@ SyncToken SharedImageInterfaceInProcess::GenVerifiedSyncToken() {
return sync_token;
}
+void SharedImageInterfaceInProcess::WaitSyncToken(const SyncToken& sync_token) {
+ base::AutoLock lock(lock_);
+
+ ScheduleGpuTask(
+ base::BindOnce(&SharedImageInterfaceInProcess::WaitSyncTokenOnGpuThread,
+ base::Unretained(this),
+ MakeSyncToken(next_fence_sync_release_++)),
+ {sync_token});
+}
+
void SharedImageInterfaceInProcess::Flush() {
// No need to flush in this implementation.
}
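WaitSyncToken() follows the deferred pattern used throughout this class: the caller's token is attached as a fence to the scheduled GPU task, and a fresh release count is generated so later work can chain on it. A caller-side sketch (function and variable names are illustrative):

  // Sketch: order this interface's subsequent work after a producer context.
  void SyncWithProducer(gpu::SharedImageInterfaceInProcess* sii,
                        const gpu::SyncToken& producer_token) {
    sii->WaitSyncToken(producer_token);
    // Work issued through |sii| from here on is ordered after the producer.
    gpu::SyncToken fence = sii->GenUnverifiedSyncToken();
    // |fence| can in turn gate a downstream consumer.
  }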
diff --git a/chromium/gpu/ipc/shared_image_interface_in_process.h b/chromium/gpu/ipc/shared_image_interface_in_process.h
index 60b1a3dc318..714911006e9 100644
--- a/chromium/gpu/ipc/shared_image_interface_in_process.h
+++ b/chromium/gpu/ipc/shared_image_interface_in_process.h
@@ -113,7 +113,9 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
#if defined(OS_FUCHSIA)
// Registers a sysmem buffer collection. Not reached in this implementation.
void RegisterSysmemBufferCollection(gfx::SysmemBufferCollectionId id,
- zx::channel token) override;
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) override;
// Not reached in this implementation.
void ReleaseSysmemBufferCollection(gfx::SysmemBufferCollectionId id) override;
@@ -127,6 +129,8 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
// commands on this interface have executed on the service side.
SyncToken GenVerifiedSyncToken() override;
+ void WaitSyncToken(const SyncToken& sync_token) override;
+
// Flush the SharedImageInterface, issuing any deferred IPCs.
void Flush() override;
@@ -150,7 +154,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
std::vector<SyncToken> sync_token_fences);
// Only called on the gpu thread.
- bool MakeContextCurrent();
+ bool MakeContextCurrent(bool needs_gl = false);
void LazyCreateSharedImageFactory();
void CreateSharedImageOnGpuThread(const Mailbox& mailbox,
viz::ResourceFormat format,
@@ -177,6 +181,7 @@ class GL_IN_PROCESS_CONTEXT_EXPORT SharedImageInterfaceInProcess
void UpdateSharedImageOnGpuThread(const Mailbox& mailbox,
const SyncToken& sync_token);
void DestroySharedImageOnGpuThread(const Mailbox& mailbox);
+ void WaitSyncTokenOnGpuThread(const SyncToken& sync_token);
void WrapTaskWithGpuUrl(base::OnceClosure task);
// Used to schedule work on the gpu thread. This is a raw pointer for now
diff --git a/chromium/gpu/khronos_glcts_support/generate_khronos_glcts_tests.py b/chromium/gpu/khronos_glcts_support/generate_khronos_glcts_tests.py
index 91de5cf4c33..b55379421dd 100755
--- a/chromium/gpu/khronos_glcts_support/generate_khronos_glcts_tests.py
+++ b/chromium/gpu/khronos_glcts_support/generate_khronos_glcts_tests.py
@@ -37,9 +37,8 @@ def ReadFileAsLines(filename):
  Reads a file, yielding each non-blank line
  that doesn't begin with #
"""
- file = open(filename, "r")
- lines = file.readlines()
- file.close()
+ with open(filename, "r") as in_file:
+ lines = in_file.readlines()
for line in lines:
line = line.strip()
if len(line) > 0 and not line.startswith("#"):
diff --git a/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc b/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
index 12bef6d06e4..adf521bd01e 100644
--- a/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
+++ b/chromium/gpu/skia_bindings/gl_bindings_skia_cmd_buffer.cc
@@ -4,6 +4,7 @@
#include "gpu/skia_bindings/gl_bindings_skia_cmd_buffer.h"
+#include "base/logging.h"
#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "third_party/skia/include/gpu/gl/GrGLInterface.h"
diff --git a/chromium/gpu/skia_bindings/grcontext_for_webgpu_interface.cc b/chromium/gpu/skia_bindings/grcontext_for_webgpu_interface.cc
index 17a38606c27..47a79cd928a 100644
--- a/chromium/gpu/skia_bindings/grcontext_for_webgpu_interface.cc
+++ b/chromium/gpu/skia_bindings/grcontext_for_webgpu_interface.cc
@@ -4,6 +4,7 @@
#include "gpu/skia_bindings/grcontext_for_webgpu_interface.h"
+#include "base/logging.h"
#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/webgpu_interface.h"
#include "gpu/command_buffer/common/capabilities.h"
diff --git a/chromium/gpu/vulkan/BUILD.gn b/chromium/gpu/vulkan/BUILD.gn
index 95d013e8bea..e0559150895 100644
--- a/chromium/gpu/vulkan/BUILD.gn
+++ b/chromium/gpu/vulkan/BUILD.gn
@@ -85,6 +85,7 @@ if (enable_vulkan) {
"vulkan_command_pool.h",
"vulkan_crash_keys.cc",
"vulkan_crash_keys.h",
+ "vulkan_cxx.h",
"vulkan_device_queue.cc",
"vulkan_device_queue.h",
"vulkan_fence_helper.cc",
@@ -203,6 +204,7 @@ if (enable_vulkan) {
"tests/basic_vulkan_test.h",
"tests/vulkan_test.cc",
"tests/vulkan_tests_main.cc",
+ "vulkan_cxx_unittest.cc",
"vulkan_fence_helper_unittest.cc",
"vulkan_image_unittest.cc",
]
diff --git a/chromium/gpu/vulkan/PRESUBMIT.py b/chromium/gpu/vulkan/PRESUBMIT.py
index c12a8c8de34..6deb715ca00 100644
--- a/chromium/gpu/vulkan/PRESUBMIT.py
+++ b/chromium/gpu/vulkan/PRESUBMIT.py
@@ -24,8 +24,8 @@ def CommonChecks(input_api, output_api):
if generated_files and not generating_files:
long_text = 'Changed files:\n'
- for file in generated_files:
- long_text += file.LocalPath() + '\n'
+ for generated_file in generated_files:
+ long_text += generated_file.LocalPath() + '\n'
long_text += '\n'
messages.append(output_api.PresubmitError(
'Vulkan function pointer generated files changed but the generator '
diff --git a/chromium/gpu/vulkan/demo/vulkan_demo.cc b/chromium/gpu/vulkan/demo/vulkan_demo.cc
index e46d726fb99..b8dc416e517 100644
--- a/chromium/gpu/vulkan/demo/vulkan_demo.cc
+++ b/chromium/gpu/vulkan/demo/vulkan_demo.cc
@@ -204,6 +204,7 @@ void VulkanDemo::RenderFrame() {
.fSignalSemaphores = &semaphore,
};
sk_surface_->flush(SkSurface::BackendSurfaceAccess::kPresent, flush_info);
+ sk_surface_->getContext()->submit();
auto backend = sk_surface_->getBackendRenderTarget(
SkSurface::kFlushRead_BackendHandleAccess);
GrVkImageInfo vk_image_info;
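The added submit() reflects Skia's split between recording and submission: SkSurface::flush() only records the GPU work, and GrContext::submit() hands it to the Vulkan queue. A sketch of the pattern; the syncCpu parameter defaulting to false is assumed from the Skia API at this revision:

  // Sketch: record, then explicitly submit to the queue.
  sk_surface_->flush(SkSurface::BackendSurfaceAccess::kPresent, flush_info);
  sk_surface_->getContext()->submit(/*syncCpu=*/false);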
diff --git a/chromium/gpu/vulkan/generate_bindings.py b/chromium/gpu/vulkan/generate_bindings.py
index eed96c896b9..1067f8fa5c2 100755
--- a/chromium/gpu/vulkan/generate_bindings.py
+++ b/chromium/gpu/vulkan/generate_bindings.py
@@ -22,11 +22,13 @@ from reg import Registry
registry = Registry()
registry.loadFile(open(path.join(vulkan_reg_path, "vk.xml")))
+VULKAN_REQUIRED_API_VERSION = 'VK_API_VERSION_1_1'
+
VULKAN_UNASSOCIATED_FUNCTIONS = [
{
'functions': [
# vkGetInstanceProcAddr belongs here but is handled specially.
- # vkEnumerateInstanceVersion belongs here but is handled specially.
+ 'vkEnumerateInstanceVersion',
'vkCreateInstance',
'vkEnumerateInstanceExtensionProperties',
'vkEnumerateInstanceLayerProperties',
@@ -43,9 +45,11 @@ VULKAN_INSTANCE_FUNCTIONS = [
'vkEnumerateDeviceLayerProperties',
'vkEnumeratePhysicalDevices',
'vkGetDeviceProcAddr',
- 'vkGetPhysicalDeviceFeatures',
+ 'vkGetPhysicalDeviceFeatures2',
'vkGetPhysicalDeviceFormatProperties',
+ 'vkGetPhysicalDeviceImageFormatProperties2',
'vkGetPhysicalDeviceMemoryProperties',
+ 'vkGetPhysicalDeviceMemoryProperties2',
'vkGetPhysicalDeviceProperties',
'vkGetPhysicalDeviceQueueFamilyProperties',
]
@@ -97,22 +101,6 @@ VULKAN_INSTANCE_FUNCTIONS = [
'vkCreateImagePipeSurfaceFUCHSIA',
]
},
- {
- 'min_api_version': 'VK_API_VERSION_1_1',
- 'functions': [
- 'vkGetPhysicalDeviceImageFormatProperties2',
- ]
- },
- {
- # vkGetPhysicalDeviceFeatures2() is defined in Vulkan 1.1 or suffixed in the
- # VK_KHR_get_physical_device_properties2 extension.
- 'min_api_version': 'VK_API_VERSION_1_1',
- 'extension': 'VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME',
- 'extension_suffix': 'KHR',
- 'functions': [
- 'vkGetPhysicalDeviceFeatures2',
- ]
- },
]
VULKAN_DEVICE_FUNCTIONS = [
@@ -123,7 +111,9 @@ VULKAN_DEVICE_FUNCTIONS = [
'vkAllocateMemory',
'vkBeginCommandBuffer',
'vkBindBufferMemory',
+ 'vkBindBufferMemory2',
'vkBindImageMemory',
+ 'vkBindImageMemory2',
'vkCmdBeginRenderPass',
'vkCmdCopyBuffer',
'vkCmdCopyBufferToImage',
@@ -164,9 +154,12 @@ VULKAN_DEVICE_FUNCTIONS = [
'vkFreeMemory',
'vkInvalidateMappedMemoryRanges',
'vkGetBufferMemoryRequirements',
+ 'vkGetBufferMemoryRequirements2',
'vkGetDeviceQueue',
+ 'vkGetDeviceQueue2',
'vkGetFenceStatus',
'vkGetImageMemoryRequirements',
+ 'vkGetImageMemoryRequirements2',
'vkMapMemory',
'vkQueueSubmit',
'vkQueueWaitIdle',
@@ -178,14 +171,6 @@ VULKAN_DEVICE_FUNCTIONS = [
]
},
{
- 'min_api_version': 'VK_API_VERSION_1_1',
- 'functions': [
- 'vkGetDeviceQueue2',
- 'vkGetBufferMemoryRequirements2',
- 'vkGetImageMemoryRequirements2',
- ]
- },
- {
'ifdef': 'defined(OS_ANDROID)',
'extension':
'VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME',
@@ -278,10 +263,11 @@ LICENSE_AND_HEADER = """\
"""
-def WriteFunctionsInternal(file, functions, gen_content, check_extension=False):
+def WriteFunctionsInternal(out_file, functions, gen_content,
+ check_extension=False):
for group in functions:
if 'ifdef' in group:
- file.write('#if %s\n' % group['ifdef'])
+ out_file.write('#if %s\n' % group['ifdef'])
extension = group['extension'] if 'extension' in group else ''
min_api_version = \
@@ -289,53 +275,53 @@ def WriteFunctionsInternal(file, functions, gen_content, check_extension=False):
if not check_extension:
for func in group['functions']:
- file.write(gen_content(func))
+ out_file.write(gen_content(func))
elif not extension and not min_api_version:
for func in group['functions']:
- file.write(gen_content(func))
+ out_file.write(gen_content(func))
else:
if min_api_version:
- file.write(' if (api_version >= %s) {\n' % min_api_version)
+ out_file.write(' if (api_version >= %s) {\n' % min_api_version)
for func in group['functions']:
- file.write(
+ out_file.write(
gen_content(func))
- file.write('}\n')
+ out_file.write('}\n')
if extension:
- file.write('else ')
+ out_file.write('else ')
if extension:
- file.write('if (gfx::HasExtension(enabled_extensions, %s)) {\n' %
+ out_file.write('if (gfx::HasExtension(enabled_extensions, %s)) {\n' %
extension)
extension_suffix = \
group['extension_suffix'] if 'extension_suffix' in group \
else ''
for func in group['functions']:
- file.write(gen_content(func, extension_suffix))
+ out_file.write(gen_content(func, extension_suffix))
- file.write('}\n')
+ out_file.write('}\n')
if 'ifdef' in group:
- file.write('#endif // %s\n' % group['ifdef'])
- file.write('\n')
+ out_file.write('#endif // %s\n' % group['ifdef'])
+ out_file.write('\n')
-def WriteFunctions(file, functions, template, check_extension=False):
+def WriteFunctions(out_file, functions, template, check_extension=False):
def gen_content(func, suffix=''):
return template.substitute({'name': func,'extension_suffix': suffix})
- WriteFunctionsInternal(file, functions, gen_content, check_extension)
+ WriteFunctionsInternal(out_file, functions, gen_content, check_extension)
-def WriteFunctionDeclarations(file, functions):
- template = Template(' VulkanFunction<PFN_${name}> ${name}Fn;\n')
- WriteFunctions(file, functions, template)
+def WriteFunctionDeclarations(out_file, functions):
+ template = Template(' VulkanFunction<PFN_${name}> ${name};\n')
+ WriteFunctions(out_file, functions, template)
-def WriteMacros(file, functions):
+def WriteMacros(out_file, functions):
def gen_content(func, suffix=''):
if func not in registry.cmddict:
# Some fuchsia functions are not in the vulkan registry, so use macro for
# them.
template = Template(
- '#define $name gpu::GetVulkanFunctionPointers()->${name}Fn\n')
+ '#define $name gpu::GetVulkanFunctionPointers()->${name}\n')
return template.substitute({'name': func, 'extension_suffix' : suffix})
none_str = lambda s: s if s else ''
cmd = registry.cmddict[func].elem
@@ -348,7 +334,7 @@ def WriteMacros(file, functions):
pdecl += text + tail
n = len(params)
- callstat = 'return gpu::GetVulkanFunctionPointers()->%sFn(' % func
+ callstat = 'return gpu::GetVulkanFunctionPointers()->%s(' % func
paramdecl = '('
if n > 0:
paramnames = (''.join(t for t in p.itertext())
@@ -364,12 +350,12 @@ def WriteMacros(file, functions):
pdecl += paramdecl
return 'ALWAYS_INLINE %s { %s; }\n' % (pdecl, callstat)
- WriteFunctionsInternal(file, functions, gen_content)
+ WriteFunctionsInternal(out_file, functions, gen_content)
-def GenerateHeaderFile(file):
+def GenerateHeaderFile(out_file):
"""Generates gpu/vulkan/vulkan_function_pointers.h"""
- file.write(LICENSE_AND_HEADER +
+ out_file.write(LICENSE_AND_HEADER +
"""
#ifndef GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_
@@ -408,6 +394,8 @@ namespace gpu {
struct VulkanFunctionPointers;
+constexpr uint32_t kVulkanRequiredApiVersion = %s;
+
COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers* GetVulkanFunctionPointers();
struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
@@ -437,12 +425,12 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
public:
using Fn = R(VKAPI_PTR*)(Args...);
- explicit operator bool() {
+ explicit operator bool() const {
return !!fn_;
}
NO_SANITIZE("cfi-icall")
- R operator()(Args... args) {
+ R operator()(Args... args) const {
return fn_(args...);
}
@@ -460,28 +448,27 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
};
// Unassociated functions
- VulkanFunction<PFN_vkEnumerateInstanceVersion> vkEnumerateInstanceVersionFn;
- VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddrFn;
+ VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddr;
-""")
+""" % VULKAN_REQUIRED_API_VERSION)
- WriteFunctionDeclarations(file, VULKAN_UNASSOCIATED_FUNCTIONS)
+ WriteFunctionDeclarations(out_file, VULKAN_UNASSOCIATED_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
// Instance functions
""")
- WriteFunctionDeclarations(file, VULKAN_INSTANCE_FUNCTIONS);
+ WriteFunctionDeclarations(out_file, VULKAN_INSTANCE_FUNCTIONS);
- file.write("""\
+ out_file.write("""\
// Device functions
""")
- WriteFunctionDeclarations(file, VULKAN_DEVICE_FUNCTIONS)
+ WriteFunctionDeclarations(out_file, VULKAN_DEVICE_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
};
} // namespace gpu
@@ -489,33 +476,32 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
// Unassociated functions
""")
- WriteMacros(file, [{'functions': [ 'vkGetInstanceProcAddr' ,
- 'vkEnumerateInstanceVersion']}])
- WriteMacros(file, VULKAN_UNASSOCIATED_FUNCTIONS)
+ WriteMacros(out_file, [{'functions': [ 'vkGetInstanceProcAddr']}])
+ WriteMacros(out_file, VULKAN_UNASSOCIATED_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
// Instance functions
""")
- WriteMacros(file, VULKAN_INSTANCE_FUNCTIONS);
+ WriteMacros(out_file, VULKAN_INSTANCE_FUNCTIONS);
- file.write("""\
+ out_file.write("""\
// Device functions
""")
- WriteMacros(file, VULKAN_DEVICE_FUNCTIONS)
+ WriteMacros(out_file, VULKAN_DEVICE_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
#endif // GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_""")
-def WriteFunctionPointerInitialization(file, proc_addr_function, parent,
+def WriteFunctionPointerInitialization(out_file, proc_addr_function, parent,
functions):
- template = Template(""" ${name}Fn = reinterpret_cast<PFN_${name}>(
+ template = Template(""" ${name} = reinterpret_cast<PFN_${name}>(
${get_proc_addr}(${parent}, "${name}${extension_suffix}"));
- if (!${name}Fn) {
+ if (!${name}) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "${name}${extension_suffix}";
return false;
@@ -529,24 +515,24 @@ def WriteFunctionPointerInitialization(file, proc_addr_function, parent,
'name': '${name}', 'extension_suffix': '${extension_suffix}',
'get_proc_addr': proc_addr_function, 'parent': parent}))
- WriteFunctions(file, functions, template, check_extension=True)
+ WriteFunctions(out_file, functions, template, check_extension=True)
-def WriteUnassociatedFunctionPointerInitialization(file, functions):
- WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddr', 'nullptr',
- functions)
+def WriteUnassociatedFunctionPointerInitialization(out_file, functions):
+ WriteFunctionPointerInitialization(out_file, 'vkGetInstanceProcAddr',
+ 'nullptr', functions)
-def WriteInstanceFunctionPointerInitialization(file, functions):
- WriteFunctionPointerInitialization(file, 'vkGetInstanceProcAddr',
+def WriteInstanceFunctionPointerInitialization(out_file, functions):
+ WriteFunctionPointerInitialization(out_file, 'vkGetInstanceProcAddr',
'vk_instance', functions)
-def WriteDeviceFunctionPointerInitialization(file, functions):
- WriteFunctionPointerInitialization(file, 'vkGetDeviceProcAddr', 'vk_device',
- functions)
+def WriteDeviceFunctionPointerInitialization(out_file, functions):
+ WriteFunctionPointerInitialization(out_file, 'vkGetDeviceProcAddr',
+ 'vk_device', functions)
-def GenerateSourceFile(file):
+def GenerateSourceFile(out_file):
"""Generates gpu/vulkan/vulkan_function_pointers.cc"""
- file.write(LICENSE_AND_HEADER +
+ out_file.write(LICENSE_AND_HEADER +
"""
#include "gpu/vulkan/vulkan_function_pointers.h"
@@ -568,23 +554,17 @@ bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
// vkGetInstanceProcAddr must be handled specially since it gets its function
  // pointer through base::GetFunctionPointerFromNativeLibrary(). Other Vulkan
// functions don't do this.
- vkGetInstanceProcAddrFn = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
+ vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
base::GetFunctionPointerFromNativeLibrary(vulkan_loader_library,
"vkGetInstanceProcAddr"));
- if (!vkGetInstanceProcAddrFn)
+ if (!vkGetInstanceProcAddr)
return false;
-
- vkEnumerateInstanceVersionFn =
- reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
- vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
- // vkEnumerateInstanceVersion didn't exist in Vulkan 1.0, so we should
- // proceed even if we fail to get vkEnumerateInstanceVersion pointer.
""")
WriteUnassociatedFunctionPointerInitialization(
- file, VULKAN_UNASSOCIATED_FUNCTIONS)
+ out_file, VULKAN_UNASSOCIATED_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
return true;
}
@@ -593,11 +573,13 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
VkInstance vk_instance,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
+ DCHECK_GE(api_version, kVulkanRequiredApiVersion);
""")
- WriteInstanceFunctionPointerInitialization(file, VULKAN_INSTANCE_FUNCTIONS);
+ WriteInstanceFunctionPointerInitialization(
+ out_file, VULKAN_INSTANCE_FUNCTIONS);
- file.write("""\
+ out_file.write("""\
return true;
}
@@ -606,11 +588,12 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
VkDevice vk_device,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
+ DCHECK_GE(api_version, kVulkanRequiredApiVersion);
// Device functions
""")
- WriteDeviceFunctionPointerInitialization(file, VULKAN_DEVICE_FUNCTIONS)
+ WriteDeviceFunctionPointerInitialization(out_file, VULKAN_DEVICE_FUNCTIONS)
- file.write("""\
+ out_file.write("""\
return true;
}
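With the Fn suffix dropped, each entrypoint is declared as a VulkanFunction<> member carrying the Vulkan name itself, and registry functions get an ALWAYS_INLINE forwarder instead of a bare macro. The generated code has roughly this shape (a hand-written approximation, not the literal generator output):

  // Declaration emitted into struct VulkanFunctionPointers:
  //   VulkanFunction<PFN_vkCreateInstance> vkCreateInstance;
  // Forwarder emitted into the header:
  ALWAYS_INLINE VkResult vkCreateInstance(
      const VkInstanceCreateInfo* pCreateInfo,
      const VkAllocationCallbacks* pAllocator,
      VkInstance* pInstance) {
    return gpu::GetVulkanFunctionPointers()->vkCreateInstance(
        pCreateInfo, pAllocator, pInstance);
  }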
diff --git a/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc b/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc
index 4192cd859ce..0a900d01805 100644
--- a/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc
+++ b/chromium/gpu/vulkan/init/gr_vk_memory_allocator_impl.cc
@@ -24,9 +24,9 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
GrVkMemoryAllocatorImpl& operator=(const GrVkMemoryAllocatorImpl&) = delete;
private:
- bool allocateMemoryForImage(VkImage image,
- AllocationPropertyFlags flags,
- GrVkBackendMemory* backend_memory) override {
+ VkResult allocateImageMemory(VkImage image,
+ AllocationPropertyFlags flags,
+ GrVkBackendMemory* backend_memory) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::allocateMemoryForImage");
VmaAllocationCreateInfo info;
@@ -53,17 +53,15 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
VmaAllocation allocation;
VkResult result = vma::AllocateMemoryForImage(allocator_, image, &info,
&allocation, nullptr);
- if (VK_SUCCESS != result) {
- return false;
- }
- *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);
- return true;
+ if (VK_SUCCESS == result)
+ *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);
+ return result;
}
- bool allocateMemoryForBuffer(VkBuffer buffer,
- BufferUsage usage,
- AllocationPropertyFlags flags,
- GrVkBackendMemory* backend_memory) override {
+ VkResult allocateBufferMemory(VkBuffer buffer,
+ BufferUsage usage,
+ AllocationPropertyFlags flags,
+ GrVkBackendMemory* backend_memory) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::allocateMemoryForBuffer");
VmaAllocationCreateInfo info;
@@ -121,12 +119,11 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
&allocation, nullptr);
}
}
- if (VK_SUCCESS != result) {
- return false;
- }
- *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);
- return true;
+ if (VK_SUCCESS == result)
+ *backend_memory = reinterpret_cast<GrVkBackendMemory>(allocation);
+
+ return result;
}
void freeMemory(const GrVkBackendMemory& memory) override {
@@ -162,15 +159,12 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
alloc->fBackendMemory = memory;
}
- void* mapMemory(const GrVkBackendMemory& memory) override {
+ VkResult mapMemory(const GrVkBackendMemory& memory, void** data) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::mapMemory");
const VmaAllocation allocation =
reinterpret_cast<const VmaAllocation>(memory);
- void* data;
- return vma::MapMemory(allocator_, allocation, &data) == VK_SUCCESS
- ? data
- : nullptr;
+ return vma::MapMemory(allocator_, allocation, data);
}
void unmapMemory(const GrVkBackendMemory& memory) override {
@@ -181,24 +175,24 @@ class GrVkMemoryAllocatorImpl : public GrVkMemoryAllocator {
vma::UnmapMemory(allocator_, allocation);
}
- void flushMappedMemory(const GrVkBackendMemory& memory,
- VkDeviceSize offset,
- VkDeviceSize size) override {
+ VkResult flushMemory(const GrVkBackendMemory& memory,
+ VkDeviceSize offset,
+ VkDeviceSize size) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::flushMappedMemory");
const VmaAllocation allocation =
reinterpret_cast<const VmaAllocation>(memory);
- vma::FlushAllocation(allocator_, allocation, offset, size);
+ return vma::FlushAllocation(allocator_, allocation, offset, size);
}
- void invalidateMappedMemory(const GrVkBackendMemory& memory,
- VkDeviceSize offset,
- VkDeviceSize size) override {
+ VkResult invalidateMemory(const GrVkBackendMemory& memory,
+ VkDeviceSize offset,
+ VkDeviceSize size) override {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("gpu.vulkan.vma"),
"GrVkMemoryAllocatorImpl::invalidateMappedMemory");
const VmaAllocation allocation =
reinterpret_cast<const VmaAllocation>(memory);
- vma::InvalidateAllocation(allocator_, allocation, offset, size);
+ return vma::InvalidateAllocation(allocator_, allocation, offset, size);
}
uint64_t totalUsedMemory() const override {
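Returning VkResult through GrVkMemoryAllocator lets callers distinguish failure modes instead of collapsing everything to a bool. A caller-side sketch of the new mapMemory() contract, matching the signature above (|src| and |size| are illustrative):

  // Sketch: map, check the VkResult, then use the pointer.
  void* mapped = nullptr;
  VkResult result = allocator->mapMemory(backend_memory, &mapped);
  if (result != VK_SUCCESS) {
    DLOG(ERROR) << "mapMemory failed: " << result;
    return;
  }
  memcpy(mapped, src, size);
  allocator->unmapMemory(backend_memory);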
diff --git a/chromium/gpu/vulkan/vma_wrapper.cc b/chromium/gpu/vulkan/vma_wrapper.cc
index 1c8d72f598d..d0541c92ad6 100644
--- a/chromium/gpu/vulkan/vma_wrapper.cc
+++ b/chromium/gpu/vulkan/vma_wrapper.cc
@@ -18,27 +18,31 @@ VkResult CreateAllocator(VkPhysicalDevice physical_device,
VmaAllocator* pAllocator) {
auto* function_pointers = gpu::GetVulkanFunctionPointers();
VmaVulkanFunctions functions = {
- function_pointers->vkGetPhysicalDevicePropertiesFn.get(),
- function_pointers->vkGetPhysicalDeviceMemoryPropertiesFn.get(),
- function_pointers->vkAllocateMemoryFn.get(),
- function_pointers->vkFreeMemoryFn.get(),
- function_pointers->vkMapMemoryFn.get(),
- function_pointers->vkUnmapMemoryFn.get(),
- function_pointers->vkFlushMappedMemoryRangesFn.get(),
- function_pointers->vkInvalidateMappedMemoryRangesFn.get(),
- function_pointers->vkBindBufferMemoryFn.get(),
- function_pointers->vkBindImageMemoryFn.get(),
- function_pointers->vkGetBufferMemoryRequirementsFn.get(),
- function_pointers->vkGetImageMemoryRequirementsFn.get(),
- function_pointers->vkCreateBufferFn.get(),
- function_pointers->vkDestroyBufferFn.get(),
- function_pointers->vkCreateImageFn.get(),
- function_pointers->vkDestroyImageFn.get(),
- function_pointers->vkCmdCopyBufferFn.get(),
- function_pointers->vkGetBufferMemoryRequirements2Fn.get(),
- function_pointers->vkGetImageMemoryRequirements2Fn.get(),
+ function_pointers->vkGetPhysicalDeviceProperties.get(),
+ function_pointers->vkGetPhysicalDeviceMemoryProperties.get(),
+ function_pointers->vkAllocateMemory.get(),
+ function_pointers->vkFreeMemory.get(),
+ function_pointers->vkMapMemory.get(),
+ function_pointers->vkUnmapMemory.get(),
+ function_pointers->vkFlushMappedMemoryRanges.get(),
+ function_pointers->vkInvalidateMappedMemoryRanges.get(),
+ function_pointers->vkBindBufferMemory.get(),
+ function_pointers->vkBindImageMemory.get(),
+ function_pointers->vkGetBufferMemoryRequirements.get(),
+ function_pointers->vkGetImageMemoryRequirements.get(),
+ function_pointers->vkCreateBuffer.get(),
+ function_pointers->vkDestroyBuffer.get(),
+ function_pointers->vkCreateImage.get(),
+ function_pointers->vkDestroyImage.get(),
+ function_pointers->vkCmdCopyBuffer.get(),
+ function_pointers->vkGetBufferMemoryRequirements2.get(),
+ function_pointers->vkGetImageMemoryRequirements2.get(),
+ function_pointers->vkBindBufferMemory2.get(),
+ function_pointers->vkBindImageMemory2.get(),
+ function_pointers->vkGetPhysicalDeviceMemoryProperties2.get(),
};
+ static_assert(kVulkanRequiredApiVersion >= VK_API_VERSION_1_1, "");
VmaAllocatorCreateInfo allocator_info = {
.flags = VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT,
.physicalDevice = physical_device,
@@ -51,6 +55,7 @@ VkResult CreateAllocator(VkPhysicalDevice physical_device,
.preferredLargeHeapBlockSize = 4 * 1024 * 1024,
.pVulkanFunctions = &functions,
.instance = instance,
+ .vulkanApiVersion = kVulkanRequiredApiVersion,
};
return vmaCreateAllocator(&allocator_info, pAllocator);
@@ -113,18 +118,18 @@ void FreeMemory(VmaAllocator allocator, VmaAllocation allocation) {
vmaFreeMemory(allocator, allocation);
}
-void FlushAllocation(VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize offset,
- VkDeviceSize size) {
- vmaFlushAllocation(allocator, allocation, offset, size);
+VkResult FlushAllocation(VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size) {
+ return vmaFlushAllocation(allocator, allocation, offset, size);
}
-void InvalidateAllocation(VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize offset,
- VkDeviceSize size) {
- vmaInvalidateAllocation(allocator, allocation, offset, size);
+VkResult InvalidateAllocation(VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size) {
+ return vmaInvalidateAllocation(allocator, allocation, offset, size);
}
void GetAllocationInfo(VmaAllocator allocator,
diff --git a/chromium/gpu/vulkan/vma_wrapper.h b/chromium/gpu/vulkan/vma_wrapper.h
index 02e3b5ff7ae..502bdbcd15e 100644
--- a/chromium/gpu/vulkan/vma_wrapper.h
+++ b/chromium/gpu/vulkan/vma_wrapper.h
@@ -66,16 +66,16 @@ COMPONENT_EXPORT(VULKAN)
void FreeMemory(VmaAllocator allocator, VmaAllocation allocation);
COMPONENT_EXPORT(VULKAN)
-void FlushAllocation(VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize offset,
- VkDeviceSize size);
+VkResult FlushAllocation(VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
COMPONENT_EXPORT(VULKAN)
-void InvalidateAllocation(VmaAllocator allocator,
- VmaAllocation allocation,
- VkDeviceSize offset,
- VkDeviceSize size);
+VkResult InvalidateAllocation(VmaAllocator allocator,
+ VmaAllocation allocation,
+ VkDeviceSize offset,
+ VkDeviceSize size);
COMPONENT_EXPORT(VULKAN)
void GetAllocationInfo(VmaAllocator allocator,
diff --git a/chromium/gpu/vulkan/vulkan_command_buffer.cc b/chromium/gpu/vulkan/vulkan_command_buffer.cc
index d3cb4b4fe3c..56b91da1beb 100644
--- a/chromium/gpu/vulkan/vulkan_command_buffer.cc
+++ b/chromium/gpu/vulkan/vulkan_command_buffer.cc
@@ -8,6 +8,7 @@
#include "gpu/vulkan/vulkan_command_pool.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
+#include "gpu/vulkan/vulkan_util.h"
namespace gpu {
@@ -174,7 +175,7 @@ bool VulkanCommandBuffer::Submit(uint32_t num_wait_semaphores,
}
result =
- vkQueueSubmit(device_queue_->GetVulkanQueue(), 1, &submit_info, fence);
+ QueueSubmitHook(device_queue_->GetVulkanQueue(), 1, &submit_info, fence);
if (VK_SUCCESS != result) {
vkDestroyFence(device_queue_->GetVulkanDevice(), fence, nullptr);
diff --git a/chromium/gpu/vulkan/vulkan_command_buffer.h b/chromium/gpu/vulkan/vulkan_command_buffer.h
index f0cf3ef29a4..282c3e57471 100644
--- a/chromium/gpu/vulkan/vulkan_command_buffer.h
+++ b/chromium/gpu/vulkan/vulkan_command_buffer.h
@@ -7,8 +7,8 @@
#include <vulkan/vulkan.h>
+#include "base/check.h"
#include "base/component_export.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "gpu/vulkan/vulkan_fence_helper.h"
diff --git a/chromium/gpu/vulkan/vulkan_cxx.h b/chromium/gpu/vulkan/vulkan_cxx.h
new file mode 100644
index 00000000000..c1059c049b4
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_cxx.h
@@ -0,0 +1,42 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef GPU_VULKAN_VULKAN_CXX_H_
+#define GPU_VULKAN_VULKAN_CXX_H_
+
+#include <ostream>
+
+#include "base/compiler_specific.h"
+
+// Disable Vulkan prototypes.
+#if !defined(VK_NO_PROTOTYPES)
+#define VK_NO_PROTOTYPES 1
+#endif
+
+// Disable dynamic loader tool.
+#define VULKAN_HPP_ENABLE_DYNAMIC_LOADER_TOOL 0
+
+// Disable C++ exceptions.
+#define VULKAN_HPP_NO_EXCEPTIONS 1
+
+// Disable dynamic dispatch loader.
+#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 0
+
+// Set gpu::VulkanFunctionPointers as the default dispatcher.
+#define VULKAN_HPP_DEFAULT_DISPATCHER (*gpu::GetVulkanFunctionPointers())
+#define VULKAN_HPP_DEFAULT_DISPATCHER_TYPE gpu::VulkanFunctionPointers
+
+#define VULKAN_HPP_TYPESAFE_CONVERSION
+
+#include "gpu/vulkan/vulkan_function_pointers.h"
+
+#include <vulkan/vulkan.hpp>
+
+// operator for LOG() << result
+ALWAYS_INLINE std::ostream& operator<<(std::ostream& out, vk::Result result) {
+ out << static_cast<VkResult>(result);
+ return out;
+}
+
+#endif  // GPU_VULKAN_VULKAN_CXX_H_
\ No newline at end of file
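Because VULKAN_HPP_NO_EXCEPTIONS is set, vulkan.hpp calls return result/value pairs instead of throwing, and the operator<< above lets a vk::Result stream straight into Chromium logging. A minimal usage sketch:

  // Sketch: exception-free vulkan.hpp call with Chromium-style logging.
  vk::Result result;
  uint32_t api_version = 0;
  std::tie(result, api_version) = vk::enumerateInstanceVersion();
  if (result != vk::Result::eSuccess)
    LOG(ERROR) << "vkEnumerateInstanceVersion failed: " << result;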
diff --git a/chromium/gpu/vulkan/vulkan_cxx_unittest.cc b/chromium/gpu/vulkan/vulkan_cxx_unittest.cc
new file mode 100644
index 00000000000..b0f16eaaa61
--- /dev/null
+++ b/chromium/gpu/vulkan/vulkan_cxx_unittest.cc
@@ -0,0 +1,90 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/native_library.h"
+#include "base/path_service.h"
+#include "build/build_config.h"
+#include "gpu/vulkan/vulkan_cxx.h"
+#include "gpu/vulkan/vulkan_function_pointers.h"
+
+namespace gpu {
+
+class VulkanCXXTest : public testing::Test {
+ public:
+ VulkanCXXTest() = default;
+ ~VulkanCXXTest() override = default;
+
+ void SetUp() override {
+ use_swiftshader_ =
+ base::CommandLine::ForCurrentProcess()->HasSwitch("use-swiftshader");
+ base::FilePath path;
+#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_CHROMEOS) || \
+ defined(OS_FUCHSIA)
+ if (use_swiftshader_) {
+#if defined(OS_LINUX)
+ EXPECT_TRUE(base::PathService::Get(base::DIR_MODULE, &path));
+ path = path.Append("libvk_swiftshader.so");
+#else
+ return;
+#endif
+ } else {
+ path = base::FilePath("libvulkan.so.1");
+ }
+#elif defined(OS_WIN)
+ if (use_swiftshader_) {
+ EXPECT_TRUE(base::PathService::Get(base::DIR_MODULE, &path));
+ path = path.Append(L"vk_swiftshader.dll");
+ } else {
+ path = base::FilePath(L"vulkan-1.dll");
+ }
+#else
+#error "Unsupported platform"
+#endif
+
+ auto* vulkan_function_pointers = GetVulkanFunctionPointers();
+ base::NativeLibraryLoadError native_library_load_error;
+ vulkan_function_pointers->vulkan_loader_library =
+ base::LoadNativeLibrary(path, &native_library_load_error);
+ EXPECT_TRUE(vulkan_function_pointers->vulkan_loader_library);
+ }
+
+ void TearDown() override {
+ auto* vulkan_function_pointers = GetVulkanFunctionPointers();
+ base::UnloadNativeLibrary(vulkan_function_pointers->vulkan_loader_library);
+ }
+
+ private:
+ bool use_swiftshader_ = false;
+};
+
+TEST_F(VulkanCXXTest, CreateInstanceUnique) {
+ auto* vulkan_function_pointers = GetVulkanFunctionPointers();
+ EXPECT_TRUE(vulkan_function_pointers->BindUnassociatedFunctionPointers());
+
+ vk::Result result;
+ uint32_t api_version;
+ std::tie(result, api_version) = vk::enumerateInstanceVersion();
+ EXPECT_EQ(result, vk::Result::eSuccess);
+ EXPECT_GE(api_version, kVulkanRequiredApiVersion);
+
+ vk::ApplicationInfo app_info("VulkanCXXTest", 0, nullptr, 0,
+ kVulkanRequiredApiVersion);
+ vk::InstanceCreateInfo instance_create_info({}, &app_info);
+ auto result_value = vk::createInstanceUnique(instance_create_info);
+ EXPECT_EQ(result_value.result, vk::Result::eSuccess);
+
+ vk::UniqueInstance instance = std::move(result_value.value);
+ EXPECT_TRUE(instance);
+
+ EXPECT_TRUE(vulkan_function_pointers->BindInstanceFunctionPointers(
+ instance.get(), kVulkanRequiredApiVersion, gfx::ExtensionSet()));
+
+ instance.reset();
+}
+
+} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.cc b/chromium/gpu/vulkan/vulkan_device_queue.cc
index 77127965721..6ca7cefc203 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.cc
+++ b/chromium/gpu/vulkan/vulkan_device_queue.cc
@@ -9,6 +9,7 @@
#include <utility>
#include <vector>
+#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "gpu/config/gpu_info.h" // nogncheck
#include "gpu/config/vulkan_info.h"
@@ -75,6 +76,9 @@ bool VulkanDeviceQueue::Initialize(
for (size_t i = 0; i < info.physical_devices.size(); ++i) {
const auto& device_info = info.physical_devices[i];
const auto& device_properties = device_info.properties;
+ if (device_properties.apiVersion < info.used_api_version)
+ continue;
+
const VkPhysicalDevice& device = device_info.device;
for (size_t n = 0; n < device_info.queue_families.size(); ++n) {
if ((device_info.queue_families[n].queueFlags & queue_flags) !=
@@ -172,12 +176,6 @@ bool VulkanDeviceQueue::Initialize(
}
}
- if (vk_physical_device_properties_.apiVersion < info.used_api_version) {
- LOG(ERROR) << "Physical device doesn't support version."
- << info.used_api_version;
- return false;
- }
-
crash_keys::vulkan_device_api_version.Set(
VkVersionToString(vk_physical_device_properties_.apiVersion));
crash_keys::vulkan_device_driver_version.Set(base::StringPrintf(
diff --git a/chromium/gpu/vulkan/vulkan_device_queue.h b/chromium/gpu/vulkan/vulkan_device_queue.h
index d1704fad96c..f7b23c07609 100644
--- a/chromium/gpu/vulkan/vulkan_device_queue.h
+++ b/chromium/gpu/vulkan/vulkan_device_queue.h
@@ -10,8 +10,8 @@
#include <memory>
#include "base/callback.h"
+#include "base/check_op.h"
#include "base/component_export.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "build/build_config.h"
#include "gpu/vulkan/vma_wrapper.h"
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper.cc b/chromium/gpu/vulkan/vulkan_fence_helper.cc
index d7d902a42b7..c8338a4afc2 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper.cc
+++ b/chromium/gpu/vulkan/vulkan_fence_helper.cc
@@ -5,6 +5,7 @@
#include "gpu/vulkan/vulkan_fence_helper.h"
#include "base/bind.h"
+#include "base/logging.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
@@ -75,32 +76,38 @@ void VulkanFenceHelper::EnqueueCleanupTaskForSubmittedWork(CleanupTask task) {
tasks_pending_fence_.emplace_back(std::move(task));
}
-void VulkanFenceHelper::ProcessCleanupTasks() {
+void VulkanFenceHelper::ProcessCleanupTasks(uint64_t retired_generation_id) {
VkDevice device = device_queue_->GetVulkanDevice();
+ if (!retired_generation_id)
+ retired_generation_id = current_generation_;
+
// Iterate over our pending cleanup fences / tasks, advancing
// |current_generation_| as far as possible.
for (const auto& tasks_for_fence : cleanup_tasks_) {
- // If we're already ahead of this task (callback modified |generation_id_|),
- // continue.
- if (tasks_for_fence.generation_id <= current_generation_)
- continue;
-
    // Callback-based tasks have no actual fence to wait on; keep checking
    // future fences, as a callback may be delayed.
if (tasks_for_fence.UsingCallback())
continue;
VkResult result = vkGetFenceStatus(device, tasks_for_fence.fence);
- if (result == VK_NOT_READY)
+ if (result == VK_NOT_READY) {
+ retired_generation_id =
+ std::min(retired_generation_id, tasks_for_fence.generation_id - 1);
break;
- if (result != VK_SUCCESS) {
- PerformImmediateCleanup();
- return;
}
- current_generation_ = tasks_for_fence.generation_id;
+ if (result == VK_SUCCESS) {
+ retired_generation_id =
+ std::max(tasks_for_fence.generation_id, retired_generation_id);
+ continue;
+ }
+ DLOG(ERROR) << "vkGetFenceStatus() failed: " << result;
+ PerformImmediateCleanup();
+ return;
}
+ current_generation_ = retired_generation_id;
+
// Runs any cleanup tasks for generations that have passed. Create a temporary
// vector of tasks to run to avoid reentrancy issues.
std::vector<CleanupTask> tasks_to_run;
@@ -161,8 +168,7 @@ base::OnceClosure VulkanFenceHelper::CreateExternalCallback() {
// If |current_generation_| is ahead of the callback's
// |generation_id|, the callback came late. Ignore it.
if (generation_id > fence_helper->current_generation_) {
- fence_helper->current_generation_ = generation_id;
- fence_helper->ProcessCleanupTasks();
+ fence_helper->ProcessCleanupTasks(generation_id);
}
},
weak_factory_.GetWeakPtr(), generation_id);
diff --git a/chromium/gpu/vulkan/vulkan_fence_helper.h b/chromium/gpu/vulkan/vulkan_fence_helper.h
index 1b6e586aecc..571fc97ca66 100644
--- a/chromium/gpu/vulkan/vulkan_fence_helper.h
+++ b/chromium/gpu/vulkan/vulkan_fence_helper.h
@@ -101,7 +101,7 @@ class COMPONENT_EXPORT(VULKAN) VulkanFenceHelper {
// executed in order they are enqueued.
void EnqueueCleanupTaskForSubmittedWork(CleanupTask task);
// Processes CleanupTasks for which a fence has passed.
- void ProcessCleanupTasks();
+ void ProcessCleanupTasks(uint64_t retired_generation_id = 0);
// Helpers for common types:
void EnqueueSemaphoreCleanupForSubmittedWork(VkSemaphore semaphore);
void EnqueueSemaphoresCleanupForSubmittedWork(
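The reworked ProcessCleanupTasks() computes the highest generation proven retired: each VK_SUCCESS fence raises it, and the first VK_NOT_READY fence caps it at that fence's generation minus one before the scan stops. A worked trace under those rules:

  // Illustrative trace: pending tasks at generations 1, 2, 3.
  //   vkGetFenceStatus(fence@1) == VK_SUCCESS   -> retired = max(0, 1) = 1
  //   vkGetFenceStatus(fence@2) == VK_NOT_READY -> retired = min(1, 2 - 1) = 1; stop.
  // current_generation_ becomes 1: generation-1 tasks run, 2 and 3 stay queued.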
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.cc b/chromium/gpu/vulkan/vulkan_function_pointers.cc
index 27ca23b12f7..0d40a7c687e 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.cc
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.cc
@@ -27,39 +27,41 @@ bool VulkanFunctionPointers::BindUnassociatedFunctionPointers() {
// vkGetInstanceProcAddr must be handled specially since it gets its function
  // pointer through base::GetFunctionPointerFromNativeLibrary(). Other Vulkan
// functions don't do this.
- vkGetInstanceProcAddrFn = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
+ vkGetInstanceProcAddr = reinterpret_cast<PFN_vkGetInstanceProcAddr>(
base::GetFunctionPointerFromNativeLibrary(vulkan_loader_library,
"vkGetInstanceProcAddr"));
- if (!vkGetInstanceProcAddrFn)
+ if (!vkGetInstanceProcAddr)
return false;
+ vkEnumerateInstanceVersion = reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
+ vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
+ if (!vkEnumerateInstanceVersion) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkEnumerateInstanceVersion";
+ return false;
+ }
- vkEnumerateInstanceVersionFn =
- reinterpret_cast<PFN_vkEnumerateInstanceVersion>(
- vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceVersion"));
- // vkEnumerateInstanceVersion didn't exist in Vulkan 1.0, so we should
- // proceed even if we fail to get vkEnumerateInstanceVersion pointer.
- vkCreateInstanceFn = reinterpret_cast<PFN_vkCreateInstance>(
+ vkCreateInstance = reinterpret_cast<PFN_vkCreateInstance>(
vkGetInstanceProcAddr(nullptr, "vkCreateInstance"));
- if (!vkCreateInstanceFn) {
+ if (!vkCreateInstance) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateInstance";
return false;
}
- vkEnumerateInstanceExtensionPropertiesFn =
+ vkEnumerateInstanceExtensionProperties =
reinterpret_cast<PFN_vkEnumerateInstanceExtensionProperties>(
vkGetInstanceProcAddr(nullptr,
"vkEnumerateInstanceExtensionProperties"));
- if (!vkEnumerateInstanceExtensionPropertiesFn) {
+ if (!vkEnumerateInstanceExtensionProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateInstanceExtensionProperties";
return false;
}
- vkEnumerateInstanceLayerPropertiesFn =
+ vkEnumerateInstanceLayerProperties =
reinterpret_cast<PFN_vkEnumerateInstanceLayerProperties>(
vkGetInstanceProcAddr(nullptr, "vkEnumerateInstanceLayerProperties"));
- if (!vkEnumerateInstanceLayerPropertiesFn) {
+ if (!vkEnumerateInstanceLayerProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateInstanceLayerProperties";
return false;
@@ -72,102 +74,122 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
VkInstance vk_instance,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
- vkCreateDeviceFn = reinterpret_cast<PFN_vkCreateDevice>(
+ DCHECK_GE(api_version, kVulkanRequiredApiVersion);
+ vkCreateDevice = reinterpret_cast<PFN_vkCreateDevice>(
vkGetInstanceProcAddr(vk_instance, "vkCreateDevice"));
- if (!vkCreateDeviceFn) {
+ if (!vkCreateDevice) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDevice";
return false;
}
- vkDestroyInstanceFn = reinterpret_cast<PFN_vkDestroyInstance>(
+ vkDestroyInstance = reinterpret_cast<PFN_vkDestroyInstance>(
vkGetInstanceProcAddr(vk_instance, "vkDestroyInstance"));
- if (!vkDestroyInstanceFn) {
+ if (!vkDestroyInstance) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyInstance";
return false;
}
- vkEnumerateDeviceExtensionPropertiesFn =
+ vkEnumerateDeviceExtensionProperties =
reinterpret_cast<PFN_vkEnumerateDeviceExtensionProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkEnumerateDeviceExtensionProperties"));
- if (!vkEnumerateDeviceExtensionPropertiesFn) {
+ if (!vkEnumerateDeviceExtensionProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateDeviceExtensionProperties";
return false;
}
- vkEnumerateDeviceLayerPropertiesFn =
+ vkEnumerateDeviceLayerProperties =
reinterpret_cast<PFN_vkEnumerateDeviceLayerProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkEnumerateDeviceLayerProperties"));
- if (!vkEnumerateDeviceLayerPropertiesFn) {
+ if (!vkEnumerateDeviceLayerProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumerateDeviceLayerProperties";
return false;
}
- vkEnumeratePhysicalDevicesFn =
- reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(
- vkGetInstanceProcAddr(vk_instance, "vkEnumeratePhysicalDevices"));
- if (!vkEnumeratePhysicalDevicesFn) {
+ vkEnumeratePhysicalDevices = reinterpret_cast<PFN_vkEnumeratePhysicalDevices>(
+ vkGetInstanceProcAddr(vk_instance, "vkEnumeratePhysicalDevices"));
+ if (!vkEnumeratePhysicalDevices) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEnumeratePhysicalDevices";
return false;
}
- vkGetDeviceProcAddrFn = reinterpret_cast<PFN_vkGetDeviceProcAddr>(
+ vkGetDeviceProcAddr = reinterpret_cast<PFN_vkGetDeviceProcAddr>(
vkGetInstanceProcAddr(vk_instance, "vkGetDeviceProcAddr"));
- if (!vkGetDeviceProcAddrFn) {
+ if (!vkGetDeviceProcAddr) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetDeviceProcAddr";
return false;
}
- vkGetPhysicalDeviceFeaturesFn =
- reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures>(
- vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceFeatures"));
- if (!vkGetPhysicalDeviceFeaturesFn) {
+ vkGetPhysicalDeviceFeatures2 =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2>(
+ vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceFeatures2"));
+ if (!vkGetPhysicalDeviceFeatures2) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetPhysicalDeviceFeatures";
+ << "vkGetPhysicalDeviceFeatures2";
return false;
}
- vkGetPhysicalDeviceFormatPropertiesFn =
+ vkGetPhysicalDeviceFormatProperties =
reinterpret_cast<PFN_vkGetPhysicalDeviceFormatProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceFormatProperties"));
- if (!vkGetPhysicalDeviceFormatPropertiesFn) {
+ if (!vkGetPhysicalDeviceFormatProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceFormatProperties";
return false;
}
- vkGetPhysicalDeviceMemoryPropertiesFn =
+ vkGetPhysicalDeviceImageFormatProperties2 =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceImageFormatProperties2>(
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceImageFormatProperties2"));
+ if (!vkGetPhysicalDeviceImageFormatProperties2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetPhysicalDeviceImageFormatProperties2";
+ return false;
+ }
+
+ vkGetPhysicalDeviceMemoryProperties =
reinterpret_cast<PFN_vkGetPhysicalDeviceMemoryProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceMemoryProperties"));
- if (!vkGetPhysicalDeviceMemoryPropertiesFn) {
+ if (!vkGetPhysicalDeviceMemoryProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceMemoryProperties";
return false;
}
- vkGetPhysicalDevicePropertiesFn =
+ vkGetPhysicalDeviceMemoryProperties2 =
+ reinterpret_cast<PFN_vkGetPhysicalDeviceMemoryProperties2>(
+ vkGetInstanceProcAddr(vk_instance,
+ "vkGetPhysicalDeviceMemoryProperties2"));
+ if (!vkGetPhysicalDeviceMemoryProperties2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetPhysicalDeviceMemoryProperties2";
+ return false;
+ }
+
+ vkGetPhysicalDeviceProperties =
reinterpret_cast<PFN_vkGetPhysicalDeviceProperties>(
vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceProperties"));
- if (!vkGetPhysicalDevicePropertiesFn) {
+ if (!vkGetPhysicalDeviceProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceProperties";
return false;
}
- vkGetPhysicalDeviceQueueFamilyPropertiesFn =
+ vkGetPhysicalDeviceQueueFamilyProperties =
reinterpret_cast<PFN_vkGetPhysicalDeviceQueueFamilyProperties>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceQueueFamilyProperties"));
- if (!vkGetPhysicalDeviceQueueFamilyPropertiesFn) {
+ if (!vkGetPhysicalDeviceQueueFamilyProperties) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceQueueFamilyProperties";
return false;
@@ -176,21 +198,21 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if DCHECK_IS_ON()
if (gfx::HasExtension(enabled_extensions,
VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
- vkCreateDebugReportCallbackEXTFn =
+ vkCreateDebugReportCallbackEXT =
reinterpret_cast<PFN_vkCreateDebugReportCallbackEXT>(
vkGetInstanceProcAddr(vk_instance,
"vkCreateDebugReportCallbackEXT"));
- if (!vkCreateDebugReportCallbackEXTFn) {
+ if (!vkCreateDebugReportCallbackEXT) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDebugReportCallbackEXT";
return false;
}
- vkDestroyDebugReportCallbackEXTFn =
+ vkDestroyDebugReportCallbackEXT =
reinterpret_cast<PFN_vkDestroyDebugReportCallbackEXT>(
vkGetInstanceProcAddr(vk_instance,
"vkDestroyDebugReportCallbackEXT"));
- if (!vkDestroyDebugReportCallbackEXTFn) {
+ if (!vkDestroyDebugReportCallbackEXT) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDebugReportCallbackEXT";
return false;
@@ -199,39 +221,39 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#endif // DCHECK_IS_ON()
if (gfx::HasExtension(enabled_extensions, VK_KHR_SURFACE_EXTENSION_NAME)) {
- vkDestroySurfaceKHRFn = reinterpret_cast<PFN_vkDestroySurfaceKHR>(
+ vkDestroySurfaceKHR = reinterpret_cast<PFN_vkDestroySurfaceKHR>(
vkGetInstanceProcAddr(vk_instance, "vkDestroySurfaceKHR"));
- if (!vkDestroySurfaceKHRFn) {
+ if (!vkDestroySurfaceKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySurfaceKHR";
return false;
}
- vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn =
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceSurfaceCapabilitiesKHR"));
- if (!vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn) {
+ if (!vkGetPhysicalDeviceSurfaceCapabilitiesKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceSurfaceCapabilitiesKHR";
return false;
}
- vkGetPhysicalDeviceSurfaceFormatsKHRFn =
+ vkGetPhysicalDeviceSurfaceFormatsKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceFormatsKHR>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceSurfaceFormatsKHR"));
- if (!vkGetPhysicalDeviceSurfaceFormatsKHRFn) {
+ if (!vkGetPhysicalDeviceSurfaceFormatsKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceSurfaceFormatsKHR";
return false;
}
- vkGetPhysicalDeviceSurfaceSupportKHRFn =
+ vkGetPhysicalDeviceSurfaceSupportKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceSurfaceSupportKHR>(
vkGetInstanceProcAddr(vk_instance,
"vkGetPhysicalDeviceSurfaceSupportKHR"));
- if (!vkGetPhysicalDeviceSurfaceSupportKHRFn) {
+ if (!vkGetPhysicalDeviceSurfaceSupportKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceSurfaceSupportKHR";
return false;
@@ -241,19 +263,19 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if defined(USE_VULKAN_XLIB)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_XLIB_SURFACE_EXTENSION_NAME)) {
- vkCreateXlibSurfaceKHRFn = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>(
+ vkCreateXlibSurfaceKHR = reinterpret_cast<PFN_vkCreateXlibSurfaceKHR>(
vkGetInstanceProcAddr(vk_instance, "vkCreateXlibSurfaceKHR"));
- if (!vkCreateXlibSurfaceKHRFn) {
+ if (!vkCreateXlibSurfaceKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateXlibSurfaceKHR";
return false;
}
- vkGetPhysicalDeviceXlibPresentationSupportKHRFn =
+ vkGetPhysicalDeviceXlibPresentationSupportKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>(
vkGetInstanceProcAddr(
vk_instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR"));
- if (!vkGetPhysicalDeviceXlibPresentationSupportKHRFn) {
+ if (!vkGetPhysicalDeviceXlibPresentationSupportKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceXlibPresentationSupportKHR";
return false;
@@ -264,19 +286,19 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if defined(OS_WIN)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_WIN32_SURFACE_EXTENSION_NAME)) {
- vkCreateWin32SurfaceKHRFn = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>(
+ vkCreateWin32SurfaceKHR = reinterpret_cast<PFN_vkCreateWin32SurfaceKHR>(
vkGetInstanceProcAddr(vk_instance, "vkCreateWin32SurfaceKHR"));
- if (!vkCreateWin32SurfaceKHRFn) {
+ if (!vkCreateWin32SurfaceKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateWin32SurfaceKHR";
return false;
}
- vkGetPhysicalDeviceWin32PresentationSupportKHRFn =
+ vkGetPhysicalDeviceWin32PresentationSupportKHR =
reinterpret_cast<PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR>(
vkGetInstanceProcAddr(
vk_instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR"));
- if (!vkGetPhysicalDeviceWin32PresentationSupportKHRFn) {
+ if (!vkGetPhysicalDeviceWin32PresentationSupportKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetPhysicalDeviceWin32PresentationSupportKHR";
return false;
@@ -287,10 +309,9 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if defined(OS_ANDROID)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
- vkCreateAndroidSurfaceKHRFn =
- reinterpret_cast<PFN_vkCreateAndroidSurfaceKHR>(
- vkGetInstanceProcAddr(vk_instance, "vkCreateAndroidSurfaceKHR"));
- if (!vkCreateAndroidSurfaceKHRFn) {
+ vkCreateAndroidSurfaceKHR = reinterpret_cast<PFN_vkCreateAndroidSurfaceKHR>(
+ vkGetInstanceProcAddr(vk_instance, "vkCreateAndroidSurfaceKHR"));
+ if (!vkCreateAndroidSurfaceKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateAndroidSurfaceKHR";
return false;
@@ -301,11 +322,11 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_IMAGEPIPE_SURFACE_EXTENSION_NAME)) {
- vkCreateImagePipeSurfaceFUCHSIAFn =
+ vkCreateImagePipeSurfaceFUCHSIA =
reinterpret_cast<PFN_vkCreateImagePipeSurfaceFUCHSIA>(
vkGetInstanceProcAddr(vk_instance,
"vkCreateImagePipeSurfaceFUCHSIA"));
- if (!vkCreateImagePipeSurfaceFUCHSIAFn) {
+ if (!vkCreateImagePipeSurfaceFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateImagePipeSurfaceFUCHSIA";
return false;
@@ -313,42 +334,6 @@ bool VulkanFunctionPointers::BindInstanceFunctionPointers(
}
#endif // defined(OS_FUCHSIA)
- if (api_version >= VK_API_VERSION_1_1) {
- vkGetPhysicalDeviceImageFormatProperties2Fn =
- reinterpret_cast<PFN_vkGetPhysicalDeviceImageFormatProperties2>(
- vkGetInstanceProcAddr(vk_instance,
- "vkGetPhysicalDeviceImageFormatProperties2"));
- if (!vkGetPhysicalDeviceImageFormatProperties2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetPhysicalDeviceImageFormatProperties2";
- return false;
- }
- }
-
- if (api_version >= VK_API_VERSION_1_1) {
- vkGetPhysicalDeviceFeatures2Fn =
- reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2>(
- vkGetInstanceProcAddr(vk_instance, "vkGetPhysicalDeviceFeatures2"));
- if (!vkGetPhysicalDeviceFeatures2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetPhysicalDeviceFeatures2";
- return false;
- }
-
- } else if (gfx::HasExtension(
- enabled_extensions,
- VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
- vkGetPhysicalDeviceFeatures2Fn =
- reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2>(
- vkGetInstanceProcAddr(vk_instance,
- "vkGetPhysicalDeviceFeatures2KHR"));
- if (!vkGetPhysicalDeviceFeatures2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetPhysicalDeviceFeatures2KHR";
- return false;
- }
- }
-
return true;
}
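
With Vulkan 1.1 now the required baseline, the two api_version-gated blocks above are deleted rather than relocated: vkGetPhysicalDeviceImageFormatProperties2 and vkGetPhysicalDeviceFeatures2 are promoted 1.1 entry points, so they are bound unconditionally alongside the other instance functions, and the vkGetPhysicalDeviceFeatures2KHR fallback disappears. A minimal sketch of the version check this assumes on the instance-creation path (the call site itself is outside this hunk):

    // Sketch only: with the KHR fallbacks removed, callers must reject
    // drivers that do not report at least Vulkan 1.1.
    uint32_t api_version = VK_API_VERSION_1_0;
    if (vkEnumerateInstanceVersion(&api_version) != VK_SUCCESS ||
        api_version < gpu::kVulkanRequiredApiVersion) {
      return false;  // Instance is too old for this configuration.
    }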
@@ -356,505 +341,520 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
VkDevice vk_device,
uint32_t api_version,
const gfx::ExtensionSet& enabled_extensions) {
+ DCHECK_GE(api_version, kVulkanRequiredApiVersion);
// Device functions
- vkAllocateCommandBuffersFn = reinterpret_cast<PFN_vkAllocateCommandBuffers>(
+ vkAllocateCommandBuffers = reinterpret_cast<PFN_vkAllocateCommandBuffers>(
vkGetDeviceProcAddr(vk_device, "vkAllocateCommandBuffers"));
- if (!vkAllocateCommandBuffersFn) {
+ if (!vkAllocateCommandBuffers) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAllocateCommandBuffers";
return false;
}
- vkAllocateDescriptorSetsFn = reinterpret_cast<PFN_vkAllocateDescriptorSets>(
+ vkAllocateDescriptorSets = reinterpret_cast<PFN_vkAllocateDescriptorSets>(
vkGetDeviceProcAddr(vk_device, "vkAllocateDescriptorSets"));
- if (!vkAllocateDescriptorSetsFn) {
+ if (!vkAllocateDescriptorSets) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAllocateDescriptorSets";
return false;
}
- vkAllocateMemoryFn = reinterpret_cast<PFN_vkAllocateMemory>(
+ vkAllocateMemory = reinterpret_cast<PFN_vkAllocateMemory>(
vkGetDeviceProcAddr(vk_device, "vkAllocateMemory"));
- if (!vkAllocateMemoryFn) {
+ if (!vkAllocateMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAllocateMemory";
return false;
}
- vkBeginCommandBufferFn = reinterpret_cast<PFN_vkBeginCommandBuffer>(
+ vkBeginCommandBuffer = reinterpret_cast<PFN_vkBeginCommandBuffer>(
vkGetDeviceProcAddr(vk_device, "vkBeginCommandBuffer"));
- if (!vkBeginCommandBufferFn) {
+ if (!vkBeginCommandBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkBeginCommandBuffer";
return false;
}
- vkBindBufferMemoryFn = reinterpret_cast<PFN_vkBindBufferMemory>(
+ vkBindBufferMemory = reinterpret_cast<PFN_vkBindBufferMemory>(
vkGetDeviceProcAddr(vk_device, "vkBindBufferMemory"));
- if (!vkBindBufferMemoryFn) {
+ if (!vkBindBufferMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkBindBufferMemory";
return false;
}
- vkBindImageMemoryFn = reinterpret_cast<PFN_vkBindImageMemory>(
+ vkBindBufferMemory2 = reinterpret_cast<PFN_vkBindBufferMemory2>(
+ vkGetDeviceProcAddr(vk_device, "vkBindBufferMemory2"));
+ if (!vkBindBufferMemory2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkBindBufferMemory2";
+ return false;
+ }
+
+ vkBindImageMemory = reinterpret_cast<PFN_vkBindImageMemory>(
vkGetDeviceProcAddr(vk_device, "vkBindImageMemory"));
- if (!vkBindImageMemoryFn) {
+ if (!vkBindImageMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkBindImageMemory";
return false;
}
- vkCmdBeginRenderPassFn = reinterpret_cast<PFN_vkCmdBeginRenderPass>(
+ vkBindImageMemory2 = reinterpret_cast<PFN_vkBindImageMemory2>(
+ vkGetDeviceProcAddr(vk_device, "vkBindImageMemory2"));
+ if (!vkBindImageMemory2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkBindImageMemory2";
+ return false;
+ }
+
+ vkCmdBeginRenderPass = reinterpret_cast<PFN_vkCmdBeginRenderPass>(
vkGetDeviceProcAddr(vk_device, "vkCmdBeginRenderPass"));
- if (!vkCmdBeginRenderPassFn) {
+ if (!vkCmdBeginRenderPass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdBeginRenderPass";
return false;
}
- vkCmdCopyBufferFn = reinterpret_cast<PFN_vkCmdCopyBuffer>(
+ vkCmdCopyBuffer = reinterpret_cast<PFN_vkCmdCopyBuffer>(
vkGetDeviceProcAddr(vk_device, "vkCmdCopyBuffer"));
- if (!vkCmdCopyBufferFn) {
+ if (!vkCmdCopyBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdCopyBuffer";
return false;
}
- vkCmdCopyBufferToImageFn = reinterpret_cast<PFN_vkCmdCopyBufferToImage>(
+ vkCmdCopyBufferToImage = reinterpret_cast<PFN_vkCmdCopyBufferToImage>(
vkGetDeviceProcAddr(vk_device, "vkCmdCopyBufferToImage"));
- if (!vkCmdCopyBufferToImageFn) {
+ if (!vkCmdCopyBufferToImage) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdCopyBufferToImage";
return false;
}
- vkCmdEndRenderPassFn = reinterpret_cast<PFN_vkCmdEndRenderPass>(
+ vkCmdEndRenderPass = reinterpret_cast<PFN_vkCmdEndRenderPass>(
vkGetDeviceProcAddr(vk_device, "vkCmdEndRenderPass"));
- if (!vkCmdEndRenderPassFn) {
+ if (!vkCmdEndRenderPass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdEndRenderPass";
return false;
}
- vkCmdExecuteCommandsFn = reinterpret_cast<PFN_vkCmdExecuteCommands>(
+ vkCmdExecuteCommands = reinterpret_cast<PFN_vkCmdExecuteCommands>(
vkGetDeviceProcAddr(vk_device, "vkCmdExecuteCommands"));
- if (!vkCmdExecuteCommandsFn) {
+ if (!vkCmdExecuteCommands) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdExecuteCommands";
return false;
}
- vkCmdNextSubpassFn = reinterpret_cast<PFN_vkCmdNextSubpass>(
+ vkCmdNextSubpass = reinterpret_cast<PFN_vkCmdNextSubpass>(
vkGetDeviceProcAddr(vk_device, "vkCmdNextSubpass"));
- if (!vkCmdNextSubpassFn) {
+ if (!vkCmdNextSubpass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdNextSubpass";
return false;
}
- vkCmdPipelineBarrierFn = reinterpret_cast<PFN_vkCmdPipelineBarrier>(
+ vkCmdPipelineBarrier = reinterpret_cast<PFN_vkCmdPipelineBarrier>(
vkGetDeviceProcAddr(vk_device, "vkCmdPipelineBarrier"));
- if (!vkCmdPipelineBarrierFn) {
+ if (!vkCmdPipelineBarrier) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCmdPipelineBarrier";
return false;
}
- vkCreateBufferFn = reinterpret_cast<PFN_vkCreateBuffer>(
+ vkCreateBuffer = reinterpret_cast<PFN_vkCreateBuffer>(
vkGetDeviceProcAddr(vk_device, "vkCreateBuffer"));
- if (!vkCreateBufferFn) {
+ if (!vkCreateBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateBuffer";
return false;
}
- vkCreateCommandPoolFn = reinterpret_cast<PFN_vkCreateCommandPool>(
+ vkCreateCommandPool = reinterpret_cast<PFN_vkCreateCommandPool>(
vkGetDeviceProcAddr(vk_device, "vkCreateCommandPool"));
- if (!vkCreateCommandPoolFn) {
+ if (!vkCreateCommandPool) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateCommandPool";
return false;
}
- vkCreateDescriptorPoolFn = reinterpret_cast<PFN_vkCreateDescriptorPool>(
+ vkCreateDescriptorPool = reinterpret_cast<PFN_vkCreateDescriptorPool>(
vkGetDeviceProcAddr(vk_device, "vkCreateDescriptorPool"));
- if (!vkCreateDescriptorPoolFn) {
+ if (!vkCreateDescriptorPool) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDescriptorPool";
return false;
}
- vkCreateDescriptorSetLayoutFn =
+ vkCreateDescriptorSetLayout =
reinterpret_cast<PFN_vkCreateDescriptorSetLayout>(
vkGetDeviceProcAddr(vk_device, "vkCreateDescriptorSetLayout"));
- if (!vkCreateDescriptorSetLayoutFn) {
+ if (!vkCreateDescriptorSetLayout) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateDescriptorSetLayout";
return false;
}
- vkCreateFenceFn = reinterpret_cast<PFN_vkCreateFence>(
+ vkCreateFence = reinterpret_cast<PFN_vkCreateFence>(
vkGetDeviceProcAddr(vk_device, "vkCreateFence"));
- if (!vkCreateFenceFn) {
+ if (!vkCreateFence) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateFence";
return false;
}
- vkCreateFramebufferFn = reinterpret_cast<PFN_vkCreateFramebuffer>(
+ vkCreateFramebuffer = reinterpret_cast<PFN_vkCreateFramebuffer>(
vkGetDeviceProcAddr(vk_device, "vkCreateFramebuffer"));
- if (!vkCreateFramebufferFn) {
+ if (!vkCreateFramebuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateFramebuffer";
return false;
}
- vkCreateImageFn = reinterpret_cast<PFN_vkCreateImage>(
+ vkCreateImage = reinterpret_cast<PFN_vkCreateImage>(
vkGetDeviceProcAddr(vk_device, "vkCreateImage"));
- if (!vkCreateImageFn) {
+ if (!vkCreateImage) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateImage";
return false;
}
- vkCreateImageViewFn = reinterpret_cast<PFN_vkCreateImageView>(
+ vkCreateImageView = reinterpret_cast<PFN_vkCreateImageView>(
vkGetDeviceProcAddr(vk_device, "vkCreateImageView"));
- if (!vkCreateImageViewFn) {
+ if (!vkCreateImageView) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateImageView";
return false;
}
- vkCreateRenderPassFn = reinterpret_cast<PFN_vkCreateRenderPass>(
+ vkCreateRenderPass = reinterpret_cast<PFN_vkCreateRenderPass>(
vkGetDeviceProcAddr(vk_device, "vkCreateRenderPass"));
- if (!vkCreateRenderPassFn) {
+ if (!vkCreateRenderPass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateRenderPass";
return false;
}
- vkCreateSamplerFn = reinterpret_cast<PFN_vkCreateSampler>(
+ vkCreateSampler = reinterpret_cast<PFN_vkCreateSampler>(
vkGetDeviceProcAddr(vk_device, "vkCreateSampler"));
- if (!vkCreateSamplerFn) {
+ if (!vkCreateSampler) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateSampler";
return false;
}
- vkCreateSemaphoreFn = reinterpret_cast<PFN_vkCreateSemaphore>(
+ vkCreateSemaphore = reinterpret_cast<PFN_vkCreateSemaphore>(
vkGetDeviceProcAddr(vk_device, "vkCreateSemaphore"));
- if (!vkCreateSemaphoreFn) {
+ if (!vkCreateSemaphore) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateSemaphore";
return false;
}
- vkCreateShaderModuleFn = reinterpret_cast<PFN_vkCreateShaderModule>(
+ vkCreateShaderModule = reinterpret_cast<PFN_vkCreateShaderModule>(
vkGetDeviceProcAddr(vk_device, "vkCreateShaderModule"));
- if (!vkCreateShaderModuleFn) {
+ if (!vkCreateShaderModule) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateShaderModule";
return false;
}
- vkDestroyBufferFn = reinterpret_cast<PFN_vkDestroyBuffer>(
+ vkDestroyBuffer = reinterpret_cast<PFN_vkDestroyBuffer>(
vkGetDeviceProcAddr(vk_device, "vkDestroyBuffer"));
- if (!vkDestroyBufferFn) {
+ if (!vkDestroyBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyBuffer";
return false;
}
- vkDestroyCommandPoolFn = reinterpret_cast<PFN_vkDestroyCommandPool>(
+ vkDestroyCommandPool = reinterpret_cast<PFN_vkDestroyCommandPool>(
vkGetDeviceProcAddr(vk_device, "vkDestroyCommandPool"));
- if (!vkDestroyCommandPoolFn) {
+ if (!vkDestroyCommandPool) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyCommandPool";
return false;
}
- vkDestroyDescriptorPoolFn = reinterpret_cast<PFN_vkDestroyDescriptorPool>(
+ vkDestroyDescriptorPool = reinterpret_cast<PFN_vkDestroyDescriptorPool>(
vkGetDeviceProcAddr(vk_device, "vkDestroyDescriptorPool"));
- if (!vkDestroyDescriptorPoolFn) {
+ if (!vkDestroyDescriptorPool) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDescriptorPool";
return false;
}
- vkDestroyDescriptorSetLayoutFn =
+ vkDestroyDescriptorSetLayout =
reinterpret_cast<PFN_vkDestroyDescriptorSetLayout>(
vkGetDeviceProcAddr(vk_device, "vkDestroyDescriptorSetLayout"));
- if (!vkDestroyDescriptorSetLayoutFn) {
+ if (!vkDestroyDescriptorSetLayout) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDescriptorSetLayout";
return false;
}
- vkDestroyDeviceFn = reinterpret_cast<PFN_vkDestroyDevice>(
+ vkDestroyDevice = reinterpret_cast<PFN_vkDestroyDevice>(
vkGetDeviceProcAddr(vk_device, "vkDestroyDevice"));
- if (!vkDestroyDeviceFn) {
+ if (!vkDestroyDevice) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyDevice";
return false;
}
- vkDestroyFenceFn = reinterpret_cast<PFN_vkDestroyFence>(
+ vkDestroyFence = reinterpret_cast<PFN_vkDestroyFence>(
vkGetDeviceProcAddr(vk_device, "vkDestroyFence"));
- if (!vkDestroyFenceFn) {
+ if (!vkDestroyFence) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyFence";
return false;
}
- vkDestroyFramebufferFn = reinterpret_cast<PFN_vkDestroyFramebuffer>(
+ vkDestroyFramebuffer = reinterpret_cast<PFN_vkDestroyFramebuffer>(
vkGetDeviceProcAddr(vk_device, "vkDestroyFramebuffer"));
- if (!vkDestroyFramebufferFn) {
+ if (!vkDestroyFramebuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyFramebuffer";
return false;
}
- vkDestroyImageFn = reinterpret_cast<PFN_vkDestroyImage>(
+ vkDestroyImage = reinterpret_cast<PFN_vkDestroyImage>(
vkGetDeviceProcAddr(vk_device, "vkDestroyImage"));
- if (!vkDestroyImageFn) {
+ if (!vkDestroyImage) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyImage";
return false;
}
- vkDestroyImageViewFn = reinterpret_cast<PFN_vkDestroyImageView>(
+ vkDestroyImageView = reinterpret_cast<PFN_vkDestroyImageView>(
vkGetDeviceProcAddr(vk_device, "vkDestroyImageView"));
- if (!vkDestroyImageViewFn) {
+ if (!vkDestroyImageView) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyImageView";
return false;
}
- vkDestroyRenderPassFn = reinterpret_cast<PFN_vkDestroyRenderPass>(
+ vkDestroyRenderPass = reinterpret_cast<PFN_vkDestroyRenderPass>(
vkGetDeviceProcAddr(vk_device, "vkDestroyRenderPass"));
- if (!vkDestroyRenderPassFn) {
+ if (!vkDestroyRenderPass) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyRenderPass";
return false;
}
- vkDestroySamplerFn = reinterpret_cast<PFN_vkDestroySampler>(
+ vkDestroySampler = reinterpret_cast<PFN_vkDestroySampler>(
vkGetDeviceProcAddr(vk_device, "vkDestroySampler"));
- if (!vkDestroySamplerFn) {
+ if (!vkDestroySampler) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySampler";
return false;
}
- vkDestroySemaphoreFn = reinterpret_cast<PFN_vkDestroySemaphore>(
+ vkDestroySemaphore = reinterpret_cast<PFN_vkDestroySemaphore>(
vkGetDeviceProcAddr(vk_device, "vkDestroySemaphore"));
- if (!vkDestroySemaphoreFn) {
+ if (!vkDestroySemaphore) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySemaphore";
return false;
}
- vkDestroyShaderModuleFn = reinterpret_cast<PFN_vkDestroyShaderModule>(
+ vkDestroyShaderModule = reinterpret_cast<PFN_vkDestroyShaderModule>(
vkGetDeviceProcAddr(vk_device, "vkDestroyShaderModule"));
- if (!vkDestroyShaderModuleFn) {
+ if (!vkDestroyShaderModule) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyShaderModule";
return false;
}
- vkDeviceWaitIdleFn = reinterpret_cast<PFN_vkDeviceWaitIdle>(
+ vkDeviceWaitIdle = reinterpret_cast<PFN_vkDeviceWaitIdle>(
vkGetDeviceProcAddr(vk_device, "vkDeviceWaitIdle"));
- if (!vkDeviceWaitIdleFn) {
+ if (!vkDeviceWaitIdle) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDeviceWaitIdle";
return false;
}
- vkFlushMappedMemoryRangesFn = reinterpret_cast<PFN_vkFlushMappedMemoryRanges>(
+ vkFlushMappedMemoryRanges = reinterpret_cast<PFN_vkFlushMappedMemoryRanges>(
vkGetDeviceProcAddr(vk_device, "vkFlushMappedMemoryRanges"));
- if (!vkFlushMappedMemoryRangesFn) {
+ if (!vkFlushMappedMemoryRanges) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFlushMappedMemoryRanges";
return false;
}
- vkEndCommandBufferFn = reinterpret_cast<PFN_vkEndCommandBuffer>(
+ vkEndCommandBuffer = reinterpret_cast<PFN_vkEndCommandBuffer>(
vkGetDeviceProcAddr(vk_device, "vkEndCommandBuffer"));
- if (!vkEndCommandBufferFn) {
+ if (!vkEndCommandBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkEndCommandBuffer";
return false;
}
- vkFreeCommandBuffersFn = reinterpret_cast<PFN_vkFreeCommandBuffers>(
+ vkFreeCommandBuffers = reinterpret_cast<PFN_vkFreeCommandBuffers>(
vkGetDeviceProcAddr(vk_device, "vkFreeCommandBuffers"));
- if (!vkFreeCommandBuffersFn) {
+ if (!vkFreeCommandBuffers) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFreeCommandBuffers";
return false;
}
- vkFreeDescriptorSetsFn = reinterpret_cast<PFN_vkFreeDescriptorSets>(
+ vkFreeDescriptorSets = reinterpret_cast<PFN_vkFreeDescriptorSets>(
vkGetDeviceProcAddr(vk_device, "vkFreeDescriptorSets"));
- if (!vkFreeDescriptorSetsFn) {
+ if (!vkFreeDescriptorSets) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFreeDescriptorSets";
return false;
}
- vkFreeMemoryFn = reinterpret_cast<PFN_vkFreeMemory>(
+ vkFreeMemory = reinterpret_cast<PFN_vkFreeMemory>(
vkGetDeviceProcAddr(vk_device, "vkFreeMemory"));
- if (!vkFreeMemoryFn) {
+ if (!vkFreeMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkFreeMemory";
return false;
}
- vkInvalidateMappedMemoryRangesFn =
+ vkInvalidateMappedMemoryRanges =
reinterpret_cast<PFN_vkInvalidateMappedMemoryRanges>(
vkGetDeviceProcAddr(vk_device, "vkInvalidateMappedMemoryRanges"));
- if (!vkInvalidateMappedMemoryRangesFn) {
+ if (!vkInvalidateMappedMemoryRanges) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkInvalidateMappedMemoryRanges";
return false;
}
- vkGetBufferMemoryRequirementsFn =
+ vkGetBufferMemoryRequirements =
reinterpret_cast<PFN_vkGetBufferMemoryRequirements>(
vkGetDeviceProcAddr(vk_device, "vkGetBufferMemoryRequirements"));
- if (!vkGetBufferMemoryRequirementsFn) {
+ if (!vkGetBufferMemoryRequirements) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetBufferMemoryRequirements";
return false;
}
- vkGetDeviceQueueFn = reinterpret_cast<PFN_vkGetDeviceQueue>(
+ vkGetBufferMemoryRequirements2 =
+ reinterpret_cast<PFN_vkGetBufferMemoryRequirements2>(
+ vkGetDeviceProcAddr(vk_device, "vkGetBufferMemoryRequirements2"));
+ if (!vkGetBufferMemoryRequirements2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetBufferMemoryRequirements2";
+ return false;
+ }
+
+ vkGetDeviceQueue = reinterpret_cast<PFN_vkGetDeviceQueue>(
vkGetDeviceProcAddr(vk_device, "vkGetDeviceQueue"));
- if (!vkGetDeviceQueueFn) {
+ if (!vkGetDeviceQueue) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetDeviceQueue";
return false;
}
- vkGetFenceStatusFn = reinterpret_cast<PFN_vkGetFenceStatus>(
+ vkGetDeviceQueue2 = reinterpret_cast<PFN_vkGetDeviceQueue2>(
+ vkGetDeviceProcAddr(vk_device, "vkGetDeviceQueue2"));
+ if (!vkGetDeviceQueue2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetDeviceQueue2";
+ return false;
+ }
+
+ vkGetFenceStatus = reinterpret_cast<PFN_vkGetFenceStatus>(
vkGetDeviceProcAddr(vk_device, "vkGetFenceStatus"));
- if (!vkGetFenceStatusFn) {
+ if (!vkGetFenceStatus) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetFenceStatus";
return false;
}
- vkGetImageMemoryRequirementsFn =
+ vkGetImageMemoryRequirements =
reinterpret_cast<PFN_vkGetImageMemoryRequirements>(
vkGetDeviceProcAddr(vk_device, "vkGetImageMemoryRequirements"));
- if (!vkGetImageMemoryRequirementsFn) {
+ if (!vkGetImageMemoryRequirements) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetImageMemoryRequirements";
return false;
}
- vkMapMemoryFn = reinterpret_cast<PFN_vkMapMemory>(
+ vkGetImageMemoryRequirements2 =
+ reinterpret_cast<PFN_vkGetImageMemoryRequirements2>(
+ vkGetDeviceProcAddr(vk_device, "vkGetImageMemoryRequirements2"));
+ if (!vkGetImageMemoryRequirements2) {
+ DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
+ << "vkGetImageMemoryRequirements2";
+ return false;
+ }
+
+ vkMapMemory = reinterpret_cast<PFN_vkMapMemory>(
vkGetDeviceProcAddr(vk_device, "vkMapMemory"));
- if (!vkMapMemoryFn) {
+ if (!vkMapMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkMapMemory";
return false;
}
- vkQueueSubmitFn = reinterpret_cast<PFN_vkQueueSubmit>(
+ vkQueueSubmit = reinterpret_cast<PFN_vkQueueSubmit>(
vkGetDeviceProcAddr(vk_device, "vkQueueSubmit"));
- if (!vkQueueSubmitFn) {
+ if (!vkQueueSubmit) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkQueueSubmit";
return false;
}
- vkQueueWaitIdleFn = reinterpret_cast<PFN_vkQueueWaitIdle>(
+ vkQueueWaitIdle = reinterpret_cast<PFN_vkQueueWaitIdle>(
vkGetDeviceProcAddr(vk_device, "vkQueueWaitIdle"));
- if (!vkQueueWaitIdleFn) {
+ if (!vkQueueWaitIdle) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkQueueWaitIdle";
return false;
}
- vkResetCommandBufferFn = reinterpret_cast<PFN_vkResetCommandBuffer>(
+ vkResetCommandBuffer = reinterpret_cast<PFN_vkResetCommandBuffer>(
vkGetDeviceProcAddr(vk_device, "vkResetCommandBuffer"));
- if (!vkResetCommandBufferFn) {
+ if (!vkResetCommandBuffer) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkResetCommandBuffer";
return false;
}
- vkResetFencesFn = reinterpret_cast<PFN_vkResetFences>(
+ vkResetFences = reinterpret_cast<PFN_vkResetFences>(
vkGetDeviceProcAddr(vk_device, "vkResetFences"));
- if (!vkResetFencesFn) {
+ if (!vkResetFences) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkResetFences";
return false;
}
- vkUnmapMemoryFn = reinterpret_cast<PFN_vkUnmapMemory>(
+ vkUnmapMemory = reinterpret_cast<PFN_vkUnmapMemory>(
vkGetDeviceProcAddr(vk_device, "vkUnmapMemory"));
- if (!vkUnmapMemoryFn) {
+ if (!vkUnmapMemory) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkUnmapMemory";
return false;
}
- vkUpdateDescriptorSetsFn = reinterpret_cast<PFN_vkUpdateDescriptorSets>(
+ vkUpdateDescriptorSets = reinterpret_cast<PFN_vkUpdateDescriptorSets>(
vkGetDeviceProcAddr(vk_device, "vkUpdateDescriptorSets"));
- if (!vkUpdateDescriptorSetsFn) {
+ if (!vkUpdateDescriptorSets) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkUpdateDescriptorSets";
return false;
}
- vkWaitForFencesFn = reinterpret_cast<PFN_vkWaitForFences>(
+ vkWaitForFences = reinterpret_cast<PFN_vkWaitForFences>(
vkGetDeviceProcAddr(vk_device, "vkWaitForFences"));
- if (!vkWaitForFencesFn) {
+ if (!vkWaitForFences) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkWaitForFences";
return false;
}
- if (api_version >= VK_API_VERSION_1_1) {
- vkGetDeviceQueue2Fn = reinterpret_cast<PFN_vkGetDeviceQueue2>(
- vkGetDeviceProcAddr(vk_device, "vkGetDeviceQueue2"));
- if (!vkGetDeviceQueue2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetDeviceQueue2";
- return false;
- }
-
- vkGetBufferMemoryRequirements2Fn =
- reinterpret_cast<PFN_vkGetBufferMemoryRequirements2>(
- vkGetDeviceProcAddr(vk_device, "vkGetBufferMemoryRequirements2"));
- if (!vkGetBufferMemoryRequirements2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetBufferMemoryRequirements2";
- return false;
- }
-
- vkGetImageMemoryRequirements2Fn =
- reinterpret_cast<PFN_vkGetImageMemoryRequirements2>(
- vkGetDeviceProcAddr(vk_device, "vkGetImageMemoryRequirements2"));
- if (!vkGetImageMemoryRequirements2Fn) {
- DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
- << "vkGetImageMemoryRequirements2";
- return false;
- }
- }
-
#if defined(OS_ANDROID)
if (gfx::HasExtension(
enabled_extensions,
VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME)) {
- vkGetAndroidHardwareBufferPropertiesANDROIDFn =
+ vkGetAndroidHardwareBufferPropertiesANDROID =
reinterpret_cast<PFN_vkGetAndroidHardwareBufferPropertiesANDROID>(
vkGetDeviceProcAddr(vk_device,
"vkGetAndroidHardwareBufferPropertiesANDROID"));
- if (!vkGetAndroidHardwareBufferPropertiesANDROIDFn) {
+ if (!vkGetAndroidHardwareBufferPropertiesANDROID) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetAndroidHardwareBufferPropertiesANDROID";
return false;
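
The binding above is only fetched when the Android external-memory extension is enabled. A hedged usage sketch through the bound pointer, where device and ahardware_buffer are hypothetical placeholders:

    // Hypothetical query via the bound pointer (Android only).
    VkAndroidHardwareBufferPropertiesANDROID props = {
        VK_STRUCTURE_TYPE_ANDROID_HARDWARE_BUFFER_PROPERTIES_ANDROID};
    VkResult result = gpu::GetVulkanFunctionPointers()
                          ->vkGetAndroidHardwareBufferPropertiesANDROID(
                              device, ahardware_buffer, &props);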
@@ -865,17 +865,17 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_LINUX) || defined(OS_ANDROID)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME)) {
- vkGetSemaphoreFdKHRFn = reinterpret_cast<PFN_vkGetSemaphoreFdKHR>(
+ vkGetSemaphoreFdKHR = reinterpret_cast<PFN_vkGetSemaphoreFdKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetSemaphoreFdKHR"));
- if (!vkGetSemaphoreFdKHRFn) {
+ if (!vkGetSemaphoreFdKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSemaphoreFdKHR";
return false;
}
- vkImportSemaphoreFdKHRFn = reinterpret_cast<PFN_vkImportSemaphoreFdKHR>(
+ vkImportSemaphoreFdKHR = reinterpret_cast<PFN_vkImportSemaphoreFdKHR>(
vkGetDeviceProcAddr(vk_device, "vkImportSemaphoreFdKHR"));
- if (!vkImportSemaphoreFdKHRFn) {
+ if (!vkImportSemaphoreFdKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkImportSemaphoreFdKHR";
return false;
@@ -886,19 +886,19 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_WIN)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_SEMAPHORE_WIN32_EXTENSION_NAME)) {
- vkGetSemaphoreWin32HandleKHRFn =
+ vkGetSemaphoreWin32HandleKHR =
reinterpret_cast<PFN_vkGetSemaphoreWin32HandleKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetSemaphoreWin32HandleKHR"));
- if (!vkGetSemaphoreWin32HandleKHRFn) {
+ if (!vkGetSemaphoreWin32HandleKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSemaphoreWin32HandleKHR";
return false;
}
- vkImportSemaphoreWin32HandleKHRFn =
+ vkImportSemaphoreWin32HandleKHR =
reinterpret_cast<PFN_vkImportSemaphoreWin32HandleKHR>(
vkGetDeviceProcAddr(vk_device, "vkImportSemaphoreWin32HandleKHR"));
- if (!vkImportSemaphoreWin32HandleKHRFn) {
+ if (!vkImportSemaphoreWin32HandleKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkImportSemaphoreWin32HandleKHR";
return false;
@@ -909,18 +909,18 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_LINUX) || defined(OS_ANDROID)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_MEMORY_FD_EXTENSION_NAME)) {
- vkGetMemoryFdKHRFn = reinterpret_cast<PFN_vkGetMemoryFdKHR>(
+ vkGetMemoryFdKHR = reinterpret_cast<PFN_vkGetMemoryFdKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetMemoryFdKHR"));
- if (!vkGetMemoryFdKHRFn) {
+ if (!vkGetMemoryFdKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryFdKHR";
return false;
}
- vkGetMemoryFdPropertiesKHRFn =
+ vkGetMemoryFdPropertiesKHR =
reinterpret_cast<PFN_vkGetMemoryFdPropertiesKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetMemoryFdPropertiesKHR"));
- if (!vkGetMemoryFdPropertiesKHRFn) {
+ if (!vkGetMemoryFdPropertiesKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryFdPropertiesKHR";
return false;
@@ -931,20 +931,19 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_WIN)
if (gfx::HasExtension(enabled_extensions,
VK_KHR_EXTERNAL_MEMORY_WIN32_EXTENSION_NAME)) {
- vkGetMemoryWin32HandleKHRFn =
- reinterpret_cast<PFN_vkGetMemoryWin32HandleKHR>(
- vkGetDeviceProcAddr(vk_device, "vkGetMemoryWin32HandleKHR"));
- if (!vkGetMemoryWin32HandleKHRFn) {
+ vkGetMemoryWin32HandleKHR = reinterpret_cast<PFN_vkGetMemoryWin32HandleKHR>(
+ vkGetDeviceProcAddr(vk_device, "vkGetMemoryWin32HandleKHR"));
+ if (!vkGetMemoryWin32HandleKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryWin32HandleKHR";
return false;
}
- vkGetMemoryWin32HandlePropertiesKHRFn =
+ vkGetMemoryWin32HandlePropertiesKHR =
reinterpret_cast<PFN_vkGetMemoryWin32HandlePropertiesKHR>(
vkGetDeviceProcAddr(vk_device,
"vkGetMemoryWin32HandlePropertiesKHR"));
- if (!vkGetMemoryWin32HandlePropertiesKHRFn) {
+ if (!vkGetMemoryWin32HandlePropertiesKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryWin32HandlePropertiesKHR";
return false;
@@ -955,21 +954,21 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_EXTERNAL_SEMAPHORE_EXTENSION_NAME)) {
- vkImportSemaphoreZirconHandleFUCHSIAFn =
+ vkImportSemaphoreZirconHandleFUCHSIA =
reinterpret_cast<PFN_vkImportSemaphoreZirconHandleFUCHSIA>(
vkGetDeviceProcAddr(vk_device,
"vkImportSemaphoreZirconHandleFUCHSIA"));
- if (!vkImportSemaphoreZirconHandleFUCHSIAFn) {
+ if (!vkImportSemaphoreZirconHandleFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkImportSemaphoreZirconHandleFUCHSIA";
return false;
}
- vkGetSemaphoreZirconHandleFUCHSIAFn =
+ vkGetSemaphoreZirconHandleFUCHSIA =
reinterpret_cast<PFN_vkGetSemaphoreZirconHandleFUCHSIA>(
vkGetDeviceProcAddr(vk_device,
"vkGetSemaphoreZirconHandleFUCHSIA"));
- if (!vkGetSemaphoreZirconHandleFUCHSIAFn) {
+ if (!vkGetSemaphoreZirconHandleFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSemaphoreZirconHandleFUCHSIA";
return false;
@@ -980,10 +979,10 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_EXTERNAL_MEMORY_EXTENSION_NAME)) {
- vkGetMemoryZirconHandleFUCHSIAFn =
+ vkGetMemoryZirconHandleFUCHSIA =
reinterpret_cast<PFN_vkGetMemoryZirconHandleFUCHSIA>(
vkGetDeviceProcAddr(vk_device, "vkGetMemoryZirconHandleFUCHSIA"));
- if (!vkGetMemoryZirconHandleFUCHSIAFn) {
+ if (!vkGetMemoryZirconHandleFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetMemoryZirconHandleFUCHSIA";
return false;
@@ -994,39 +993,39 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#if defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions,
VK_FUCHSIA_BUFFER_COLLECTION_EXTENSION_NAME)) {
- vkCreateBufferCollectionFUCHSIAFn =
+ vkCreateBufferCollectionFUCHSIA =
reinterpret_cast<PFN_vkCreateBufferCollectionFUCHSIA>(
vkGetDeviceProcAddr(vk_device, "vkCreateBufferCollectionFUCHSIA"));
- if (!vkCreateBufferCollectionFUCHSIAFn) {
+ if (!vkCreateBufferCollectionFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateBufferCollectionFUCHSIA";
return false;
}
- vkSetBufferCollectionConstraintsFUCHSIAFn =
+ vkSetBufferCollectionConstraintsFUCHSIA =
reinterpret_cast<PFN_vkSetBufferCollectionConstraintsFUCHSIA>(
vkGetDeviceProcAddr(vk_device,
"vkSetBufferCollectionConstraintsFUCHSIA"));
- if (!vkSetBufferCollectionConstraintsFUCHSIAFn) {
+ if (!vkSetBufferCollectionConstraintsFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkSetBufferCollectionConstraintsFUCHSIA";
return false;
}
- vkGetBufferCollectionPropertiesFUCHSIAFn =
+ vkGetBufferCollectionPropertiesFUCHSIA =
reinterpret_cast<PFN_vkGetBufferCollectionPropertiesFUCHSIA>(
vkGetDeviceProcAddr(vk_device,
"vkGetBufferCollectionPropertiesFUCHSIA"));
- if (!vkGetBufferCollectionPropertiesFUCHSIAFn) {
+ if (!vkGetBufferCollectionPropertiesFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetBufferCollectionPropertiesFUCHSIA";
return false;
}
- vkDestroyBufferCollectionFUCHSIAFn =
+ vkDestroyBufferCollectionFUCHSIA =
reinterpret_cast<PFN_vkDestroyBufferCollectionFUCHSIA>(
vkGetDeviceProcAddr(vk_device, "vkDestroyBufferCollectionFUCHSIA"));
- if (!vkDestroyBufferCollectionFUCHSIAFn) {
+ if (!vkDestroyBufferCollectionFUCHSIA) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroyBufferCollectionFUCHSIA";
return false;
@@ -1035,41 +1034,41 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointers(
#endif // defined(OS_FUCHSIA)
if (gfx::HasExtension(enabled_extensions, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
- vkAcquireNextImageKHRFn = reinterpret_cast<PFN_vkAcquireNextImageKHR>(
+ vkAcquireNextImageKHR = reinterpret_cast<PFN_vkAcquireNextImageKHR>(
vkGetDeviceProcAddr(vk_device, "vkAcquireNextImageKHR"));
- if (!vkAcquireNextImageKHRFn) {
+ if (!vkAcquireNextImageKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkAcquireNextImageKHR";
return false;
}
- vkCreateSwapchainKHRFn = reinterpret_cast<PFN_vkCreateSwapchainKHR>(
+ vkCreateSwapchainKHR = reinterpret_cast<PFN_vkCreateSwapchainKHR>(
vkGetDeviceProcAddr(vk_device, "vkCreateSwapchainKHR"));
- if (!vkCreateSwapchainKHRFn) {
+ if (!vkCreateSwapchainKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkCreateSwapchainKHR";
return false;
}
- vkDestroySwapchainKHRFn = reinterpret_cast<PFN_vkDestroySwapchainKHR>(
+ vkDestroySwapchainKHR = reinterpret_cast<PFN_vkDestroySwapchainKHR>(
vkGetDeviceProcAddr(vk_device, "vkDestroySwapchainKHR"));
- if (!vkDestroySwapchainKHRFn) {
+ if (!vkDestroySwapchainKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkDestroySwapchainKHR";
return false;
}
- vkGetSwapchainImagesKHRFn = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(
+ vkGetSwapchainImagesKHR = reinterpret_cast<PFN_vkGetSwapchainImagesKHR>(
vkGetDeviceProcAddr(vk_device, "vkGetSwapchainImagesKHR"));
- if (!vkGetSwapchainImagesKHRFn) {
+ if (!vkGetSwapchainImagesKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkGetSwapchainImagesKHR";
return false;
}
- vkQueuePresentKHRFn = reinterpret_cast<PFN_vkQueuePresentKHR>(
+ vkQueuePresentKHR = reinterpret_cast<PFN_vkQueuePresentKHR>(
vkGetDeviceProcAddr(vk_device, "vkQueuePresentKHR"));
- if (!vkQueuePresentKHRFn) {
+ if (!vkQueuePresentKHR) {
DLOG(WARNING) << "Failed to bind vulkan entrypoint: "
<< "vkQueuePresentKHR";
return false;
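
Every block in BindDeviceFunctionPointers repeats the same fetch-check-log sequence. Purely as an illustration of that pattern — this helper is not part of the patch — the repetition could be collapsed into a small template:

    // Illustrative only, not in this change: bind one device entry
    // point and log on failure, mirroring the hand-written blocks.
    template <typename Pfn>
    bool BindDeviceEntryPoint(VkDevice device, const char* name, Pfn* out) {
      *out = reinterpret_cast<Pfn>(vkGetDeviceProcAddr(device, name));
      if (!*out)
        DLOG(WARNING) << "Failed to bind vulkan entrypoint: " << name;
      return *out != nullptr;
    }

In practice the expanded form is what ships; the explicit blocks are script-friendly and keep each entry point greppable.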
diff --git a/chromium/gpu/vulkan/vulkan_function_pointers.h b/chromium/gpu/vulkan/vulkan_function_pointers.h
index cd696ce9963..9d9682c2976 100644
--- a/chromium/gpu/vulkan/vulkan_function_pointers.h
+++ b/chromium/gpu/vulkan/vulkan_function_pointers.h
@@ -44,6 +44,8 @@ namespace gpu {
struct VulkanFunctionPointers;
+constexpr uint32_t kVulkanRequiredApiVersion = VK_API_VERSION_1_1;
+
COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers* GetVulkanFunctionPointers();
struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
@@ -72,10 +74,10 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
public:
using Fn = R(VKAPI_PTR*)(Args...);
- explicit operator bool() { return !!fn_; }
+ explicit operator bool() const { return !!fn_; }
NO_SANITIZE("cfi-icall")
- R operator()(Args... args) { return fn_(args...); }
+ R operator()(Args... args) const { return fn_(args...); }
Fn get() const { return fn_; }
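
Const-qualifying both operators is a small companion change: a VulkanFunction member can now be tested and invoked through a const VulkanFunctionPointers reference. A hypothetical caller:

    // Hypothetical helper: both operators work on a const reference
    // now that they are const-qualified.
    void WaitIdle(const gpu::VulkanFunctionPointers& vfp, VkDevice device) {
      if (vfp.vkDeviceWaitIdle)        // explicit operator bool() const
        vfp.vkDeviceWaitIdle(device);  // R operator()(Args...) const
    }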
@@ -91,201 +93,197 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
};
// Unassociated functions
- VulkanFunction<PFN_vkEnumerateInstanceVersion> vkEnumerateInstanceVersionFn;
- VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddrFn;
+ VulkanFunction<PFN_vkGetInstanceProcAddr> vkGetInstanceProcAddr;
- VulkanFunction<PFN_vkCreateInstance> vkCreateInstanceFn;
+ VulkanFunction<PFN_vkEnumerateInstanceVersion> vkEnumerateInstanceVersion;
+ VulkanFunction<PFN_vkCreateInstance> vkCreateInstance;
VulkanFunction<PFN_vkEnumerateInstanceExtensionProperties>
- vkEnumerateInstanceExtensionPropertiesFn;
+ vkEnumerateInstanceExtensionProperties;
VulkanFunction<PFN_vkEnumerateInstanceLayerProperties>
- vkEnumerateInstanceLayerPropertiesFn;
+ vkEnumerateInstanceLayerProperties;
// Instance functions
- VulkanFunction<PFN_vkCreateDevice> vkCreateDeviceFn;
- VulkanFunction<PFN_vkDestroyInstance> vkDestroyInstanceFn;
+ VulkanFunction<PFN_vkCreateDevice> vkCreateDevice;
+ VulkanFunction<PFN_vkDestroyInstance> vkDestroyInstance;
VulkanFunction<PFN_vkEnumerateDeviceExtensionProperties>
- vkEnumerateDeviceExtensionPropertiesFn;
+ vkEnumerateDeviceExtensionProperties;
VulkanFunction<PFN_vkEnumerateDeviceLayerProperties>
- vkEnumerateDeviceLayerPropertiesFn;
- VulkanFunction<PFN_vkEnumeratePhysicalDevices> vkEnumeratePhysicalDevicesFn;
- VulkanFunction<PFN_vkGetDeviceProcAddr> vkGetDeviceProcAddrFn;
- VulkanFunction<PFN_vkGetPhysicalDeviceFeatures> vkGetPhysicalDeviceFeaturesFn;
+ vkEnumerateDeviceLayerProperties;
+ VulkanFunction<PFN_vkEnumeratePhysicalDevices> vkEnumeratePhysicalDevices;
+ VulkanFunction<PFN_vkGetDeviceProcAddr> vkGetDeviceProcAddr;
+ VulkanFunction<PFN_vkGetPhysicalDeviceFeatures2> vkGetPhysicalDeviceFeatures2;
VulkanFunction<PFN_vkGetPhysicalDeviceFormatProperties>
- vkGetPhysicalDeviceFormatPropertiesFn;
+ vkGetPhysicalDeviceFormatProperties;
+ VulkanFunction<PFN_vkGetPhysicalDeviceImageFormatProperties2>
+ vkGetPhysicalDeviceImageFormatProperties2;
VulkanFunction<PFN_vkGetPhysicalDeviceMemoryProperties>
- vkGetPhysicalDeviceMemoryPropertiesFn;
+ vkGetPhysicalDeviceMemoryProperties;
+ VulkanFunction<PFN_vkGetPhysicalDeviceMemoryProperties2>
+ vkGetPhysicalDeviceMemoryProperties2;
VulkanFunction<PFN_vkGetPhysicalDeviceProperties>
- vkGetPhysicalDevicePropertiesFn;
+ vkGetPhysicalDeviceProperties;
VulkanFunction<PFN_vkGetPhysicalDeviceQueueFamilyProperties>
- vkGetPhysicalDeviceQueueFamilyPropertiesFn;
+ vkGetPhysicalDeviceQueueFamilyProperties;
#if DCHECK_IS_ON()
VulkanFunction<PFN_vkCreateDebugReportCallbackEXT>
- vkCreateDebugReportCallbackEXTFn;
+ vkCreateDebugReportCallbackEXT;
VulkanFunction<PFN_vkDestroyDebugReportCallbackEXT>
- vkDestroyDebugReportCallbackEXTFn;
+ vkDestroyDebugReportCallbackEXT;
#endif // DCHECK_IS_ON()
- VulkanFunction<PFN_vkDestroySurfaceKHR> vkDestroySurfaceKHRFn;
+ VulkanFunction<PFN_vkDestroySurfaceKHR> vkDestroySurfaceKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR>
- vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn;
+ vkGetPhysicalDeviceSurfaceCapabilitiesKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceFormatsKHR>
- vkGetPhysicalDeviceSurfaceFormatsKHRFn;
+ vkGetPhysicalDeviceSurfaceFormatsKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceSurfaceSupportKHR>
- vkGetPhysicalDeviceSurfaceSupportKHRFn;
+ vkGetPhysicalDeviceSurfaceSupportKHR;
#if defined(USE_VULKAN_XLIB)
- VulkanFunction<PFN_vkCreateXlibSurfaceKHR> vkCreateXlibSurfaceKHRFn;
+ VulkanFunction<PFN_vkCreateXlibSurfaceKHR> vkCreateXlibSurfaceKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR>
- vkGetPhysicalDeviceXlibPresentationSupportKHRFn;
+ vkGetPhysicalDeviceXlibPresentationSupportKHR;
#endif // defined(USE_VULKAN_XLIB)
#if defined(OS_WIN)
- VulkanFunction<PFN_vkCreateWin32SurfaceKHR> vkCreateWin32SurfaceKHRFn;
+ VulkanFunction<PFN_vkCreateWin32SurfaceKHR> vkCreateWin32SurfaceKHR;
VulkanFunction<PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR>
- vkGetPhysicalDeviceWin32PresentationSupportKHRFn;
+ vkGetPhysicalDeviceWin32PresentationSupportKHR;
#endif // defined(OS_WIN)
#if defined(OS_ANDROID)
- VulkanFunction<PFN_vkCreateAndroidSurfaceKHR> vkCreateAndroidSurfaceKHRFn;
+ VulkanFunction<PFN_vkCreateAndroidSurfaceKHR> vkCreateAndroidSurfaceKHR;
#endif // defined(OS_ANDROID)
#if defined(OS_FUCHSIA)
VulkanFunction<PFN_vkCreateImagePipeSurfaceFUCHSIA>
- vkCreateImagePipeSurfaceFUCHSIAFn;
+ vkCreateImagePipeSurfaceFUCHSIA;
#endif // defined(OS_FUCHSIA)
- VulkanFunction<PFN_vkGetPhysicalDeviceImageFormatProperties2>
- vkGetPhysicalDeviceImageFormatProperties2Fn;
-
- VulkanFunction<PFN_vkGetPhysicalDeviceFeatures2>
- vkGetPhysicalDeviceFeatures2Fn;
-
// Device functions
- VulkanFunction<PFN_vkAllocateCommandBuffers> vkAllocateCommandBuffersFn;
- VulkanFunction<PFN_vkAllocateDescriptorSets> vkAllocateDescriptorSetsFn;
- VulkanFunction<PFN_vkAllocateMemory> vkAllocateMemoryFn;
- VulkanFunction<PFN_vkBeginCommandBuffer> vkBeginCommandBufferFn;
- VulkanFunction<PFN_vkBindBufferMemory> vkBindBufferMemoryFn;
- VulkanFunction<PFN_vkBindImageMemory> vkBindImageMemoryFn;
- VulkanFunction<PFN_vkCmdBeginRenderPass> vkCmdBeginRenderPassFn;
- VulkanFunction<PFN_vkCmdCopyBuffer> vkCmdCopyBufferFn;
- VulkanFunction<PFN_vkCmdCopyBufferToImage> vkCmdCopyBufferToImageFn;
- VulkanFunction<PFN_vkCmdEndRenderPass> vkCmdEndRenderPassFn;
- VulkanFunction<PFN_vkCmdExecuteCommands> vkCmdExecuteCommandsFn;
- VulkanFunction<PFN_vkCmdNextSubpass> vkCmdNextSubpassFn;
- VulkanFunction<PFN_vkCmdPipelineBarrier> vkCmdPipelineBarrierFn;
- VulkanFunction<PFN_vkCreateBuffer> vkCreateBufferFn;
- VulkanFunction<PFN_vkCreateCommandPool> vkCreateCommandPoolFn;
- VulkanFunction<PFN_vkCreateDescriptorPool> vkCreateDescriptorPoolFn;
- VulkanFunction<PFN_vkCreateDescriptorSetLayout> vkCreateDescriptorSetLayoutFn;
- VulkanFunction<PFN_vkCreateFence> vkCreateFenceFn;
- VulkanFunction<PFN_vkCreateFramebuffer> vkCreateFramebufferFn;
- VulkanFunction<PFN_vkCreateImage> vkCreateImageFn;
- VulkanFunction<PFN_vkCreateImageView> vkCreateImageViewFn;
- VulkanFunction<PFN_vkCreateRenderPass> vkCreateRenderPassFn;
- VulkanFunction<PFN_vkCreateSampler> vkCreateSamplerFn;
- VulkanFunction<PFN_vkCreateSemaphore> vkCreateSemaphoreFn;
- VulkanFunction<PFN_vkCreateShaderModule> vkCreateShaderModuleFn;
- VulkanFunction<PFN_vkDestroyBuffer> vkDestroyBufferFn;
- VulkanFunction<PFN_vkDestroyCommandPool> vkDestroyCommandPoolFn;
- VulkanFunction<PFN_vkDestroyDescriptorPool> vkDestroyDescriptorPoolFn;
- VulkanFunction<PFN_vkDestroyDescriptorSetLayout>
- vkDestroyDescriptorSetLayoutFn;
- VulkanFunction<PFN_vkDestroyDevice> vkDestroyDeviceFn;
- VulkanFunction<PFN_vkDestroyFence> vkDestroyFenceFn;
- VulkanFunction<PFN_vkDestroyFramebuffer> vkDestroyFramebufferFn;
- VulkanFunction<PFN_vkDestroyImage> vkDestroyImageFn;
- VulkanFunction<PFN_vkDestroyImageView> vkDestroyImageViewFn;
- VulkanFunction<PFN_vkDestroyRenderPass> vkDestroyRenderPassFn;
- VulkanFunction<PFN_vkDestroySampler> vkDestroySamplerFn;
- VulkanFunction<PFN_vkDestroySemaphore> vkDestroySemaphoreFn;
- VulkanFunction<PFN_vkDestroyShaderModule> vkDestroyShaderModuleFn;
- VulkanFunction<PFN_vkDeviceWaitIdle> vkDeviceWaitIdleFn;
- VulkanFunction<PFN_vkFlushMappedMemoryRanges> vkFlushMappedMemoryRangesFn;
- VulkanFunction<PFN_vkEndCommandBuffer> vkEndCommandBufferFn;
- VulkanFunction<PFN_vkFreeCommandBuffers> vkFreeCommandBuffersFn;
- VulkanFunction<PFN_vkFreeDescriptorSets> vkFreeDescriptorSetsFn;
- VulkanFunction<PFN_vkFreeMemory> vkFreeMemoryFn;
+ VulkanFunction<PFN_vkAllocateCommandBuffers> vkAllocateCommandBuffers;
+ VulkanFunction<PFN_vkAllocateDescriptorSets> vkAllocateDescriptorSets;
+ VulkanFunction<PFN_vkAllocateMemory> vkAllocateMemory;
+ VulkanFunction<PFN_vkBeginCommandBuffer> vkBeginCommandBuffer;
+ VulkanFunction<PFN_vkBindBufferMemory> vkBindBufferMemory;
+ VulkanFunction<PFN_vkBindBufferMemory2> vkBindBufferMemory2;
+ VulkanFunction<PFN_vkBindImageMemory> vkBindImageMemory;
+ VulkanFunction<PFN_vkBindImageMemory2> vkBindImageMemory2;
+ VulkanFunction<PFN_vkCmdBeginRenderPass> vkCmdBeginRenderPass;
+ VulkanFunction<PFN_vkCmdCopyBuffer> vkCmdCopyBuffer;
+ VulkanFunction<PFN_vkCmdCopyBufferToImage> vkCmdCopyBufferToImage;
+ VulkanFunction<PFN_vkCmdEndRenderPass> vkCmdEndRenderPass;
+ VulkanFunction<PFN_vkCmdExecuteCommands> vkCmdExecuteCommands;
+ VulkanFunction<PFN_vkCmdNextSubpass> vkCmdNextSubpass;
+ VulkanFunction<PFN_vkCmdPipelineBarrier> vkCmdPipelineBarrier;
+ VulkanFunction<PFN_vkCreateBuffer> vkCreateBuffer;
+ VulkanFunction<PFN_vkCreateCommandPool> vkCreateCommandPool;
+ VulkanFunction<PFN_vkCreateDescriptorPool> vkCreateDescriptorPool;
+ VulkanFunction<PFN_vkCreateDescriptorSetLayout> vkCreateDescriptorSetLayout;
+ VulkanFunction<PFN_vkCreateFence> vkCreateFence;
+ VulkanFunction<PFN_vkCreateFramebuffer> vkCreateFramebuffer;
+ VulkanFunction<PFN_vkCreateImage> vkCreateImage;
+ VulkanFunction<PFN_vkCreateImageView> vkCreateImageView;
+ VulkanFunction<PFN_vkCreateRenderPass> vkCreateRenderPass;
+ VulkanFunction<PFN_vkCreateSampler> vkCreateSampler;
+ VulkanFunction<PFN_vkCreateSemaphore> vkCreateSemaphore;
+ VulkanFunction<PFN_vkCreateShaderModule> vkCreateShaderModule;
+ VulkanFunction<PFN_vkDestroyBuffer> vkDestroyBuffer;
+ VulkanFunction<PFN_vkDestroyCommandPool> vkDestroyCommandPool;
+ VulkanFunction<PFN_vkDestroyDescriptorPool> vkDestroyDescriptorPool;
+ VulkanFunction<PFN_vkDestroyDescriptorSetLayout> vkDestroyDescriptorSetLayout;
+ VulkanFunction<PFN_vkDestroyDevice> vkDestroyDevice;
+ VulkanFunction<PFN_vkDestroyFence> vkDestroyFence;
+ VulkanFunction<PFN_vkDestroyFramebuffer> vkDestroyFramebuffer;
+ VulkanFunction<PFN_vkDestroyImage> vkDestroyImage;
+ VulkanFunction<PFN_vkDestroyImageView> vkDestroyImageView;
+ VulkanFunction<PFN_vkDestroyRenderPass> vkDestroyRenderPass;
+ VulkanFunction<PFN_vkDestroySampler> vkDestroySampler;
+ VulkanFunction<PFN_vkDestroySemaphore> vkDestroySemaphore;
+ VulkanFunction<PFN_vkDestroyShaderModule> vkDestroyShaderModule;
+ VulkanFunction<PFN_vkDeviceWaitIdle> vkDeviceWaitIdle;
+ VulkanFunction<PFN_vkFlushMappedMemoryRanges> vkFlushMappedMemoryRanges;
+ VulkanFunction<PFN_vkEndCommandBuffer> vkEndCommandBuffer;
+ VulkanFunction<PFN_vkFreeCommandBuffers> vkFreeCommandBuffers;
+ VulkanFunction<PFN_vkFreeDescriptorSets> vkFreeDescriptorSets;
+ VulkanFunction<PFN_vkFreeMemory> vkFreeMemory;
VulkanFunction<PFN_vkInvalidateMappedMemoryRanges>
- vkInvalidateMappedMemoryRangesFn;
+ vkInvalidateMappedMemoryRanges;
VulkanFunction<PFN_vkGetBufferMemoryRequirements>
- vkGetBufferMemoryRequirementsFn;
- VulkanFunction<PFN_vkGetDeviceQueue> vkGetDeviceQueueFn;
- VulkanFunction<PFN_vkGetFenceStatus> vkGetFenceStatusFn;
- VulkanFunction<PFN_vkGetImageMemoryRequirements>
- vkGetImageMemoryRequirementsFn;
- VulkanFunction<PFN_vkMapMemory> vkMapMemoryFn;
- VulkanFunction<PFN_vkQueueSubmit> vkQueueSubmitFn;
- VulkanFunction<PFN_vkQueueWaitIdle> vkQueueWaitIdleFn;
- VulkanFunction<PFN_vkResetCommandBuffer> vkResetCommandBufferFn;
- VulkanFunction<PFN_vkResetFences> vkResetFencesFn;
- VulkanFunction<PFN_vkUnmapMemory> vkUnmapMemoryFn;
- VulkanFunction<PFN_vkUpdateDescriptorSets> vkUpdateDescriptorSetsFn;
- VulkanFunction<PFN_vkWaitForFences> vkWaitForFencesFn;
-
- VulkanFunction<PFN_vkGetDeviceQueue2> vkGetDeviceQueue2Fn;
+ vkGetBufferMemoryRequirements;
VulkanFunction<PFN_vkGetBufferMemoryRequirements2>
- vkGetBufferMemoryRequirements2Fn;
+ vkGetBufferMemoryRequirements2;
+ VulkanFunction<PFN_vkGetDeviceQueue> vkGetDeviceQueue;
+ VulkanFunction<PFN_vkGetDeviceQueue2> vkGetDeviceQueue2;
+ VulkanFunction<PFN_vkGetFenceStatus> vkGetFenceStatus;
+ VulkanFunction<PFN_vkGetImageMemoryRequirements> vkGetImageMemoryRequirements;
VulkanFunction<PFN_vkGetImageMemoryRequirements2>
- vkGetImageMemoryRequirements2Fn;
+ vkGetImageMemoryRequirements2;
+ VulkanFunction<PFN_vkMapMemory> vkMapMemory;
+ VulkanFunction<PFN_vkQueueSubmit> vkQueueSubmit;
+ VulkanFunction<PFN_vkQueueWaitIdle> vkQueueWaitIdle;
+ VulkanFunction<PFN_vkResetCommandBuffer> vkResetCommandBuffer;
+ VulkanFunction<PFN_vkResetFences> vkResetFences;
+ VulkanFunction<PFN_vkUnmapMemory> vkUnmapMemory;
+ VulkanFunction<PFN_vkUpdateDescriptorSets> vkUpdateDescriptorSets;
+ VulkanFunction<PFN_vkWaitForFences> vkWaitForFences;
#if defined(OS_ANDROID)
VulkanFunction<PFN_vkGetAndroidHardwareBufferPropertiesANDROID>
- vkGetAndroidHardwareBufferPropertiesANDROIDFn;
+ vkGetAndroidHardwareBufferPropertiesANDROID;
#endif // defined(OS_ANDROID)
#if defined(OS_LINUX) || defined(OS_ANDROID)
- VulkanFunction<PFN_vkGetSemaphoreFdKHR> vkGetSemaphoreFdKHRFn;
- VulkanFunction<PFN_vkImportSemaphoreFdKHR> vkImportSemaphoreFdKHRFn;
+ VulkanFunction<PFN_vkGetSemaphoreFdKHR> vkGetSemaphoreFdKHR;
+ VulkanFunction<PFN_vkImportSemaphoreFdKHR> vkImportSemaphoreFdKHR;
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_WIN)
- VulkanFunction<PFN_vkGetSemaphoreWin32HandleKHR>
- vkGetSemaphoreWin32HandleKHRFn;
+ VulkanFunction<PFN_vkGetSemaphoreWin32HandleKHR> vkGetSemaphoreWin32HandleKHR;
VulkanFunction<PFN_vkImportSemaphoreWin32HandleKHR>
- vkImportSemaphoreWin32HandleKHRFn;
+ vkImportSemaphoreWin32HandleKHR;
#endif // defined(OS_WIN)
#if defined(OS_LINUX) || defined(OS_ANDROID)
- VulkanFunction<PFN_vkGetMemoryFdKHR> vkGetMemoryFdKHRFn;
- VulkanFunction<PFN_vkGetMemoryFdPropertiesKHR> vkGetMemoryFdPropertiesKHRFn;
+ VulkanFunction<PFN_vkGetMemoryFdKHR> vkGetMemoryFdKHR;
+ VulkanFunction<PFN_vkGetMemoryFdPropertiesKHR> vkGetMemoryFdPropertiesKHR;
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#if defined(OS_WIN)
- VulkanFunction<PFN_vkGetMemoryWin32HandleKHR> vkGetMemoryWin32HandleKHRFn;
+ VulkanFunction<PFN_vkGetMemoryWin32HandleKHR> vkGetMemoryWin32HandleKHR;
VulkanFunction<PFN_vkGetMemoryWin32HandlePropertiesKHR>
- vkGetMemoryWin32HandlePropertiesKHRFn;
+ vkGetMemoryWin32HandlePropertiesKHR;
#endif // defined(OS_WIN)
#if defined(OS_FUCHSIA)
VulkanFunction<PFN_vkImportSemaphoreZirconHandleFUCHSIA>
- vkImportSemaphoreZirconHandleFUCHSIAFn;
+ vkImportSemaphoreZirconHandleFUCHSIA;
VulkanFunction<PFN_vkGetSemaphoreZirconHandleFUCHSIA>
- vkGetSemaphoreZirconHandleFUCHSIAFn;
+ vkGetSemaphoreZirconHandleFUCHSIA;
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
VulkanFunction<PFN_vkGetMemoryZirconHandleFUCHSIA>
- vkGetMemoryZirconHandleFUCHSIAFn;
+ vkGetMemoryZirconHandleFUCHSIA;
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
VulkanFunction<PFN_vkCreateBufferCollectionFUCHSIA>
- vkCreateBufferCollectionFUCHSIAFn;
+ vkCreateBufferCollectionFUCHSIA;
VulkanFunction<PFN_vkSetBufferCollectionConstraintsFUCHSIA>
- vkSetBufferCollectionConstraintsFUCHSIAFn;
+ vkSetBufferCollectionConstraintsFUCHSIA;
VulkanFunction<PFN_vkGetBufferCollectionPropertiesFUCHSIA>
- vkGetBufferCollectionPropertiesFUCHSIAFn;
+ vkGetBufferCollectionPropertiesFUCHSIA;
VulkanFunction<PFN_vkDestroyBufferCollectionFUCHSIA>
- vkDestroyBufferCollectionFUCHSIAFn;
+ vkDestroyBufferCollectionFUCHSIA;
#endif // defined(OS_FUCHSIA)
- VulkanFunction<PFN_vkAcquireNextImageKHR> vkAcquireNextImageKHRFn;
- VulkanFunction<PFN_vkCreateSwapchainKHR> vkCreateSwapchainKHRFn;
- VulkanFunction<PFN_vkDestroySwapchainKHR> vkDestroySwapchainKHRFn;
- VulkanFunction<PFN_vkGetSwapchainImagesKHR> vkGetSwapchainImagesKHRFn;
- VulkanFunction<PFN_vkQueuePresentKHR> vkQueuePresentKHRFn;
+ VulkanFunction<PFN_vkAcquireNextImageKHR> vkAcquireNextImageKHR;
+ VulkanFunction<PFN_vkCreateSwapchainKHR> vkCreateSwapchainKHR;
+ VulkanFunction<PFN_vkDestroySwapchainKHR> vkDestroySwapchainKHR;
+ VulkanFunction<PFN_vkGetSwapchainImagesKHR> vkGetSwapchainImagesKHR;
+ VulkanFunction<PFN_vkQueuePresentKHR> vkQueuePresentKHR;
};
} // namespace gpu
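
Dropping the Fn suffix makes every member's name identical to the canonical Vulkan entry point, so the ALWAYS_INLINE wrappers that follow can keep the standard spelling while forwarding into the struct, and existing call sites compile unchanged. An illustrative call site (hypothetical, assuming a bound device):

    // The canonical name resolves to the inline wrapper, which
    // forwards to the struct member of the same name.
    VkFenceCreateInfo info = {VK_STRUCTURE_TYPE_FENCE_CREATE_INFO};
    VkFence fence = VK_NULL_HANDLE;
    VkResult result =
        vkCreateFence(device, &info, /*pAllocator=*/nullptr, &fence);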
@@ -293,18 +291,18 @@ struct COMPONENT_EXPORT(VULKAN) VulkanFunctionPointers {
// Unassociated functions
ALWAYS_INLINE PFN_vkVoidFunction vkGetInstanceProcAddr(VkInstance instance,
const char* pName) {
- return gpu::GetVulkanFunctionPointers()->vkGetInstanceProcAddrFn(instance,
- pName);
+ return gpu::GetVulkanFunctionPointers()->vkGetInstanceProcAddr(instance,
+ pName);
}
+
ALWAYS_INLINE VkResult vkEnumerateInstanceVersion(uint32_t* pApiVersion) {
- return gpu::GetVulkanFunctionPointers()->vkEnumerateInstanceVersionFn(
+ return gpu::GetVulkanFunctionPointers()->vkEnumerateInstanceVersion(
pApiVersion);
}
-
ALWAYS_INLINE VkResult vkCreateInstance(const VkInstanceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkInstance* pInstance) {
- return gpu::GetVulkanFunctionPointers()->vkCreateInstanceFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateInstance(
pCreateInfo, pAllocator, pInstance);
}
ALWAYS_INLINE VkResult
@@ -312,13 +310,13 @@ vkEnumerateInstanceExtensionProperties(const char* pLayerName,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties) {
return gpu::GetVulkanFunctionPointers()
- ->vkEnumerateInstanceExtensionPropertiesFn(pLayerName, pPropertyCount,
- pProperties);
+ ->vkEnumerateInstanceExtensionProperties(pLayerName, pPropertyCount,
+ pProperties);
}
ALWAYS_INLINE VkResult
vkEnumerateInstanceLayerProperties(uint32_t* pPropertyCount,
VkLayerProperties* pProperties) {
- return gpu::GetVulkanFunctionPointers()->vkEnumerateInstanceLayerPropertiesFn(
+ return gpu::GetVulkanFunctionPointers()->vkEnumerateInstanceLayerProperties(
pPropertyCount, pProperties);
}
@@ -327,66 +325,77 @@ ALWAYS_INLINE VkResult vkCreateDevice(VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDevice* pDevice) {
- return gpu::GetVulkanFunctionPointers()->vkCreateDeviceFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateDevice(
physicalDevice, pCreateInfo, pAllocator, pDevice);
}
ALWAYS_INLINE void vkDestroyInstance(VkInstance instance,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyInstanceFn(instance,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyInstance(instance,
+ pAllocator);
}
ALWAYS_INLINE VkResult
vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
const char* pLayerName,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties) {
- return gpu::GetVulkanFunctionPointers()
- ->vkEnumerateDeviceExtensionPropertiesFn(physicalDevice, pLayerName,
- pPropertyCount, pProperties);
+ return gpu::GetVulkanFunctionPointers()->vkEnumerateDeviceExtensionProperties(
+ physicalDevice, pLayerName, pPropertyCount, pProperties);
}
ALWAYS_INLINE VkResult
vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice,
uint32_t* pPropertyCount,
VkLayerProperties* pProperties) {
- return gpu::GetVulkanFunctionPointers()->vkEnumerateDeviceLayerPropertiesFn(
+ return gpu::GetVulkanFunctionPointers()->vkEnumerateDeviceLayerProperties(
physicalDevice, pPropertyCount, pProperties);
}
ALWAYS_INLINE VkResult
vkEnumeratePhysicalDevices(VkInstance instance,
uint32_t* pPhysicalDeviceCount,
VkPhysicalDevice* pPhysicalDevices) {
- return gpu::GetVulkanFunctionPointers()->vkEnumeratePhysicalDevicesFn(
+ return gpu::GetVulkanFunctionPointers()->vkEnumeratePhysicalDevices(
instance, pPhysicalDeviceCount, pPhysicalDevices);
}
ALWAYS_INLINE PFN_vkVoidFunction vkGetDeviceProcAddr(VkDevice device,
const char* pName) {
- return gpu::GetVulkanFunctionPointers()->vkGetDeviceProcAddrFn(device, pName);
+ return gpu::GetVulkanFunctionPointers()->vkGetDeviceProcAddr(device, pName);
}
-ALWAYS_INLINE void vkGetPhysicalDeviceFeatures(
+ALWAYS_INLINE void vkGetPhysicalDeviceFeatures2(
VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceFeatures* pFeatures) {
- return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceFeaturesFn(
+ VkPhysicalDeviceFeatures2* pFeatures) {
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceFeatures2(
physicalDevice, pFeatures);
}
ALWAYS_INLINE void vkGetPhysicalDeviceFormatProperties(
VkPhysicalDevice physicalDevice,
VkFormat format,
VkFormatProperties* pFormatProperties) {
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceFormatProperties(
+ physicalDevice, format, pFormatProperties);
+}
+ALWAYS_INLINE VkResult vkGetPhysicalDeviceImageFormatProperties2(
+ VkPhysicalDevice physicalDevice,
+ const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
+ VkImageFormatProperties2* pImageFormatProperties) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceFormatPropertiesFn(physicalDevice, format,
- pFormatProperties);
+ ->vkGetPhysicalDeviceImageFormatProperties2(
+ physicalDevice, pImageFormatInfo, pImageFormatProperties);
}
ALWAYS_INLINE void vkGetPhysicalDeviceMemoryProperties(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceMemoryProperties* pMemoryProperties) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceMemoryPropertiesFn(physicalDevice,
- pMemoryProperties);
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceMemoryProperties(
+ physicalDevice, pMemoryProperties);
+}
+ALWAYS_INLINE void vkGetPhysicalDeviceMemoryProperties2(
+ VkPhysicalDevice physicalDevice,
+ VkPhysicalDeviceMemoryProperties2* pMemoryProperties) {
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceMemoryProperties2(
+ physicalDevice, pMemoryProperties);
}
ALWAYS_INLINE void vkGetPhysicalDeviceProperties(
VkPhysicalDevice physicalDevice,
VkPhysicalDeviceProperties* pProperties) {
- return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDevicePropertiesFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceProperties(
physicalDevice, pProperties);
}
ALWAYS_INLINE void vkGetPhysicalDeviceQueueFamilyProperties(
@@ -394,7 +403,7 @@ ALWAYS_INLINE void vkGetPhysicalDeviceQueueFamilyProperties(
uint32_t* pQueueFamilyPropertyCount,
VkQueueFamilyProperties* pQueueFamilyProperties) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceQueueFamilyPropertiesFn(
+ ->vkGetPhysicalDeviceQueueFamilyProperties(
physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
}
@@ -404,14 +413,14 @@ ALWAYS_INLINE VkResult vkCreateDebugReportCallbackEXT(
const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDebugReportCallbackEXT* pCallback) {
- return gpu::GetVulkanFunctionPointers()->vkCreateDebugReportCallbackEXTFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateDebugReportCallbackEXT(
instance, pCreateInfo, pAllocator, pCallback);
}
ALWAYS_INLINE void vkDestroyDebugReportCallbackEXT(
VkInstance instance,
VkDebugReportCallbackEXT callback,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyDebugReportCallbackEXTFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyDebugReportCallbackEXT(
instance, callback, pAllocator);
}
#endif // DCHECK_IS_ON()
@@ -420,7 +429,7 @@ ALWAYS_INLINE void vkDestroySurfaceKHR(
VkInstance instance,
VkSurfaceKHR surface,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroySurfaceKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroySurfaceKHR(
instance, surface, pAllocator);
}
ALWAYS_INLINE VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
@@ -428,26 +437,24 @@ ALWAYS_INLINE VkResult vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
VkSurfaceKHR surface,
VkSurfaceCapabilitiesKHR* pSurfaceCapabilities) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceSurfaceCapabilitiesKHRFn(physicalDevice, surface,
- pSurfaceCapabilities);
+ ->vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice, surface,
+ pSurfaceCapabilities);
}
ALWAYS_INLINE VkResult
vkGetPhysicalDeviceSurfaceFormatsKHR(VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
uint32_t* pSurfaceFormatCount,
VkSurfaceFormatKHR* pSurfaceFormats) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceSurfaceFormatsKHRFn(
- physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceSurfaceFormatsKHR(
+ physicalDevice, surface, pSurfaceFormatCount, pSurfaceFormats);
}
ALWAYS_INLINE VkResult
vkGetPhysicalDeviceSurfaceSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
VkSurfaceKHR surface,
VkBool32* pSupported) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceSurfaceSupportKHRFn(physicalDevice, queueFamilyIndex,
- surface, pSupported);
+ return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceSurfaceSupportKHR(
+ physicalDevice, queueFamilyIndex, surface, pSupported);
}
#if defined(USE_VULKAN_XLIB)
@@ -456,7 +463,7 @@ vkCreateXlibSurfaceKHR(VkInstance instance,
const VkXlibSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface) {
- return gpu::GetVulkanFunctionPointers()->vkCreateXlibSurfaceKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateXlibSurfaceKHR(
instance, pCreateInfo, pAllocator, pSurface);
}
ALWAYS_INLINE VkBool32
@@ -465,7 +472,7 @@ vkGetPhysicalDeviceXlibPresentationSupportKHR(VkPhysicalDevice physicalDevice,
Display* dpy,
VisualID visualID) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceXlibPresentationSupportKHRFn(
+ ->vkGetPhysicalDeviceXlibPresentationSupportKHR(
physicalDevice, queueFamilyIndex, dpy, visualID);
}
#endif // defined(USE_VULKAN_XLIB)
@@ -476,15 +483,15 @@ vkCreateWin32SurfaceKHR(VkInstance instance,
const VkWin32SurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface) {
- return gpu::GetVulkanFunctionPointers()->vkCreateWin32SurfaceKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateWin32SurfaceKHR(
instance, pCreateInfo, pAllocator, pSurface);
}
ALWAYS_INLINE VkBool32
vkGetPhysicalDeviceWin32PresentationSupportKHR(VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceWin32PresentationSupportKHRFn(physicalDevice,
- queueFamilyIndex);
+ ->vkGetPhysicalDeviceWin32PresentationSupportKHR(physicalDevice,
+ queueFamilyIndex);
}
#endif // defined(OS_WIN)
@@ -494,7 +501,7 @@ vkCreateAndroidSurfaceKHR(VkInstance instance,
const VkAndroidSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface) {
- return gpu::GetVulkanFunctionPointers()->vkCreateAndroidSurfaceKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateAndroidSurfaceKHR(
instance, pCreateInfo, pAllocator, pSurface);
}
#endif // defined(OS_ANDROID)
@@ -505,40 +512,24 @@ ALWAYS_INLINE VkResult vkCreateImagePipeSurfaceFUCHSIA(
const VkImagePipeSurfaceCreateInfoFUCHSIA* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface) {
- return gpu::GetVulkanFunctionPointers()->vkCreateImagePipeSurfaceFUCHSIAFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateImagePipeSurfaceFUCHSIA(
instance, pCreateInfo, pAllocator, pSurface);
}
#endif // defined(OS_FUCHSIA)
-ALWAYS_INLINE VkResult vkGetPhysicalDeviceImageFormatProperties2(
- VkPhysicalDevice physicalDevice,
- const VkPhysicalDeviceImageFormatInfo2* pImageFormatInfo,
- VkImageFormatProperties2* pImageFormatProperties) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetPhysicalDeviceImageFormatProperties2Fn(
- physicalDevice, pImageFormatInfo, pImageFormatProperties);
-}
-
-ALWAYS_INLINE void vkGetPhysicalDeviceFeatures2(
- VkPhysicalDevice physicalDevice,
- VkPhysicalDeviceFeatures2* pFeatures) {
- return gpu::GetVulkanFunctionPointers()->vkGetPhysicalDeviceFeatures2Fn(
- physicalDevice, pFeatures);
-}
-
// Device functions
ALWAYS_INLINE VkResult
vkAllocateCommandBuffers(VkDevice device,
const VkCommandBufferAllocateInfo* pAllocateInfo,
VkCommandBuffer* pCommandBuffers) {
- return gpu::GetVulkanFunctionPointers()->vkAllocateCommandBuffersFn(
+ return gpu::GetVulkanFunctionPointers()->vkAllocateCommandBuffers(
device, pAllocateInfo, pCommandBuffers);
}
ALWAYS_INLINE VkResult
vkAllocateDescriptorSets(VkDevice device,
const VkDescriptorSetAllocateInfo* pAllocateInfo,
VkDescriptorSet* pDescriptorSets) {
- return gpu::GetVulkanFunctionPointers()->vkAllocateDescriptorSetsFn(
+ return gpu::GetVulkanFunctionPointers()->vkAllocateDescriptorSets(
device, pAllocateInfo, pDescriptorSets);
}
ALWAYS_INLINE VkResult
@@ -546,34 +537,48 @@ vkAllocateMemory(VkDevice device,
const VkMemoryAllocateInfo* pAllocateInfo,
const VkAllocationCallbacks* pAllocator,
VkDeviceMemory* pMemory) {
- return gpu::GetVulkanFunctionPointers()->vkAllocateMemoryFn(
+ return gpu::GetVulkanFunctionPointers()->vkAllocateMemory(
device, pAllocateInfo, pAllocator, pMemory);
}
ALWAYS_INLINE VkResult
vkBeginCommandBuffer(VkCommandBuffer commandBuffer,
const VkCommandBufferBeginInfo* pBeginInfo) {
- return gpu::GetVulkanFunctionPointers()->vkBeginCommandBufferFn(commandBuffer,
- pBeginInfo);
+ return gpu::GetVulkanFunctionPointers()->vkBeginCommandBuffer(commandBuffer,
+ pBeginInfo);
}
ALWAYS_INLINE VkResult vkBindBufferMemory(VkDevice device,
VkBuffer buffer,
VkDeviceMemory memory,
VkDeviceSize memoryOffset) {
- return gpu::GetVulkanFunctionPointers()->vkBindBufferMemoryFn(
+ return gpu::GetVulkanFunctionPointers()->vkBindBufferMemory(
device, buffer, memory, memoryOffset);
}
+ALWAYS_INLINE VkResult
+vkBindBufferMemory2(VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindBufferMemoryInfo* pBindInfos) {
+ return gpu::GetVulkanFunctionPointers()->vkBindBufferMemory2(
+ device, bindInfoCount, pBindInfos);
+}
ALWAYS_INLINE VkResult vkBindImageMemory(VkDevice device,
VkImage image,
VkDeviceMemory memory,
VkDeviceSize memoryOffset) {
- return gpu::GetVulkanFunctionPointers()->vkBindImageMemoryFn(
+ return gpu::GetVulkanFunctionPointers()->vkBindImageMemory(
device, image, memory, memoryOffset);
}
+ALWAYS_INLINE VkResult
+vkBindImageMemory2(VkDevice device,
+ uint32_t bindInfoCount,
+ const VkBindImageMemoryInfo* pBindInfos) {
+ return gpu::GetVulkanFunctionPointers()->vkBindImageMemory2(
+ device, bindInfoCount, pBindInfos);
+}
ALWAYS_INLINE void vkCmdBeginRenderPass(
VkCommandBuffer commandBuffer,
const VkRenderPassBeginInfo* pRenderPassBegin,
VkSubpassContents contents) {
- return gpu::GetVulkanFunctionPointers()->vkCmdBeginRenderPassFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdBeginRenderPass(
commandBuffer, pRenderPassBegin, contents);
}
ALWAYS_INLINE void vkCmdCopyBuffer(VkCommandBuffer commandBuffer,
@@ -581,7 +586,7 @@ ALWAYS_INLINE void vkCmdCopyBuffer(VkCommandBuffer commandBuffer,
VkBuffer dstBuffer,
uint32_t regionCount,
const VkBufferCopy* pRegions) {
- return gpu::GetVulkanFunctionPointers()->vkCmdCopyBufferFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdCopyBuffer(
commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
}
ALWAYS_INLINE void vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer,
@@ -590,24 +595,24 @@ ALWAYS_INLINE void vkCmdCopyBufferToImage(VkCommandBuffer commandBuffer,
VkImageLayout dstImageLayout,
uint32_t regionCount,
const VkBufferImageCopy* pRegions) {
- return gpu::GetVulkanFunctionPointers()->vkCmdCopyBufferToImageFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdCopyBufferToImage(
commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount,
pRegions);
}
ALWAYS_INLINE void vkCmdEndRenderPass(VkCommandBuffer commandBuffer) {
- return gpu::GetVulkanFunctionPointers()->vkCmdEndRenderPassFn(commandBuffer);
+ return gpu::GetVulkanFunctionPointers()->vkCmdEndRenderPass(commandBuffer);
}
ALWAYS_INLINE void vkCmdExecuteCommands(
VkCommandBuffer commandBuffer,
uint32_t commandBufferCount,
const VkCommandBuffer* pCommandBuffers) {
- return gpu::GetVulkanFunctionPointers()->vkCmdExecuteCommandsFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdExecuteCommands(
commandBuffer, commandBufferCount, pCommandBuffers);
}
ALWAYS_INLINE void vkCmdNextSubpass(VkCommandBuffer commandBuffer,
VkSubpassContents contents) {
- return gpu::GetVulkanFunctionPointers()->vkCmdNextSubpassFn(commandBuffer,
- contents);
+ return gpu::GetVulkanFunctionPointers()->vkCmdNextSubpass(commandBuffer,
+ contents);
}
ALWAYS_INLINE void vkCmdPipelineBarrier(
VkCommandBuffer commandBuffer,
@@ -620,7 +625,7 @@ ALWAYS_INLINE void vkCmdPipelineBarrier(
const VkBufferMemoryBarrier* pBufferMemoryBarriers,
uint32_t imageMemoryBarrierCount,
const VkImageMemoryBarrier* pImageMemoryBarriers) {
- return gpu::GetVulkanFunctionPointers()->vkCmdPipelineBarrierFn(
+ return gpu::GetVulkanFunctionPointers()->vkCmdPipelineBarrier(
commandBuffer, srcStageMask, dstStageMask, dependencyFlags,
memoryBarrierCount, pMemoryBarriers, bufferMemoryBarrierCount,
pBufferMemoryBarriers, imageMemoryBarrierCount, pImageMemoryBarriers);
@@ -629,15 +634,15 @@ ALWAYS_INLINE VkResult vkCreateBuffer(VkDevice device,
const VkBufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkBuffer* pBuffer) {
- return gpu::GetVulkanFunctionPointers()->vkCreateBufferFn(
- device, pCreateInfo, pAllocator, pBuffer);
+ return gpu::GetVulkanFunctionPointers()->vkCreateBuffer(device, pCreateInfo,
+ pAllocator, pBuffer);
}
ALWAYS_INLINE VkResult
vkCreateCommandPool(VkDevice device,
const VkCommandPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkCommandPool* pCommandPool) {
- return gpu::GetVulkanFunctionPointers()->vkCreateCommandPoolFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateCommandPool(
device, pCreateInfo, pAllocator, pCommandPool);
}
ALWAYS_INLINE VkResult
@@ -645,7 +650,7 @@ vkCreateDescriptorPool(VkDevice device,
const VkDescriptorPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDescriptorPool* pDescriptorPool) {
- return gpu::GetVulkanFunctionPointers()->vkCreateDescriptorPoolFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateDescriptorPool(
device, pCreateInfo, pAllocator, pDescriptorPool);
}
ALWAYS_INLINE VkResult
@@ -653,37 +658,37 @@ vkCreateDescriptorSetLayout(VkDevice device,
const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDescriptorSetLayout* pSetLayout) {
- return gpu::GetVulkanFunctionPointers()->vkCreateDescriptorSetLayoutFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateDescriptorSetLayout(
device, pCreateInfo, pAllocator, pSetLayout);
}
ALWAYS_INLINE VkResult vkCreateFence(VkDevice device,
const VkFenceCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkFence* pFence) {
- return gpu::GetVulkanFunctionPointers()->vkCreateFenceFn(device, pCreateInfo,
- pAllocator, pFence);
+ return gpu::GetVulkanFunctionPointers()->vkCreateFence(device, pCreateInfo,
+ pAllocator, pFence);
}
ALWAYS_INLINE VkResult
vkCreateFramebuffer(VkDevice device,
const VkFramebufferCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkFramebuffer* pFramebuffer) {
- return gpu::GetVulkanFunctionPointers()->vkCreateFramebufferFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateFramebuffer(
device, pCreateInfo, pAllocator, pFramebuffer);
}
ALWAYS_INLINE VkResult vkCreateImage(VkDevice device,
const VkImageCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkImage* pImage) {
- return gpu::GetVulkanFunctionPointers()->vkCreateImageFn(device, pCreateInfo,
- pAllocator, pImage);
+ return gpu::GetVulkanFunctionPointers()->vkCreateImage(device, pCreateInfo,
+ pAllocator, pImage);
}
ALWAYS_INLINE VkResult
vkCreateImageView(VkDevice device,
const VkImageViewCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkImageView* pView) {
- return gpu::GetVulkanFunctionPointers()->vkCreateImageViewFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateImageView(
device, pCreateInfo, pAllocator, pView);
}
ALWAYS_INLINE VkResult
@@ -691,14 +696,14 @@ vkCreateRenderPass(VkDevice device,
const VkRenderPassCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkRenderPass* pRenderPass) {
- return gpu::GetVulkanFunctionPointers()->vkCreateRenderPassFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateRenderPass(
device, pCreateInfo, pAllocator, pRenderPass);
}
ALWAYS_INLINE VkResult vkCreateSampler(VkDevice device,
const VkSamplerCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSampler* pSampler) {
- return gpu::GetVulkanFunctionPointers()->vkCreateSamplerFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateSampler(
device, pCreateInfo, pAllocator, pSampler);
}
ALWAYS_INLINE VkResult
@@ -706,7 +711,7 @@ vkCreateSemaphore(VkDevice device,
const VkSemaphoreCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSemaphore* pSemaphore) {
- return gpu::GetVulkanFunctionPointers()->vkCreateSemaphoreFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateSemaphore(
device, pCreateInfo, pAllocator, pSemaphore);
}
ALWAYS_INLINE VkResult
@@ -714,111 +719,110 @@ vkCreateShaderModule(VkDevice device,
const VkShaderModuleCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkShaderModule* pShaderModule) {
- return gpu::GetVulkanFunctionPointers()->vkCreateShaderModuleFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateShaderModule(
device, pCreateInfo, pAllocator, pShaderModule);
}
ALWAYS_INLINE void vkDestroyBuffer(VkDevice device,
VkBuffer buffer,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyBufferFn(device, buffer,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyBuffer(device, buffer,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyCommandPool(
VkDevice device,
VkCommandPool commandPool,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyCommandPoolFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyCommandPool(
device, commandPool, pAllocator);
}
ALWAYS_INLINE void vkDestroyDescriptorPool(
VkDevice device,
VkDescriptorPool descriptorPool,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyDescriptorPoolFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyDescriptorPool(
device, descriptorPool, pAllocator);
}
ALWAYS_INLINE void vkDestroyDescriptorSetLayout(
VkDevice device,
VkDescriptorSetLayout descriptorSetLayout,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyDescriptorSetLayoutFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyDescriptorSetLayout(
device, descriptorSetLayout, pAllocator);
}
ALWAYS_INLINE void vkDestroyDevice(VkDevice device,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyDeviceFn(device,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyDevice(device, pAllocator);
}
ALWAYS_INLINE void vkDestroyFence(VkDevice device,
VkFence fence,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyFenceFn(device, fence,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyFence(device, fence,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyFramebuffer(
VkDevice device,
VkFramebuffer framebuffer,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyFramebufferFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyFramebuffer(
device, framebuffer, pAllocator);
}
ALWAYS_INLINE void vkDestroyImage(VkDevice device,
VkImage image,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyImageFn(device, image,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyImage(device, image,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyImageView(VkDevice device,
VkImageView imageView,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyImageViewFn(
- device, imageView, pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroyImageView(device, imageView,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyRenderPass(
VkDevice device,
VkRenderPass renderPass,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyRenderPassFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyRenderPass(
device, renderPass, pAllocator);
}
ALWAYS_INLINE void vkDestroySampler(VkDevice device,
VkSampler sampler,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroySamplerFn(device, sampler,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroySampler(device, sampler,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroySemaphore(VkDevice device,
VkSemaphore semaphore,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroySemaphoreFn(
- device, semaphore, pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkDestroySemaphore(device, semaphore,
+ pAllocator);
}
ALWAYS_INLINE void vkDestroyShaderModule(
VkDevice device,
VkShaderModule shaderModule,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroyShaderModuleFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroyShaderModule(
device, shaderModule, pAllocator);
}
ALWAYS_INLINE VkResult vkDeviceWaitIdle(VkDevice device) {
- return gpu::GetVulkanFunctionPointers()->vkDeviceWaitIdleFn(device);
+ return gpu::GetVulkanFunctionPointers()->vkDeviceWaitIdle(device);
}
ALWAYS_INLINE VkResult
vkFlushMappedMemoryRanges(VkDevice device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange* pMemoryRanges) {
- return gpu::GetVulkanFunctionPointers()->vkFlushMappedMemoryRangesFn(
+ return gpu::GetVulkanFunctionPointers()->vkFlushMappedMemoryRanges(
device, memoryRangeCount, pMemoryRanges);
}
ALWAYS_INLINE VkResult vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
- return gpu::GetVulkanFunctionPointers()->vkEndCommandBufferFn(commandBuffer);
+ return gpu::GetVulkanFunctionPointers()->vkEndCommandBuffer(commandBuffer);
}
ALWAYS_INLINE void vkFreeCommandBuffers(
VkDevice device,
VkCommandPool commandPool,
uint32_t commandBufferCount,
const VkCommandBuffer* pCommandBuffers) {
- return gpu::GetVulkanFunctionPointers()->vkFreeCommandBuffersFn(
+ return gpu::GetVulkanFunctionPointers()->vkFreeCommandBuffers(
device, commandPool, commandBufferCount, pCommandBuffers);
}
ALWAYS_INLINE VkResult
@@ -826,78 +830,98 @@ vkFreeDescriptorSets(VkDevice device,
VkDescriptorPool descriptorPool,
uint32_t descriptorSetCount,
const VkDescriptorSet* pDescriptorSets) {
- return gpu::GetVulkanFunctionPointers()->vkFreeDescriptorSetsFn(
+ return gpu::GetVulkanFunctionPointers()->vkFreeDescriptorSets(
device, descriptorPool, descriptorSetCount, pDescriptorSets);
}
ALWAYS_INLINE void vkFreeMemory(VkDevice device,
VkDeviceMemory memory,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkFreeMemoryFn(device, memory,
- pAllocator);
+ return gpu::GetVulkanFunctionPointers()->vkFreeMemory(device, memory,
+ pAllocator);
}
ALWAYS_INLINE VkResult
vkInvalidateMappedMemoryRanges(VkDevice device,
uint32_t memoryRangeCount,
const VkMappedMemoryRange* pMemoryRanges) {
- return gpu::GetVulkanFunctionPointers()->vkInvalidateMappedMemoryRangesFn(
+ return gpu::GetVulkanFunctionPointers()->vkInvalidateMappedMemoryRanges(
device, memoryRangeCount, pMemoryRanges);
}
ALWAYS_INLINE void vkGetBufferMemoryRequirements(
VkDevice device,
VkBuffer buffer,
VkMemoryRequirements* pMemoryRequirements) {
- return gpu::GetVulkanFunctionPointers()->vkGetBufferMemoryRequirementsFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetBufferMemoryRequirements(
device, buffer, pMemoryRequirements);
}
+ALWAYS_INLINE void vkGetBufferMemoryRequirements2(
+ VkDevice device,
+ const VkBufferMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements) {
+ return gpu::GetVulkanFunctionPointers()->vkGetBufferMemoryRequirements2(
+ device, pInfo, pMemoryRequirements);
+}
ALWAYS_INLINE void vkGetDeviceQueue(VkDevice device,
uint32_t queueFamilyIndex,
uint32_t queueIndex,
VkQueue* pQueue) {
- return gpu::GetVulkanFunctionPointers()->vkGetDeviceQueueFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetDeviceQueue(
device, queueFamilyIndex, queueIndex, pQueue);
}
+ALWAYS_INLINE void vkGetDeviceQueue2(VkDevice device,
+ const VkDeviceQueueInfo2* pQueueInfo,
+ VkQueue* pQueue) {
+ return gpu::GetVulkanFunctionPointers()->vkGetDeviceQueue2(device, pQueueInfo,
+ pQueue);
+}
ALWAYS_INLINE VkResult vkGetFenceStatus(VkDevice device, VkFence fence) {
- return gpu::GetVulkanFunctionPointers()->vkGetFenceStatusFn(device, fence);
+ return gpu::GetVulkanFunctionPointers()->vkGetFenceStatus(device, fence);
}
ALWAYS_INLINE void vkGetImageMemoryRequirements(
VkDevice device,
VkImage image,
VkMemoryRequirements* pMemoryRequirements) {
- return gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirementsFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirements(
device, image, pMemoryRequirements);
}
+ALWAYS_INLINE void vkGetImageMemoryRequirements2(
+ VkDevice device,
+ const VkImageMemoryRequirementsInfo2* pInfo,
+ VkMemoryRequirements2* pMemoryRequirements) {
+ return gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirements2(
+ device, pInfo, pMemoryRequirements);
+}
ALWAYS_INLINE VkResult vkMapMemory(VkDevice device,
VkDeviceMemory memory,
VkDeviceSize offset,
VkDeviceSize size,
VkMemoryMapFlags flags,
void** ppData) {
- return gpu::GetVulkanFunctionPointers()->vkMapMemoryFn(device, memory, offset,
- size, flags, ppData);
+ return gpu::GetVulkanFunctionPointers()->vkMapMemory(device, memory, offset,
+ size, flags, ppData);
}
ALWAYS_INLINE VkResult vkQueueSubmit(VkQueue queue,
uint32_t submitCount,
const VkSubmitInfo* pSubmits,
VkFence fence) {
- return gpu::GetVulkanFunctionPointers()->vkQueueSubmitFn(queue, submitCount,
- pSubmits, fence);
+ return gpu::GetVulkanFunctionPointers()->vkQueueSubmit(queue, submitCount,
+ pSubmits, fence);
}
ALWAYS_INLINE VkResult vkQueueWaitIdle(VkQueue queue) {
- return gpu::GetVulkanFunctionPointers()->vkQueueWaitIdleFn(queue);
+ return gpu::GetVulkanFunctionPointers()->vkQueueWaitIdle(queue);
}
ALWAYS_INLINE VkResult vkResetCommandBuffer(VkCommandBuffer commandBuffer,
VkCommandBufferResetFlags flags) {
- return gpu::GetVulkanFunctionPointers()->vkResetCommandBufferFn(commandBuffer,
- flags);
+ return gpu::GetVulkanFunctionPointers()->vkResetCommandBuffer(commandBuffer,
+ flags);
}
ALWAYS_INLINE VkResult vkResetFences(VkDevice device,
uint32_t fenceCount,
const VkFence* pFences) {
- return gpu::GetVulkanFunctionPointers()->vkResetFencesFn(device, fenceCount,
- pFences);
+ return gpu::GetVulkanFunctionPointers()->vkResetFences(device, fenceCount,
+ pFences);
}
ALWAYS_INLINE void vkUnmapMemory(VkDevice device, VkDeviceMemory memory) {
- return gpu::GetVulkanFunctionPointers()->vkUnmapMemoryFn(device, memory);
+ return gpu::GetVulkanFunctionPointers()->vkUnmapMemory(device, memory);
}
ALWAYS_INLINE void vkUpdateDescriptorSets(
VkDevice device,
@@ -905,7 +929,7 @@ ALWAYS_INLINE void vkUpdateDescriptorSets(
const VkWriteDescriptorSet* pDescriptorWrites,
uint32_t descriptorCopyCount,
const VkCopyDescriptorSet* pDescriptorCopies) {
- return gpu::GetVulkanFunctionPointers()->vkUpdateDescriptorSetsFn(
+ return gpu::GetVulkanFunctionPointers()->vkUpdateDescriptorSets(
device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount,
pDescriptorCopies);
}
@@ -914,39 +938,18 @@ ALWAYS_INLINE VkResult vkWaitForFences(VkDevice device,
const VkFence* pFences,
VkBool32 waitAll,
uint64_t timeout) {
- return gpu::GetVulkanFunctionPointers()->vkWaitForFencesFn(
+ return gpu::GetVulkanFunctionPointers()->vkWaitForFences(
device, fenceCount, pFences, waitAll, timeout);
}
-ALWAYS_INLINE void vkGetDeviceQueue2(VkDevice device,
- const VkDeviceQueueInfo2* pQueueInfo,
- VkQueue* pQueue) {
- return gpu::GetVulkanFunctionPointers()->vkGetDeviceQueue2Fn(
- device, pQueueInfo, pQueue);
-}
-ALWAYS_INLINE void vkGetBufferMemoryRequirements2(
- VkDevice device,
- const VkBufferMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements) {
- return gpu::GetVulkanFunctionPointers()->vkGetBufferMemoryRequirements2Fn(
- device, pInfo, pMemoryRequirements);
-}
-ALWAYS_INLINE void vkGetImageMemoryRequirements2(
- VkDevice device,
- const VkImageMemoryRequirementsInfo2* pInfo,
- VkMemoryRequirements2* pMemoryRequirements) {
- return gpu::GetVulkanFunctionPointers()->vkGetImageMemoryRequirements2Fn(
- device, pInfo, pMemoryRequirements);
-}
-
#if defined(OS_ANDROID)
ALWAYS_INLINE VkResult vkGetAndroidHardwareBufferPropertiesANDROID(
VkDevice device,
const struct AHardwareBuffer* buffer,
VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
return gpu::GetVulkanFunctionPointers()
- ->vkGetAndroidHardwareBufferPropertiesANDROIDFn(device, buffer,
- pProperties);
+ ->vkGetAndroidHardwareBufferPropertiesANDROID(device, buffer,
+ pProperties);
}
#endif // defined(OS_ANDROID)
@@ -955,13 +958,13 @@ ALWAYS_INLINE VkResult
vkGetSemaphoreFdKHR(VkDevice device,
const VkSemaphoreGetFdInfoKHR* pGetFdInfo,
int* pFd) {
- return gpu::GetVulkanFunctionPointers()->vkGetSemaphoreFdKHRFn(
- device, pGetFdInfo, pFd);
+ return gpu::GetVulkanFunctionPointers()->vkGetSemaphoreFdKHR(device,
+ pGetFdInfo, pFd);
}
ALWAYS_INLINE VkResult vkImportSemaphoreFdKHR(
VkDevice device,
const VkImportSemaphoreFdInfoKHR* pImportSemaphoreFdInfo) {
- return gpu::GetVulkanFunctionPointers()->vkImportSemaphoreFdKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkImportSemaphoreFdKHR(
device, pImportSemaphoreFdInfo);
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
@@ -971,14 +974,14 @@ ALWAYS_INLINE VkResult vkGetSemaphoreWin32HandleKHR(
VkDevice device,
const VkSemaphoreGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle) {
- return gpu::GetVulkanFunctionPointers()->vkGetSemaphoreWin32HandleKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetSemaphoreWin32HandleKHR(
device, pGetWin32HandleInfo, pHandle);
}
ALWAYS_INLINE VkResult
vkImportSemaphoreWin32HandleKHR(VkDevice device,
const VkImportSemaphoreWin32HandleInfoKHR*
pImportSemaphoreWin32HandleInfo) {
- return gpu::GetVulkanFunctionPointers()->vkImportSemaphoreWin32HandleKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkImportSemaphoreWin32HandleKHR(
device, pImportSemaphoreWin32HandleInfo);
}
#endif // defined(OS_WIN)
@@ -987,15 +990,15 @@ vkImportSemaphoreWin32HandleKHR(VkDevice device,
ALWAYS_INLINE VkResult vkGetMemoryFdKHR(VkDevice device,
const VkMemoryGetFdInfoKHR* pGetFdInfo,
int* pFd) {
- return gpu::GetVulkanFunctionPointers()->vkGetMemoryFdKHRFn(device,
- pGetFdInfo, pFd);
+ return gpu::GetVulkanFunctionPointers()->vkGetMemoryFdKHR(device, pGetFdInfo,
+ pFd);
}
ALWAYS_INLINE VkResult
vkGetMemoryFdPropertiesKHR(VkDevice device,
VkExternalMemoryHandleTypeFlagBits handleType,
int fd,
VkMemoryFdPropertiesKHR* pMemoryFdProperties) {
- return gpu::GetVulkanFunctionPointers()->vkGetMemoryFdPropertiesKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetMemoryFdPropertiesKHR(
device, handleType, fd, pMemoryFdProperties);
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
@@ -1005,7 +1008,7 @@ ALWAYS_INLINE VkResult vkGetMemoryWin32HandleKHR(
VkDevice device,
const VkMemoryGetWin32HandleInfoKHR* pGetWin32HandleInfo,
HANDLE* pHandle) {
- return gpu::GetVulkanFunctionPointers()->vkGetMemoryWin32HandleKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetMemoryWin32HandleKHR(
device, pGetWin32HandleInfo, pHandle);
}
ALWAYS_INLINE VkResult vkGetMemoryWin32HandlePropertiesKHR(
@@ -1013,33 +1016,32 @@ ALWAYS_INLINE VkResult vkGetMemoryWin32HandlePropertiesKHR(
VkExternalMemoryHandleTypeFlagBits handleType,
HANDLE handle,
VkMemoryWin32HandlePropertiesKHR* pMemoryWin32HandleProperties) {
- return gpu::GetVulkanFunctionPointers()
- ->vkGetMemoryWin32HandlePropertiesKHRFn(device, handleType, handle,
- pMemoryWin32HandleProperties);
+ return gpu::GetVulkanFunctionPointers()->vkGetMemoryWin32HandlePropertiesKHR(
+ device, handleType, handle, pMemoryWin32HandleProperties);
}
#endif // defined(OS_WIN)
#if defined(OS_FUCHSIA)
#define vkImportSemaphoreZirconHandleFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkImportSemaphoreZirconHandleFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkImportSemaphoreZirconHandleFUCHSIA
#define vkGetSemaphoreZirconHandleFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkGetSemaphoreZirconHandleFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkGetSemaphoreZirconHandleFUCHSIA
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
#define vkGetMemoryZirconHandleFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkGetMemoryZirconHandleFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkGetMemoryZirconHandleFUCHSIA
#endif // defined(OS_FUCHSIA)
#if defined(OS_FUCHSIA)
#define vkCreateBufferCollectionFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkCreateBufferCollectionFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkCreateBufferCollectionFUCHSIA
#define vkSetBufferCollectionConstraintsFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkSetBufferCollectionConstraintsFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkSetBufferCollectionConstraintsFUCHSIA
#define vkGetBufferCollectionPropertiesFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkGetBufferCollectionPropertiesFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkGetBufferCollectionPropertiesFUCHSIA
#define vkDestroyBufferCollectionFUCHSIA \
- gpu::GetVulkanFunctionPointers()->vkDestroyBufferCollectionFUCHSIAFn
+ gpu::GetVulkanFunctionPointers()->vkDestroyBufferCollectionFUCHSIA
#endif // defined(OS_FUCHSIA)
ALWAYS_INLINE VkResult vkAcquireNextImageKHR(VkDevice device,
@@ -1048,7 +1050,7 @@ ALWAYS_INLINE VkResult vkAcquireNextImageKHR(VkDevice device,
VkSemaphore semaphore,
VkFence fence,
uint32_t* pImageIndex) {
- return gpu::GetVulkanFunctionPointers()->vkAcquireNextImageKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkAcquireNextImageKHR(
device, swapchain, timeout, semaphore, fence, pImageIndex);
}
ALWAYS_INLINE VkResult
@@ -1056,27 +1058,27 @@ vkCreateSwapchainKHR(VkDevice device,
const VkSwapchainCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSwapchainKHR* pSwapchain) {
- return gpu::GetVulkanFunctionPointers()->vkCreateSwapchainKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkCreateSwapchainKHR(
device, pCreateInfo, pAllocator, pSwapchain);
}
ALWAYS_INLINE void vkDestroySwapchainKHR(
VkDevice device,
VkSwapchainKHR swapchain,
const VkAllocationCallbacks* pAllocator) {
- return gpu::GetVulkanFunctionPointers()->vkDestroySwapchainKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkDestroySwapchainKHR(
device, swapchain, pAllocator);
}
ALWAYS_INLINE VkResult vkGetSwapchainImagesKHR(VkDevice device,
VkSwapchainKHR swapchain,
uint32_t* pSwapchainImageCount,
VkImage* pSwapchainImages) {
- return gpu::GetVulkanFunctionPointers()->vkGetSwapchainImagesKHRFn(
+ return gpu::GetVulkanFunctionPointers()->vkGetSwapchainImagesKHR(
device, swapchain, pSwapchainImageCount, pSwapchainImages);
}
ALWAYS_INLINE VkResult vkQueuePresentKHR(VkQueue queue,
const VkPresentInfoKHR* pPresentInfo) {
- return gpu::GetVulkanFunctionPointers()->vkQueuePresentKHRFn(queue,
- pPresentInfo);
+ return gpu::GetVulkanFunctionPointers()->vkQueuePresentKHR(queue,
+ pPresentInfo);
}
#endif // GPU_VULKAN_VULKAN_FUNCTION_POINTERS_H_ \ No newline at end of file
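The mechanical rename throughout this header drops the "Fn" suffix from every member of VulkanFunctionPointers, so each ALWAYS_INLINE wrapper now reads as a plain Vulkan call. A minimal sketch of the dispatch pattern, assuming VK_NO_PROTOTYPES is in effect and using a hypothetical VulkanFunction<> wrapper; the real wrapper type is not visible in this hunk:

// Sketch only, not part of the patch.
#define VK_NO_PROTOTYPES  // The loader is reached via pointers, never prototypes.
#include <vulkan/vulkan.h>

template <typename Fn>
struct VulkanFunction;

template <typename R, typename... Args>
struct VulkanFunction<R(VKAPI_PTR*)(Args...)> {
  R operator()(Args... args) const { return fn(args...); }
  R(VKAPI_PTR* fn)(Args...) = nullptr;  // Bound via vkGet{Instance,Device}ProcAddr().
};

struct VulkanFunctionPointers {
  // Members are now named exactly like the Vulkan entry points.
  VulkanFunction<PFN_vkQueueWaitIdle> vkQueueWaitIdle;
};

VulkanFunctionPointers* GetVulkanFunctionPointers();

// With VK_NO_PROTOTYPES there is no loader prototype to collide with, so the
// wrapper can reuse the canonical name (ALWAYS_INLINE in the real header).
inline VkResult vkQueueWaitIdle(VkQueue queue) {
  return GetVulkanFunctionPointers()->vkQueueWaitIdle(queue);
}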
diff --git a/chromium/gpu/vulkan/vulkan_image.cc b/chromium/gpu/vulkan/vulkan_image.cc
index c01245faf1f..fa199dd224d 100644
--- a/chromium/gpu/vulkan/vulkan_image.cc
+++ b/chromium/gpu/vulkan/vulkan_image.cc
@@ -8,6 +8,7 @@
#include <algorithm>
+#include "base/logging.h"
#include "base/macros.h"
#include "base/optional.h"
#include "base/stl_util.h"
@@ -103,7 +104,8 @@ std::unique_ptr<VulkanImage> VulkanImage::Create(
VkImageTiling image_tiling,
VkDeviceSize device_size,
uint32_t memory_type_index,
- base::Optional<VulkanYCbCrInfo>& ycbcr_info) {
+ base::Optional<VulkanYCbCrInfo>& ycbcr_info,
+ VkImageCreateFlags flags) {
auto image = std::make_unique<VulkanImage>(util::PassKey<VulkanImage>());
image->device_queue_ = device_queue;
image->image_ = vk_image;
@@ -114,6 +116,7 @@ std::unique_ptr<VulkanImage> VulkanImage::Create(
image->device_size_ = device_size;
image->memory_type_index_ = memory_type_index;
image->ycbcr_info_ = ycbcr_info;
+ image->flags_ = flags;
return image;
}
@@ -203,7 +206,7 @@ bool VulkanImage::Initialize(VulkanDeviceQueue* device_queue,
vkCreateImage(vk_device, &create_info, nullptr /* pAllocator */, &image_);
if (result != VK_SUCCESS) {
DLOG(ERROR) << "vkCreateImage failed result:" << result;
- device_queue_ = VK_NULL_HANDLE;
+ device_queue_ = nullptr;
return false;
}
@@ -339,4 +342,4 @@ bool VulkanImage::InitializeWithExternalMemory(VulkanDeviceQueue* device_queue,
nullptr /* requirements */);
}
-} // namespace gpu \ No newline at end of file
+} // namespace gpu
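On the error path above, nullptr replaces VK_NULL_HANDLE because |device_queue_| is a plain C++ pointer (a VulkanDeviceQueue*, per the surrounding code), not a Vulkan handle; VK_NULL_HANDLE only happens to convert to a null pointer. The member being reset, with the type assumed from context:

VulkanDeviceQueue* device_queue_ = nullptr;  // A C++ pointer: nullptr, not VK_NULL_HANDLE.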
diff --git a/chromium/gpu/vulkan/vulkan_image.h b/chromium/gpu/vulkan/vulkan_image.h
index 888bf865241..d48dd9bf706 100644
--- a/chromium/gpu/vulkan/vulkan_image.h
+++ b/chromium/gpu/vulkan/vulkan_image.h
@@ -74,7 +74,8 @@ class COMPONENT_EXPORT(VULKAN) VulkanImage {
VkImageTiling image_tiling,
VkDeviceSize device_size,
uint32_t memory_type_index,
- base::Optional<VulkanYCbCrInfo>& ycbcr_info);
+ base::Optional<VulkanYCbCrInfo>& ycbcr_info,
+ VkImageCreateFlags flags = 0);
void Destroy();
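A hedged call-site sketch of the new defaulted parameter: the leading arguments (device queue, image, memory, size, format) are assumptions, since only the trailing parameters appear in this hunk. Existing callers compile unchanged; new callers can record the create flags:

base::Optional<VulkanYCbCrInfo> ycbcr_info;

// Pre-existing call sites are untouched: |flags| defaults to 0.
auto image = VulkanImage::Create(device_queue, vk_image, vk_memory, size,
                                 format, VK_IMAGE_TILING_OPTIMAL, device_size,
                                 memory_type_index, ycbcr_info);

// New call sites can pass the flags the VkImage was created with, e.g. for
// protected-memory images.
auto protected_image = VulkanImage::Create(
    device_queue, vk_image, vk_memory, size, format, VK_IMAGE_TILING_OPTIMAL,
    device_size, memory_type_index, ycbcr_info, VK_IMAGE_CREATE_PROTECTED_BIT);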
diff --git a/chromium/gpu/vulkan/vulkan_image_android.cc b/chromium/gpu/vulkan/vulkan_image_android.cc
index 5da67ae4f68..7d64386a332 100644
--- a/chromium/gpu/vulkan/vulkan_image_android.cc
+++ b/chromium/gpu/vulkan/vulkan_image_android.cc
@@ -5,6 +5,7 @@
#include "gpu/vulkan/vulkan_image.h"
#include "base/android/android_hardware_buffer_compat.h"
+#include "base/logging.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
@@ -138,4 +139,4 @@ bool VulkanImage::InitializeFromGpuMemoryBufferHandle(
return true;
}
-} // namespace gpu \ No newline at end of file
+} // namespace gpu
diff --git a/chromium/gpu/vulkan/vulkan_image_fuchsia.cc b/chromium/gpu/vulkan/vulkan_image_fuchsia.cc
index 6748ed5a228..b462ba9e99b 100644
--- a/chromium/gpu/vulkan/vulkan_image_fuchsia.cc
+++ b/chromium/gpu/vulkan/vulkan_image_fuchsia.cc
@@ -4,6 +4,7 @@
#include "gpu/vulkan/vulkan_image.h"
+#include "base/logging.h"
#include "gpu/vulkan/fuchsia/vulkan_fuchsia_ext.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
diff --git a/chromium/gpu/vulkan/vulkan_image_linux.cc b/chromium/gpu/vulkan/vulkan_image_linux.cc
index 789b8e3b0e4..41ee3996cf5 100644
--- a/chromium/gpu/vulkan/vulkan_image_linux.cc
+++ b/chromium/gpu/vulkan/vulkan_image_linux.cc
@@ -4,6 +4,7 @@
#include "gpu/vulkan/vulkan_image.h"
+#include "base/logging.h"
#include "gpu/vulkan/vulkan_device_queue.h"
namespace gpu {
diff --git a/chromium/gpu/vulkan/vulkan_image_unittest.cc b/chromium/gpu/vulkan/vulkan_image_unittest.cc
index 0338611e057..54c717fc747 100644
--- a/chromium/gpu/vulkan/vulkan_image_unittest.cc
+++ b/chromium/gpu/vulkan/vulkan_image_unittest.cc
@@ -4,8 +4,10 @@
#include "gpu/vulkan/vulkan_image.h"
+#include "base/logging.h"
#include "build/build_config.h"
#include "gpu/config/gpu_info_collector.h"
+#include "gpu/config/gpu_test_config.h"
#include "gpu/ipc/service/gpu_memory_buffer_factory.h"
#include "gpu/vulkan/tests/basic_vulkan_test.h"
#include "gpu/vulkan/vulkan_device_queue.h"
@@ -54,18 +56,9 @@ TEST_F(VulkanImageTest, Create) {
TEST_F(VulkanImageTest, CreateWithExternalMemory) {
{
- GPUInfo gpu_info;
- CHECK(CollectBasicGraphicsInfo(&gpu_info));
-
- // TODO(crbug.com/1069516): Fails on Intel driver >= 26.20.100.7158; this is
- // seen on Win10 FYI x64 Exp Release (Intel HD 630), with 26.20.100.7870.
- if (gpu_info.gpu.driver_version == "26.20.100.7870") {
- // Can't be sure primary GPU is being used, so check it's the only one
- // (aside from the Microsoft software renderer).
- CHECK(gpu_info.secondary_gpus.size() == 1);
- // Skip test.
+    // TODO(crbug.com/1069516): Fails on the current driver version on this bot.
+ if (GPUTestBotConfig::CurrentConfigMatches("Win10"))
return;
- }
}
constexpr gfx::Size size(100, 100);
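Reduced sketch of the skip idiom the test now uses: instead of collecting GPUInfo and string-matching one driver version, it defers to the shared bot-config matcher (the elided body is illustrative):

TEST_F(VulkanImageTest, CreateWithExternalMemory) {
  // TODO(crbug.com/1069516): remove the skip once the driver bug is fixed.
  if (GPUTestBotConfig::CurrentConfigMatches("Win10"))
    return;  // Known failure on the Win10 bots.
  constexpr gfx::Size size(100, 100);
  // ... create a VulkanImage with external memory and verify its handles ...
}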
diff --git a/chromium/gpu/vulkan/vulkan_image_win.cc b/chromium/gpu/vulkan/vulkan_image_win.cc
index 6bd6ef2a56b..6501ceb1644 100644
--- a/chromium/gpu/vulkan/vulkan_image_win.cc
+++ b/chromium/gpu/vulkan/vulkan_image_win.cc
@@ -4,6 +4,7 @@
#include "gpu/vulkan/vulkan_image.h"
+#include "base/logging.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
diff --git a/chromium/gpu/vulkan/vulkan_implementation.h b/chromium/gpu/vulkan/vulkan_implementation.h
index 3ee8cd5f4ad..42380494565 100644
--- a/chromium/gpu/vulkan/vulkan_implementation.h
+++ b/chromium/gpu/vulkan/vulkan_implementation.h
@@ -134,7 +134,9 @@ class COMPONENT_EXPORT(VULKAN) VulkanImplementation {
virtual std::unique_ptr<SysmemBufferCollection>
RegisterSysmemBufferCollection(VkDevice device,
gfx::SysmemBufferCollectionId id,
- zx::channel token) = 0;
+ zx::channel token,
+ gfx::BufferFormat format,
+ gfx::BufferUsage usage) = 0;
#endif // defined(OS_FUCHSIA)
bool use_swiftshader() const { return use_swiftshader_; }
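A sketch of what an implementation's override looks like after the signature change; only the signature comes from the patch, while VulkanImplementationX and SysmemBufferCollectionImpl are hypothetical names:

#if defined(OS_FUCHSIA)
std::unique_ptr<SysmemBufferCollection>
VulkanImplementationX::RegisterSysmemBufferCollection(
    VkDevice device,
    gfx::SysmemBufferCollectionId id,
    zx::channel token,
    gfx::BufferFormat format,
    gfx::BufferUsage usage) {
  // The new |format| and |usage| arguments let the collection constrain the
  // sysmem allocation up front instead of inferring it at first use.
  auto collection = std::make_unique<SysmemBufferCollectionImpl>();
  if (!collection->Initialize(device, id, std::move(token), format, usage))
    return nullptr;
  return collection;
}
#endif  // defined(OS_FUCHSIA)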
diff --git a/chromium/gpu/vulkan/vulkan_instance.cc b/chromium/gpu/vulkan/vulkan_instance.cc
index 49d8709a177..c68a852d2a4 100644
--- a/chromium/gpu/vulkan/vulkan_instance.cc
+++ b/chromium/gpu/vulkan/vulkan_instance.cc
@@ -79,19 +79,19 @@ bool VulkanInstance::Initialize(
if (!vulkan_function_pointers->BindUnassociatedFunctionPointers())
return false;
- if (vulkan_function_pointers->vkEnumerateInstanceVersionFn)
- vkEnumerateInstanceVersion(&vulkan_info_.api_version);
+ VkResult result = vkEnumerateInstanceVersion(&vulkan_info_.api_version);
+ if (result != VK_SUCCESS) {
+ DLOG(ERROR) << "vkEnumerateInstanceVersion() failed: " << result;
+ return false;
+ }
- if (vulkan_info_.api_version < VK_MAKE_VERSION(1, 1, 0))
+ if (vulkan_info_.api_version < kVulkanRequiredApiVersion)
return false;
gpu::crash_keys::vulkan_api_version.Set(
VkVersionToString(vulkan_info_.api_version));
- // Use Vulkan 1.1 if it's available.
- vulkan_info_.used_api_version = VK_MAKE_VERSION(1, 1, 0);
-
- VkResult result = VK_SUCCESS;
+ vulkan_info_.used_api_version = kVulkanRequiredApiVersion;
VkApplicationInfo app_info = {};
app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
@@ -304,7 +304,8 @@ bool VulkanInstance::CollectInfo() {
// API version of the VkPhysicalDevice, so we need to check the GPU's
// API version instead of just testing to see if
// vkGetPhysicalDeviceFeatures2 is non-null.
- if (info.properties.apiVersion >= VK_MAKE_VERSION(1, 1, 0)) {
+ static_assert(kVulkanRequiredApiVersion >= VK_API_VERSION_1_1, "");
+ if (info.properties.apiVersion >= kVulkanRequiredApiVersion) {
VkPhysicalDeviceSamplerYcbcrConversionFeatures ycbcr_conversion_features =
{VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES};
VkPhysicalDeviceProtectedMemoryFeatures protected_memory_feature = {
@@ -319,8 +320,6 @@ bool VulkanInstance::CollectInfo() {
info.feature_sampler_ycbcr_conversion =
ycbcr_conversion_features.samplerYcbcrConversion;
info.feature_protected_memory = protected_memory_feature.protectedMemory;
- } else {
- vkGetPhysicalDeviceFeatures(device, &info.features);
}
count = 0;
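The new initialization gate, condensed (a sketch; kVulkanRequiredApiVersion is defined elsewhere in the series, and the static_assert above pins it to at least VK_API_VERSION_1_1):

uint32_t api_version = 0;
VkResult result = vkEnumerateInstanceVersion(&api_version);
if (result != VK_SUCCESS) {
  DLOG(ERROR) << "vkEnumerateInstanceVersion() failed: " << result;
  return false;  // Previously skipped silently when the pointer was null.
}
if (api_version < kVulkanRequiredApiVersion)
  return false;  // No fallback to the Vulkan 1.0 vkGetPhysicalDeviceFeatures() path.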
diff --git a/chromium/gpu/vulkan/vulkan_instance.h b/chromium/gpu/vulkan/vulkan_instance.h
index bc22cd4d312..2b119789f58 100644
--- a/chromium/gpu/vulkan/vulkan_instance.h
+++ b/chromium/gpu/vulkan/vulkan_instance.h
@@ -8,8 +8,8 @@
#include <vulkan/vulkan.h>
#include <memory>
+#include "base/check_op.h"
#include "base/component_export.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "gpu/config/vulkan_info.h"
#include "ui/gfx/extension_set.h"
diff --git a/chromium/gpu/vulkan/vulkan_surface.cc b/chromium/gpu/vulkan/vulkan_surface.cc
index 8e20ccb7ded..5dfdff528c3 100644
--- a/chromium/gpu/vulkan/vulkan_surface.cc
+++ b/chromium/gpu/vulkan/vulkan_surface.cc
@@ -8,8 +8,10 @@
#include <algorithm>
+#include "base/logging.h"
#include "base/macros.h"
#include "base/stl_util.h"
+#include "base/threading/scoped_blocking_call.h"
#include "gpu/vulkan/vulkan_device_queue.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "gpu/vulkan/vulkan_swap_chain.h"
@@ -68,6 +70,9 @@ gfx::OverlayTransform FromVkSurfaceTransformFlag(
}
}
+// Minimum number of VkImages in a Vulkan swap chain.
+uint32_t kMinImageCount = 3u;
+
} // namespace
VulkanSurface::~VulkanSurface() {
@@ -165,8 +170,6 @@ bool VulkanSurface::Initialize(VulkanDeviceQueue* device_queue,
return false;
}
- image_count_ = std::max(surface_caps.minImageCount, 3u);
-
return true;
}
@@ -184,10 +187,18 @@ gfx::SwapResult VulkanSurface::SwapBuffers() {
}
gfx::SwapResult VulkanSurface::PostSubBuffer(const gfx::Rect& rect) {
- return swap_chain_->PresentBuffer(rect);
+ return swap_chain_->PostSubBuffer(rect);
+}
+
+void VulkanSurface::PostSubBufferAsync(
+ const gfx::Rect& rect,
+ VulkanSwapChain::PostSubBufferCompletionCallback callback) {
+ swap_chain_->PostSubBufferAsync(rect, std::move(callback));
}
void VulkanSurface::Finish() {
+ base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+ base::BlockingType::WILL_BLOCK);
vkQueueWaitIdle(device_queue_->GetVulkanQueue());
}
@@ -259,12 +270,12 @@ bool VulkanSurface::CreateSwapChain(const gfx::Size& size,
transform_ = transform;
auto swap_chain = std::make_unique<VulkanSwapChain>();
-
// Create swap chain.
- DCHECK_EQ(image_count_, std::max(surface_caps.minImageCount, 3u));
- if (!swap_chain->Initialize(
- device_queue_, surface_, surface_format_, image_size_, image_count_,
- vk_transform, enforce_protected_memory_, std::move(swap_chain_))) {
+ auto min_image_count = std::max(surface_caps.minImageCount, kMinImageCount);
+ if (!swap_chain->Initialize(device_queue_, surface_, surface_format_,
+ image_size_, min_image_count, vk_transform,
+ enforce_protected_memory_,
+ std::move(swap_chain_))) {
return false;
}
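The image count is now computed at swap-chain (re)creation instead of being cached in image_count_. A sketch of the policy; the maxImageCount clamp shown here is an extra guard, not something this patch adds:

// kMinImageCount is the constant (3u) introduced above.
uint32_t ChooseMinImageCount(const VkSurfaceCapabilitiesKHR& caps) {
  uint32_t count = std::max(caps.minImageCount, kMinImageCount);
  // Not in the patch: respect a bounded maxImageCount (0 means unbounded).
  if (caps.maxImageCount != 0)
    count = std::min(count, caps.maxImageCount);
  return count;
}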
diff --git a/chromium/gpu/vulkan/vulkan_surface.h b/chromium/gpu/vulkan/vulkan_surface.h
index 7620a2e4ebf..e61cd97bb8f 100644
--- a/chromium/gpu/vulkan/vulkan_surface.h
+++ b/chromium/gpu/vulkan/vulkan_surface.h
@@ -46,6 +46,9 @@ class COMPONENT_EXPORT(VULKAN) VulkanSurface {
gfx::SwapResult SwapBuffers();
gfx::SwapResult PostSubBuffer(const gfx::Rect& rect);
+ void PostSubBufferAsync(
+ const gfx::Rect& rect,
+ VulkanSwapChain::PostSubBufferCompletionCallback callback);
void Finish();
@@ -62,7 +65,6 @@ class COMPONENT_EXPORT(VULKAN) VulkanSurface {
uint32_t swap_chain_generation() const { return swap_chain_generation_; }
const gfx::Size& image_size() const { return image_size_; }
gfx::OverlayTransform transform() const { return transform_; }
- uint32_t image_count() const { return image_count_; }
VkSurfaceFormatKHR surface_format() const { return surface_format_; }
private:
@@ -87,9 +89,6 @@ class COMPONENT_EXPORT(VULKAN) VulkanSurface {
// Swap chain pre-transform.
gfx::OverlayTransform transform_ = gfx::OVERLAY_TRANSFORM_INVALID;
- // Swap chain image count.
- uint32_t image_count_ = 0u;
-
std::unique_ptr<VulkanSwapChain> swap_chain_;
DISALLOW_COPY_AND_ASSIGN(VulkanSurface);
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.cc b/chromium/gpu/vulkan/vulkan_swap_chain.cc
index c88b19f8ded..4d7eede7033 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.cc
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.cc
@@ -5,6 +5,11 @@
#include "gpu/vulkan/vulkan_swap_chain.h"
#include "base/bind.h"
+#include "base/logging.h"
+#include "base/task/task_traits.h"
+#include "base/task/thread_pool.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "gpu/vulkan/vulkan_command_buffer.h"
#include "gpu/vulkan/vulkan_command_pool.h"
@@ -17,7 +22,7 @@ namespace {
VkSemaphore CreateSemaphore(VkDevice vk_device) {
// Generic semaphore creation structure.
- VkSemaphoreCreateInfo semaphore_create_info = {
+ constexpr VkSemaphoreCreateInfo semaphore_create_info = {
VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO};
VkSemaphore vk_semaphore;
@@ -30,11 +35,17 @@ VkSemaphore CreateSemaphore(VkDevice vk_device) {
} // namespace
-VulkanSwapChain::VulkanSwapChain() {}
+VulkanSwapChain::VulkanSwapChain() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+}
VulkanSwapChain::~VulkanSwapChain() {
+#if DCHECK_IS_ON()
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(images_.empty());
DCHECK_EQ(static_cast<VkSwapchainKHR>(VK_NULL_HANDLE), swap_chain_);
+#endif
}
bool VulkanSwapChain::Initialize(
@@ -46,8 +57,12 @@ bool VulkanSwapChain::Initialize(
VkSurfaceTransformFlagBitsKHR pre_transform,
bool use_protected_memory,
std::unique_ptr<VulkanSwapChain> old_swap_chain) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(device_queue);
DCHECK(!use_protected_memory || device_queue->allow_protected_memory());
+
+ task_runner_ = base::ThreadTaskRunnerHandle::Get();
use_protected_memory_ = use_protected_memory;
device_queue_ = device_queue;
is_incremental_present_supported_ =
@@ -57,100 +72,67 @@ bool VulkanSwapChain::Initialize(
return InitializeSwapChain(surface, surface_format, image_size,
min_image_count, pre_transform,
use_protected_memory, std::move(old_swap_chain)) &&
- InitializeSwapImages(surface_format);
+ InitializeSwapImages(surface_format) && AcquireNextImage();
}
void VulkanSwapChain::Destroy() {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ WaitUntilPostSubBufferAsyncFinished();
+
DCHECK(!is_writing_);
DestroySwapImages();
DestroySwapChain();
}
-gfx::SwapResult VulkanSwapChain::PresentBuffer(const gfx::Rect& rect) {
- DCHECK(acquired_image_);
- DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
-
- VkResult result = VK_SUCCESS;
- VkDevice device = device_queue_->GetVulkanDevice();
- VkQueue queue = device_queue_->GetVulkanQueue();
- auto* fence_helper = device_queue_->GetFenceHelper();
-
- auto& current_image_data = images_[*acquired_image_];
- if (current_image_data.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
- {
- current_image_data.command_buffer->Clear();
- ScopedSingleUseCommandBufferRecorder recorder(
- *current_image_data.command_buffer);
- current_image_data.command_buffer->TransitionImageLayout(
- current_image_data.image, current_image_data.layout,
- VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
- }
- current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
-
- VkSemaphore vk_semaphore = CreateSemaphore(device);
- // Submit our command_buffer for the current buffer. It sets the image
- // layout for presenting.
- if (!current_image_data.command_buffer->Submit(1, &end_write_semaphore_, 1,
- &vk_semaphore)) {
- vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
- return gfx::SwapResult::SWAP_FAILED;
- }
- current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
- fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(end_write_semaphore_);
- end_write_semaphore_ = vk_semaphore;
- }
-
- VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
- present_info.waitSemaphoreCount = 1;
- present_info.pWaitSemaphores = &end_write_semaphore_;
- present_info.swapchainCount = 1;
- present_info.pSwapchains = &swap_chain_;
- present_info.pImageIndices = &acquired_image_.value();
-
- VkRectLayerKHR rect_layer;
- VkPresentRegionKHR present_region;
- VkPresentRegionsKHR present_regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
- if (is_incremental_present_supported_) {
- rect_layer.offset = {rect.x(), rect.y()};
- rect_layer.extent = {rect.width(), rect.height()};
- rect_layer.layer = 0;
+gfx::SwapResult VulkanSwapChain::PostSubBuffer(const gfx::Rect& rect) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK(!has_pending_post_sub_buffer_);
- present_region.rectangleCount = 1;
- present_region.pRectangles = &rect_layer;
-
- present_regions.swapchainCount = 1;
- present_regions.pRegions = &present_region;
-
- present_info.pNext = &present_regions;
- }
+ if (!PresentBuffer(rect))
+ return gfx::SwapResult::SWAP_FAILED;
- result = vkQueuePresentKHR(queue, &present_info);
- if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
- LOG(DFATAL) << "vkQueuePresentKHR() failed: " << result;
+ if (!AcquireNextImage())
return gfx::SwapResult::SWAP_FAILED;
- }
- current_image_data.is_acquired = false;
- LOG_IF(ERROR, result == VK_SUBOPTIMAL_KHR) << "Swapchian is suboptimal.";
+ return gfx::SwapResult::SWAP_ACK;
+}
- if (current_image_data.present_begin_semaphore != VK_NULL_HANDLE) {
- // |present_begin_semaphore| for the previous present for this image can be
- // safely destroyed after semaphore got from vkAcquireNextImageHKR() is
- // passed. That acquired semaphore should be already waited on for a
- // submitted GPU work. So we can safely enqueue the
- // |present_begin_semaphore| for cleanup here (the enqueued semaphore will
- // be destroyed when all submitted GPU work is finished).
- fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(
- current_image_data.present_begin_semaphore);
+void VulkanSwapChain::PostSubBufferAsync(
+ const gfx::Rect& rect,
+ PostSubBufferCompletionCallback callback) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK(!has_pending_post_sub_buffer_);
+
+ if (!PresentBuffer(rect)) {
+ task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(std::move(callback), gfx::SwapResult::SWAP_FAILED));
+ return;
}
- // We are not sure when the semaphore is not used by present engine, so don't
- // destroy the semaphore until the image is returned from present engine.
- current_image_data.present_begin_semaphore = end_write_semaphore_;
- end_write_semaphore_ = VK_NULL_HANDLE;
- in_present_images_.emplace_back(*acquired_image_);
- acquired_image_.reset();
- return gfx::SwapResult::SWAP_ACK;
+ DCHECK_EQ(state_, VK_SUCCESS);
+
+ has_pending_post_sub_buffer_ = true;
+
+ post_sub_buffer_task_runner_->PostTask(
+ FROM_HERE,
+ base::BindOnce(
+ [](VulkanSwapChain* self, PostSubBufferCompletionCallback callback) {
+ base::AutoLock auto_lock(self->lock_);
+ DCHECK(self->has_pending_post_sub_buffer_);
+ auto swap_result = self->AcquireNextImage()
+ ? gfx::SwapResult::SWAP_ACK
+ : gfx::SwapResult::SWAP_FAILED;
+ self->task_runner_->PostTask(
+ FROM_HERE, base::BindOnce(std::move(callback), swap_result));
+ self->has_pending_post_sub_buffer_ = false;
+ self->condition_variable_.Signal();
+ },
+ base::Unretained(this), std::move(callback)));
}
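WaitUntilPostSubBufferAsyncFinished() is called in Destroy() and during swap-chain replacement, but its body falls outside this excerpt; a plausible sketch given the |lock_|/|condition_variable_| pair signalled above:

void VulkanSwapChain::WaitUntilPostSubBufferAsyncFinished() {
  // Caller holds |lock_|; base::ConditionVariable::Wait() releases it while
  // blocked and reacquires it before returning.
  while (has_pending_post_sub_buffer_)
    condition_variable_.Wait();
}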
bool VulkanSwapChain::InitializeSwapChain(
@@ -161,29 +143,35 @@ bool VulkanSwapChain::InitializeSwapChain(
VkSurfaceTransformFlagBitsKHR pre_transform,
bool use_protected_memory,
std::unique_ptr<VulkanSwapChain> old_swap_chain) {
- DCHECK(!acquired_image_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
VkDevice device = device_queue_->GetVulkanDevice();
VkResult result = VK_SUCCESS;
- VkSwapchainCreateInfoKHR swap_chain_create_info = {};
- swap_chain_create_info.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
- swap_chain_create_info.flags =
- use_protected_memory ? VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR : 0;
- swap_chain_create_info.surface = surface;
- swap_chain_create_info.minImageCount = min_image_count,
- swap_chain_create_info.imageFormat = surface_format.format;
- swap_chain_create_info.imageColorSpace = surface_format.colorSpace;
- swap_chain_create_info.imageExtent.width = image_size.width();
- swap_chain_create_info.imageExtent.height = image_size.height();
- swap_chain_create_info.imageArrayLayers = 1;
- swap_chain_create_info.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
- swap_chain_create_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
- swap_chain_create_info.preTransform = pre_transform;
- swap_chain_create_info.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
- swap_chain_create_info.presentMode = VK_PRESENT_MODE_FIFO_KHR;
- swap_chain_create_info.clipped = true;
- swap_chain_create_info.oldSwapchain =
- old_swap_chain ? old_swap_chain->swap_chain_ : VK_NULL_HANDLE;
+ VkSwapchainCreateInfoKHR swap_chain_create_info = {
+ .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ .flags = use_protected_memory ? VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR : 0,
+ .surface = surface,
+ .minImageCount = min_image_count,
+ .imageFormat = surface_format.format,
+ .imageColorSpace = surface_format.colorSpace,
+ .imageExtent = {image_size.width(), image_size.height()},
+ .imageArrayLayers = 1,
+ .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
+ .imageSharingMode = VK_SHARING_MODE_EXCLUSIVE,
+ .preTransform = pre_transform,
+ .compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ .presentMode = VK_PRESENT_MODE_FIFO_KHR,
+ .clipped = VK_TRUE,
+ .oldSwapchain = VK_NULL_HANDLE,
+ };
+ if (old_swap_chain) {
+ base::AutoLock auto_lock(old_swap_chain->lock_);
+ old_swap_chain->WaitUntilPostSubBufferAsyncFinished();
+ swap_chain_create_info.oldSwapchain = old_swap_chain->swap_chain_;
+ // Reuse |post_sub_buffer_task_runner_| from the |old_swap_chain|.
+ post_sub_buffer_task_runner_ = old_swap_chain->post_sub_buffer_task_runner_;
+ }
VkSwapchainKHR new_swap_chain = VK_NULL_HANDLE;
result = vkCreateSwapchainKHR(device, &swap_chain_create_info, nullptr,
@@ -204,10 +192,18 @@ bool VulkanSwapChain::InitializeSwapChain(
size_ = gfx::Size(swap_chain_create_info.imageExtent.width,
swap_chain_create_info.imageExtent.height);
+ if (!post_sub_buffer_task_runner_) {
+ post_sub_buffer_task_runner_ = base::ThreadPool::CreateSequencedTaskRunner(
+ {base::TaskPriority::USER_BLOCKING,
+ base::TaskShutdownBehavior::BLOCK_SHUTDOWN, base::MayBlock()});
+ }
+
return true;
}
void VulkanSwapChain::DestroySwapChain() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (swap_chain_ == VK_NULL_HANDLE)
return;
vkDestroySwapchainKHR(device_queue_->GetVulkanDevice(), swap_chain_,
@@ -217,6 +213,8 @@ void VulkanSwapChain::DestroySwapChain() {
bool VulkanSwapChain::InitializeSwapImages(
const VkSurfaceFormatKHR& surface_format) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
VkDevice device = device_queue_->GetVulkanDevice();
VkResult result = VK_SUCCESS;
@@ -250,6 +248,8 @@ bool VulkanSwapChain::InitializeSwapImages(
}
void VulkanSwapChain::DestroySwapImages() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
if (end_write_semaphore_)
vkDestroySemaphore(device_queue_->GetVulkanDevice(), end_write_semaphore_,
nullptr /* pAllocator */);
@@ -281,30 +281,38 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
uint32_t* image_index,
VkImageLayout* image_layout,
VkSemaphore* semaphore) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(image);
DCHECK(image_index);
DCHECK(image_layout);
DCHECK(semaphore);
DCHECK(!is_writing_);
- VkSemaphore vk_semaphore = VK_NULL_HANDLE;
+ if (state_ != VK_SUCCESS)
+ return false;
+
+ if (!acquired_image_)
+ return false;
+
+ auto& current_image_data = images_[*acquired_image_];
- if (!acquired_image_) {
+ VkSemaphore vk_semaphore = VK_NULL_HANDLE;
+ if (current_image_data.present_end_semaphore != VK_NULL_HANDLE) {
DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
- if (!AcquireNextImage())
- return false;
- DCHECK(acquired_image_);
- std::swap(vk_semaphore, images_[*acquired_image_].present_end_semaphore);
+ vk_semaphore = current_image_data.present_end_semaphore;
+ current_image_data.present_end_semaphore = VK_NULL_HANDLE;
} else {
- // In this case, PresentBuffer() is not called after
+ DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
+    // In this case, PostSubBuffer() was not called after the previous
     // {Begin,End}WriteCurrentImage pair, so |end_write_semaphore_| should be
     // waited on before writing the image again.
- std::swap(vk_semaphore, end_write_semaphore_);
+ vk_semaphore = end_write_semaphore_;
+ end_write_semaphore_ = VK_NULL_HANDLE;
}
- auto& current_image_data = images_[*acquired_image_];
*image = current_image_data.image;
- *image_index = acquired_image_.value();
+ *image_index = *acquired_image_;
*image_layout = current_image_data.layout;
*semaphore = vk_semaphore;
is_writing_ = true;
@@ -314,6 +322,8 @@ bool VulkanSwapChain::BeginWriteCurrentImage(VkImage* image,
void VulkanSwapChain::EndWriteCurrentImage(VkImageLayout image_layout,
VkSemaphore semaphore) {
+ base::AutoLock auto_lock(lock_);
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
DCHECK(is_writing_);
DCHECK(acquired_image_);
DCHECK(end_write_semaphore_ == VK_NULL_HANDLE);
@@ -324,29 +334,107 @@ void VulkanSwapChain::EndWriteCurrentImage(VkImageLayout image_layout,
is_writing_ = false;
}
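
The branch above picks which semaphore the next writer must wait on: the semaphore from vkAcquireNextImageKHR() if the image just came back from the present engine, or the previous end-of-write semaphore if the image was written but never presented. A minimal sketch of that selection in isolation, with the two members passed by reference:

// Sketch of the wait-semaphore selection in BeginWriteCurrentImage(); the
// helper name is hypothetical, and the parameters mirror
// |present_end_semaphore| and |end_write_semaphore_| above.
#include <vulkan/vulkan.h>
#include <utility>

VkSemaphore TakeWaitSemaphore(VkSemaphore& present_end_semaphore,
                              VkSemaphore& end_write_semaphore) {
  VkSemaphore semaphore = VK_NULL_HANDLE;
  if (present_end_semaphore != VK_NULL_HANDLE) {
    // The image came back from the present engine; wait on the acquire
    // semaphore and clear it.
    std::swap(semaphore, present_end_semaphore);
  } else {
    // The image was written but never presented; wait on the previous
    // end-of-write semaphore instead.
    std::swap(semaphore, end_write_semaphore);
  }
  return semaphore;
}
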
-bool VulkanSwapChain::AcquireNextImage() {
- DCHECK(!acquired_image_);
+bool VulkanSwapChain::PresentBuffer(const gfx::Rect& rect) {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+ DCHECK_EQ(state_, VK_SUCCESS);
+ DCHECK(acquired_image_);
+ DCHECK(end_write_semaphore_ != VK_NULL_HANDLE);
+
+ VkResult result = VK_SUCCESS;
VkDevice device = device_queue_->GetVulkanDevice();
- // The Vulkan spec doesn't require vkAcquireNextImageKHR() returns images in
- // the present order for a vulkan swap chain. However for the best
- // performance, the driver should return images in order. To avoid buggy
- // drivers, we will call vkAcquireNextImageKHR() continually until the
- // expected image is returned.
- do {
- bool all_images_are_tracked = in_present_images_.size() == images_.size();
- if (all_images_are_tracked) {
- // Only check the expected_next_image, when all images are tracked.
- uint32_t expected_next_image = in_present_images_.front();
- // If the expected next image has been acquired, use it and return true.
- if (images_[expected_next_image].is_acquired) {
- in_present_images_.pop_front();
- acquired_image_.emplace(expected_next_image);
- break;
- }
+ VkQueue queue = device_queue_->GetVulkanQueue();
+ auto* fence_helper = device_queue_->GetFenceHelper();
+
+ auto& current_image_data = images_[*acquired_image_];
+ if (current_image_data.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) {
+ {
+ current_image_data.command_buffer->Clear();
+ ScopedSingleUseCommandBufferRecorder recorder(
+ *current_image_data.command_buffer);
+ current_image_data.command_buffer->TransitionImageLayout(
+ current_image_data.image, current_image_data.layout,
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR);
}
+ current_image_data.layout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VkSemaphore vk_semaphore = CreateSemaphore(device);
- DCHECK(vk_semaphore != VK_NULL_HANDLE);
+ // Submit our command_buffer for the current buffer. It sets the image
+ // layout for presenting.
+ if (!current_image_data.command_buffer->Submit(1, &end_write_semaphore_, 1,
+ &vk_semaphore)) {
+ vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+ return false;
+ }
+ fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(end_write_semaphore_);
+ end_write_semaphore_ = vk_semaphore;
+ }
+
+ VkPresentInfoKHR present_info = {VK_STRUCTURE_TYPE_PRESENT_INFO_KHR};
+ present_info.waitSemaphoreCount = 1;
+ present_info.pWaitSemaphores = &end_write_semaphore_;
+ present_info.swapchainCount = 1;
+ present_info.pSwapchains = &swap_chain_;
+ present_info.pImageIndices = &acquired_image_.value();
+
+ VkRectLayerKHR rect_layer;
+ VkPresentRegionKHR present_region;
+ VkPresentRegionsKHR present_regions = {VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR};
+ if (is_incremental_present_supported_) {
+ rect_layer.offset = {rect.x(), rect.y()};
+ rect_layer.extent = {rect.width(), rect.height()};
+ rect_layer.layer = 0;
+
+ present_region.rectangleCount = 1;
+ present_region.pRectangles = &rect_layer;
+
+ present_regions.swapchainCount = 1;
+ present_regions.pRegions = &present_region;
+
+ present_info.pNext = &present_regions;
+ }
+
+ result = vkQueuePresentKHR(queue, &present_info);
+ if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
+ LOG(DFATAL) << "vkQueuePresentKHR() failed: " << result;
+ state_ = result;
+ return false;
+ }
+
+  LOG_IF(ERROR, result == VK_SUBOPTIMAL_KHR) << "Swapchain is suboptimal.";
+
+ if (current_image_data.present_begin_semaphore != VK_NULL_HANDLE) {
+    // The |present_begin_semaphore| from the previous present of this image
+    // can be destroyed safely once the semaphore returned by
+    // vkAcquireNextImageKHR() has been waited on by submitted GPU work, so we
+    // can enqueue |present_begin_semaphore| for cleanup here (the enqueued
+    // semaphore is destroyed when all submitted GPU work finishes).
+ fence_helper->EnqueueSemaphoreCleanupForSubmittedWork(
+ current_image_data.present_begin_semaphore);
+ }
+  // We cannot tell when the present engine is done with the semaphore, so do
+  // not destroy it until the image is returned by the present engine.
+ current_image_data.present_begin_semaphore = end_write_semaphore_;
+ end_write_semaphore_ = VK_NULL_HANDLE;
+
+ acquired_image_.reset();
+
+ return true;
+}
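
When VK_KHR_incremental_present is supported, PresentBuffer() advertises the damage rectangle by chaining VkPresentRegionsKHR into VkPresentInfoKHR::pNext. A condensed restatement of just that chaining, with all inputs assumed to come from the surrounding presentation code:

// Sketch: chaining VK_KHR_incremental_present data into vkQueuePresentKHR().
// The function name is hypothetical; only use the pNext chain when the
// extension is enabled on the device.
#include <vulkan/vulkan.h>
#include "ui/gfx/geometry/rect.h"

VkResult PresentWithDamage(VkQueue queue, VkSwapchainKHR swapchain,
                           uint32_t image_index, VkSemaphore wait_semaphore,
                           const gfx::Rect& damage) {
  VkRectLayerKHR rect_layer = {
      .offset = {damage.x(), damage.y()},
      .extent = {static_cast<uint32_t>(damage.width()),
                 static_cast<uint32_t>(damage.height())},
      .layer = 0,
  };
  VkPresentRegionKHR region = {.rectangleCount = 1,
                               .pRectangles = &rect_layer};
  VkPresentRegionsKHR regions = {
      .sType = VK_STRUCTURE_TYPE_PRESENT_REGIONS_KHR,
      .swapchainCount = 1,
      .pRegions = &region,
  };
  VkPresentInfoKHR present_info = {
      .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
      .pNext = &regions,  // Only valid when the extension is enabled.
      .waitSemaphoreCount = 1,
      .pWaitSemaphores = &wait_semaphore,
      .swapchainCount = 1,
      .pSwapchains = &swapchain,
      .pImageIndices = &image_index,
  };
  return vkQueuePresentKHR(queue, &present_info);
}
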
+
+bool VulkanSwapChain::AcquireNextImage() {
+ DCHECK_EQ(state_, VK_SUCCESS);
+ DCHECK(!acquired_image_);
+
+  // VulkanDeviceQueue is not thread-safe yet, but |device_queue_| will not be
+  // released and device_queue_->device never changes after initialization, so
+  // this access is safe for now.
+ // TODO(penghuang): make VulkanDeviceQueue threadsafe.
+ VkDevice device = device_queue_->GetVulkanDevice();
+
+ VkSemaphore vk_semaphore = CreateSemaphore(device);
+ DCHECK(vk_semaphore != VK_NULL_HANDLE);
#if defined(USE_X11)
// The xserver should still composite windows with a 1Hz fake vblank when
@@ -361,44 +449,46 @@ bool VulkanSwapChain::AcquireNextImage() {
#else
constexpr uint64_t kTimeout = UINT64_MAX;
#endif
- // Acquire the next image.
- uint32_t next_image;
- auto result =
- vkAcquireNextImageKHR(device, swap_chain_, kTimeout, vk_semaphore,
- VK_NULL_HANDLE, &next_image);
- if (result == VK_TIMEOUT) {
- LOG(ERROR) << "vkAcquireNextImageKHR() hangs.";
- vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
- state_ = VK_ERROR_SURFACE_LOST_KHR;
- return false;
- }
- if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
- LOG(DFATAL) << "vkAcquireNextImageKHR() failed: " << result;
- vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
- state_ = result;
- return false;
- }
+ // Acquire the next image.
+ uint32_t next_image;
+ auto result = ({
+ base::ScopedBlockingCall scoped_blocking_call(
+ FROM_HERE, base::BlockingType::WILL_BLOCK);
+ vkAcquireNextImageKHR(device, swap_chain_, kTimeout, vk_semaphore,
+ VK_NULL_HANDLE, &next_image);
+ });
+
+ if (result == VK_TIMEOUT) {
+ LOG(ERROR) << "vkAcquireNextImageKHR() hangs.";
+ vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+ state_ = VK_ERROR_SURFACE_LOST_KHR;
+ return false;
+ }
- DCHECK(!images_[next_image].is_acquired);
- DCHECK(images_[next_image].present_end_semaphore == VK_NULL_HANDLE);
- images_[next_image].is_acquired = true;
- images_[next_image].present_end_semaphore = vk_semaphore;
-
- auto it = std::find(in_present_images_.begin(), in_present_images_.end(),
- next_image);
- if (it == in_present_images_.end()) {
- DCHECK(!all_images_are_tracked);
- // Got an image which is not in the present queue due to the new created
- // swap chain. In this case, just use this image.
- acquired_image_.emplace(next_image);
- break;
- }
- LOG_IF(ERROR, it != in_present_images_.begin())
- << "vkAcquireNextImageKHR() returned an unexpected image.";
- } while (true);
+ if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR) {
+ LOG(DFATAL) << "vkAcquireNextImageKHR() failed: " << result;
+ vkDestroySemaphore(device, vk_semaphore, nullptr /* pAllocator */);
+ state_ = result;
+ return false;
+ }
+
+ DCHECK(images_[next_image].present_end_semaphore == VK_NULL_HANDLE);
+ images_[next_image].present_end_semaphore = vk_semaphore;
+ acquired_image_.emplace(next_image);
return true;
}
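
vkAcquireNextImageKHR() may block until the present engine releases an image, so the call above is wrapped in a base::ScopedBlockingCall, which tells the scheduler the thread is expected to block. A minimal sketch of the same pattern in isolation; the helper name is hypothetical:

// Sketch: scoping a potentially long Vulkan wait for the thread pool.
#include <cstdint>
#include <vulkan/vulkan.h>
#include "base/location.h"
#include "base/threading/scoped_blocking_call.h"

VkResult AcquireWithBlockingScope(VkDevice device,
                                  VkSwapchainKHR swap_chain,
                                  VkSemaphore semaphore,
                                  uint32_t* image_index) {
  // WILL_BLOCK tells the scheduler this thread is expected to block, so it
  // can bring up extra workers while we wait on the present engine.
  base::ScopedBlockingCall scoped_blocking_call(
      FROM_HERE, base::BlockingType::WILL_BLOCK);
  return vkAcquireNextImageKHR(device, swap_chain, UINT64_MAX, semaphore,
                               VK_NULL_HANDLE, image_index);
}
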
+void VulkanSwapChain::WaitUntilPostSubBufferAsyncFinished() {
+ DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
+
+ while (has_pending_post_sub_buffer_) {
+ base::ScopedBlockingCall scoped_blocking_call(
+ FROM_HERE, base::BlockingType::WILL_BLOCK);
+ condition_variable_.Wait();
+ }
+ DCHECK(acquired_image_ || state_ != VK_SUCCESS);
+}
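
This wait loop only terminates if the worker that finishes a PostSubBufferAsync() clears |has_pending_post_sub_buffer_| and signals |condition_variable_| under the same lock. That completion side is not part of this hunk; a rough sketch of what it presumably looks like, with a hypothetical free-function shape and name:

// Sketch of the completion side assumed by the wait loop above. In the real
// class this runs at the end of the ThreadPool task posted by
// PostSubBufferAsync().
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"

void OnPostSubBufferDone(base::Lock& lock,
                         base::ConditionVariable& condition_variable,
                         bool& has_pending_post_sub_buffer) {
  base::AutoLock auto_lock(lock);
  has_pending_post_sub_buffer = false;
  // Wake any thread blocked in WaitUntilPostSubBufferAsyncFinished().
  condition_variable.Signal();
}
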
+
VulkanSwapChain::ScopedWrite::ScopedWrite(VulkanSwapChain* swap_chain)
: swap_chain_(swap_chain) {
success_ = swap_chain_->BeginWriteCurrentImage(
diff --git a/chromium/gpu/vulkan/vulkan_swap_chain.h b/chromium/gpu/vulkan/vulkan_swap_chain.h
index bb92873da28..65261abdeb5 100644
--- a/chromium/gpu/vulkan/vulkan_swap_chain.h
+++ b/chromium/gpu/vulkan/vulkan_swap_chain.h
@@ -10,14 +10,22 @@
#include <memory>
#include <vector>
+#include "base/callback.h"
#include "base/component_export.h"
#include "base/containers/circular_deque.h"
-#include "base/logging.h"
+#include "base/memory/scoped_refptr.h"
#include "base/optional.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
#include "ui/gfx/geometry/rect.h"
#include "ui/gfx/geometry/size.h"
#include "ui/gfx/swap_result.h"
+namespace base {
+class SingleThreadTaskRunner;
+}
+
namespace gpu {
class VulkanCommandBuffer;
@@ -73,12 +81,36 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
void Destroy();
// Present the current buffer.
- gfx::SwapResult PresentBuffer(const gfx::Rect& rect);
-
- uint32_t num_images() const { return static_cast<uint32_t>(images_.size()); }
- const gfx::Size& size() const { return size_; }
- bool use_protected_memory() const { return use_protected_memory_; }
- VkResult state() const { return state_; }
+ gfx::SwapResult PostSubBuffer(const gfx::Rect& rect);
+ using PostSubBufferCompletionCallback =
+ base::OnceCallback<void(gfx::SwapResult)>;
+ void PostSubBufferAsync(const gfx::Rect& rect,
+ PostSubBufferCompletionCallback callback);
+
+ uint32_t num_images() const {
+    // The size of |images_| does not change after initialization, so it is
+    // safe to read it here without holding |lock_|.
+ return static_cast<uint32_t>(TS_UNCHECKED_READ(images_).size());
+ }
+ const gfx::Size& size() const {
+ // |size_| is never changed after initialization.
+ return size_;
+ }
+ bool use_protected_memory() const {
+ // |use_protected_memory_| is never changed after initialization.
+ return use_protected_memory_;
+ }
+
+ uint32_t current_image_index() const {
+ base::AutoLock auto_lock(lock_);
+ DCHECK(acquired_image_);
+ return *acquired_image_;
+ }
+
+ VkResult state() const {
+ base::AutoLock auto_lock(lock_);
+ return state_;
+ }
private:
bool InitializeSwapChain(VkSurfaceKHR surface,
@@ -87,26 +119,31 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
uint32_t min_image_count,
VkSurfaceTransformFlagBitsKHR pre_transform,
bool use_protected_memory,
- std::unique_ptr<VulkanSwapChain> old_swap_chain);
- void DestroySwapChain();
+ std::unique_ptr<VulkanSwapChain> old_swap_chain)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void DestroySwapChain() EXCLUSIVE_LOCKS_REQUIRED(lock_);
- bool InitializeSwapImages(const VkSurfaceFormatKHR& surface_format);
- void DestroySwapImages();
+ bool InitializeSwapImages(const VkSurfaceFormatKHR& surface_format)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void DestroySwapImages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
bool BeginWriteCurrentImage(VkImage* image,
uint32_t* image_index,
VkImageLayout* layout,
VkSemaphore* semaphore);
void EndWriteCurrentImage(VkImageLayout layout, VkSemaphore semaphore);
- bool AcquireNextImage();
+ bool PresentBuffer(const gfx::Rect& rect) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ bool AcquireNextImage() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+  // Waits until any pending PostSubBufferAsync() on the ThreadPool finishes.
+ void WaitUntilPostSubBufferAsyncFinished() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ mutable base::Lock lock_;
bool use_protected_memory_ = false;
VulkanDeviceQueue* device_queue_ = nullptr;
bool is_incremental_present_supported_ = false;
- VkSwapchainKHR swap_chain_ = VK_NULL_HANDLE;
-
+ VkSwapchainKHR swap_chain_ GUARDED_BY(lock_) = VK_NULL_HANDLE;
std::unique_ptr<VulkanCommandPool> command_pool_;
-
gfx::Size size_;
struct ImageData {
@@ -123,18 +160,34 @@ class COMPONENT_EXPORT(VULKAN) VulkanSwapChain {
VkSemaphore present_begin_semaphore = VK_NULL_HANDLE;
// Semaphore signaled when present engine is done with the image.
VkSemaphore present_end_semaphore = VK_NULL_HANDLE;
- // True indicates the image is acquired from swapchain and haven't sent back
- // to swapchain for presenting.
- bool is_acquired = false;
};
- std::vector<ImageData> images_;
-
- // Acquired image index.
- base::circular_deque<uint32_t> in_present_images_;
- base::Optional<uint32_t> acquired_image_;
- bool is_writing_ = false;
- VkSemaphore end_write_semaphore_ = VK_NULL_HANDLE;
- VkResult state_ = VK_SUCCESS;
+
+ // Images in the swap chain.
+ std::vector<ImageData> images_ GUARDED_BY(lock_);
+
+ base::circular_deque<uint32_t> in_present_images_ GUARDED_BY(lock_);
+ bool is_writing_ GUARDED_BY(lock_) = false;
+ VkSemaphore end_write_semaphore_ GUARDED_BY(lock_) = VK_NULL_HANDLE;
+
+  // Signalled when a pending PostSubBufferAsync() finishes.
+ base::ConditionVariable condition_variable_{&lock_};
+
+  // True if a PostSubBufferAsync() is in flight.
+ bool has_pending_post_sub_buffer_ GUARDED_BY(lock_) = false;
+
+  // The current swap chain state.
+ VkResult state_ GUARDED_BY(lock_) = VK_SUCCESS;
+
+  // Index of the currently acquired image, if any.
+ base::Optional<uint32_t> acquired_image_ GUARDED_BY(lock_);
+
+  // For executing tasks on the GPU main thread.
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+  // For executing PostSubBufferAsync() tasks off the GPU main thread.
+ scoped_refptr<base::SequencedTaskRunner> post_sub_buffer_task_runner_;
+
+ THREAD_CHECKER(thread_checker_);
DISALLOW_COPY_AND_ASSIGN(VulkanSwapChain);
};
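
The header now leans on Clang thread-safety analysis: GUARDED_BY(lock_) marks fields that may only be touched while |lock_| is held, EXCLUSIVE_LOCKS_REQUIRED(lock_) marks methods whose callers must already hold it, and TS_UNCHECKED_READ opts a known-safe read out of the checking. A small self-contained sketch of how these annotations catch misuse at compile time, using only Chromium's base lock wrappers; the class is hypothetical:

// Sketch: Clang thread-safety annotations on a trivial counter.
#include "base/synchronization/lock.h"
#include "base/thread_annotations.h"

class Counter {
 public:
  void Increment() {
    base::AutoLock auto_lock(lock_);
    IncrementLocked();  // OK: |lock_| is held here.
  }

 private:
  // Callers must hold |lock_|; calling this without it is a compile-time
  // error when thread-safety analysis is enabled.
  void IncrementLocked() EXCLUSIVE_LOCKS_REQUIRED(lock_) { ++value_; }

  base::Lock lock_;
  int value_ GUARDED_BY(lock_) = 0;
};
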
diff --git a/chromium/gpu/vulkan/vulkan_util.cc b/chromium/gpu/vulkan/vulkan_util.cc
index 8a9661a3301..ca52a27bfaa 100644
--- a/chromium/gpu/vulkan/vulkan_util.cc
+++ b/chromium/gpu/vulkan/vulkan_util.cc
@@ -7,6 +7,9 @@
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/strings/stringprintf.h"
+#include "build/build_config.h"
+#include "gpu/config/gpu_info.h" // nogncheck
+#include "gpu/config/vulkan_info.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
namespace gpu {
@@ -100,4 +103,48 @@ void ReportQueueSubmitPerSwapBuffers() {
last_count = g_submit_count;
}
+bool CheckVulkanCompabilities(const VulkanInfo& vulkan_info,
+ const GPUInfo& gpu_info) {
+// Android uses AHardwareBuffer (AHB) and SyncFD for interop; they are
+// imported into GL through other APIs.
+#if !defined(OS_ANDROID)
+#if defined(OS_WIN)
+ constexpr char kMemoryObjectExtension[] = "GL_EXT_memory_object_win32";
+ constexpr char kSemaphoreExtension[] = "GL_EXT_semaphore_win32";
+#elif defined(OS_FUCHSIA)
+ constexpr char kMemoryObjectExtension[] = "GL_ANGLE_memory_object_fuchsia";
+ constexpr char kSemaphoreExtension[] = "GL_ANGLE_semaphore_fuchsia";
+#else
+ constexpr char kMemoryObjectExtension[] = "GL_EXT_memory_object_fd";
+ constexpr char kSemaphoreExtension[] = "GL_EXT_semaphore_fd";
+#endif
+  // If both Vulkan and GL are using the native GPU (not SwiftShader), check
+  // the extensions necessary for GL and Vulkan interop.
+ const auto extensions = gfx::MakeExtensionSet(gpu_info.gl_extensions);
+ if (!gfx::HasExtension(extensions, kMemoryObjectExtension) ||
+ !gfx::HasExtension(extensions, kSemaphoreExtension)) {
+ DLOG(ERROR) << kMemoryObjectExtension << " or " << kSemaphoreExtension
+ << " is not supported.";
+ return false;
+ }
+#endif // !defined(OS_ANDROID)
+
+#if defined(OS_ANDROID)
+ if (vulkan_info.physical_devices.empty())
+ return false;
+
+ const auto& device_info = vulkan_info.physical_devices.front();
+ constexpr uint32_t kVendorARM = 0x13b5;
+
+  // https://crbug.com/1096222: display problems on Huawei and Honor devices
+  // with Mali GPUs when the Mali driver version is < 19.0.0.
+ if (device_info.properties.vendorID == kVendorARM &&
+ device_info.properties.driverVersion < VK_MAKE_VERSION(19, 0, 0)) {
+ return false;
+ }
+#endif // defined(OS_ANDROID)
+
+ return true;
+}
+
}  // namespace gpu
\ No newline at end of file
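
CheckVulkanCompabilities() gates Vulkan on the platform's GL interop extensions and on a known-bad Mali driver range. A rough sketch of how a caller might consume it; FallBackToGL() is a placeholder, not a real Chromium function:

// Sketch only: a hypothetical caller that falls back to GL when the
// Vulkan/GL interop requirements are not met.
#include "base/logging.h"
#include "gpu/config/gpu_info.h"
#include "gpu/config/vulkan_info.h"
#include "gpu/vulkan/vulkan_util.h"

void FallBackToGL();  // Hypothetical fallback hook.

void MaybeEnableVulkan(const gpu::VulkanInfo& vulkan_info,
                       const gpu::GPUInfo& gpu_info) {
  if (!gpu::CheckVulkanCompabilities(vulkan_info, gpu_info)) {
    DLOG(ERROR) << "Vulkan/GL interop requirements not met; using GL.";
    FallBackToGL();
    return;
  }
  // Proceed with Vulkan initialization here.
}
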
diff --git a/chromium/gpu/vulkan/vulkan_util.h b/chromium/gpu/vulkan/vulkan_util.h
index e6f44251aca..f2d6c5cb879 100644
--- a/chromium/gpu/vulkan/vulkan_util.h
+++ b/chromium/gpu/vulkan/vulkan_util.h
@@ -18,6 +18,9 @@
namespace gpu {
+struct GPUInfo;
+class VulkanInfo;
+
// Submits semaphores to be signaled to the vulkan queue. Semaphores are
// signaled once this submission is executed. vk_fence is an optional handle
// to fence to be signaled once this submission completes execution.
@@ -79,6 +82,10 @@ VKAPI_ATTR VkResult VKAPI_CALL QueueSubmitHook(VkQueue queue,
COMPONENT_EXPORT(VULKAN) void ReportQueueSubmitPerSwapBuffers();
+COMPONENT_EXPORT(VULKAN)
+bool CheckVulkanCompabilities(const VulkanInfo& vulkan_info,
+ const GPUInfo& gpu_info);
+
} // namespace gpu
#endif // GPU_VULKAN_VULKAN_UTIL_H_
diff --git a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
index 6e8f4cdeaf0..ec752c329d4 100644
--- a/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_implementation_x11.cc
@@ -77,7 +77,7 @@ VulkanImplementationX11::VulkanImplementationX11(bool use_swiftshader)
gfx::GetXDisplay();
}
-VulkanImplementationX11::~VulkanImplementationX11() {}
+VulkanImplementationX11::~VulkanImplementationX11() = default;
bool VulkanImplementationX11::InitializeVulkanInstance(bool using_surface) {
if (using_surface && !use_swiftshader() && !IsVulkanSurfaceSupported())
@@ -126,7 +126,8 @@ std::unique_ptr<VulkanSurface> VulkanImplementationX11::CreateViewSurface(
gfx::AcceleratedWidget window) {
if (!using_surface_)
return nullptr;
- return VulkanSurfaceX11::Create(vulkan_instance_.vk_instance(), window);
+ return VulkanSurfaceX11::Create(vulkan_instance_.vk_instance(),
+ static_cast<x11::Window>(window));
}
bool VulkanImplementationX11::GetPhysicalDevicePresentationSupport(
diff --git a/chromium/gpu/vulkan/x/vulkan_surface_x11.cc b/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
index 3bb675440cf..592a574a99e 100644
--- a/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
+++ b/chromium/gpu/vulkan/x/vulkan_surface_x11.cc
@@ -7,6 +7,7 @@
#include "base/logging.h"
#include "gpu/vulkan/vulkan_function_pointers.h"
#include "ui/events/platform/x11/x11_event_source.h"
+#include "ui/gfx/native_widget_types.h"
namespace gpu {
@@ -14,7 +15,8 @@ class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
public:
explicit ExposeEventForwarder(VulkanSurfaceX11* surface) : surface_(surface) {
if (auto* event_source = ui::X11EventSource::GetInstance()) {
- XSelectInput(gfx::GetXDisplay(), surface_->window_, ExposureMask);
+ XSelectInput(gfx::GetXDisplay(), static_cast<uint32_t>(surface_->window_),
+ ExposureMask);
event_source->AddXEventDispatcher(this);
}
}
@@ -25,7 +27,7 @@ class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
}
// ui::XEventDispatcher:
- bool DispatchXEvent(XEvent* xevent) override {
+ bool DispatchXEvent(x11::Event* xevent) override {
if (!surface_->CanDispatchXEvent(xevent))
return false;
surface_->ForwardXExposeEvent(xevent);
@@ -40,17 +42,19 @@ class VulkanSurfaceX11::ExposeEventForwarder : public ui::XEventDispatcher {
// static
std::unique_ptr<VulkanSurfaceX11> VulkanSurfaceX11::Create(
VkInstance vk_instance,
- Window parent_window) {
+ x11::Window parent_window) {
XDisplay* display = gfx::GetXDisplay();
XWindowAttributes attributes;
- if (!XGetWindowAttributes(display, parent_window, &attributes)) {
- LOG(ERROR) << "XGetWindowAttributes failed for window " << parent_window
- << ".";
+ if (!XGetWindowAttributes(display, static_cast<uint32_t>(parent_window),
+ &attributes)) {
+ LOG(ERROR) << "XGetWindowAttributes failed for window "
+ << static_cast<uint32_t>(parent_window) << ".";
return nullptr;
}
- Window window = XCreateWindow(display, parent_window, 0, 0, attributes.width,
- attributes.height, 0, CopyFromParent,
- InputOutput, CopyFromParent, 0, nullptr);
+ Window window = XCreateWindow(
+ display, static_cast<uint32_t>(parent_window), 0, 0, attributes.width,
+ attributes.height, 0, static_cast<int>(x11::WindowClass::CopyFromParent),
+ static_cast<int>(x11::WindowClass::InputOutput), nullptr, 0, nullptr);
if (!window) {
LOG(ERROR) << "XCreateWindow failed.";
return nullptr;
@@ -68,42 +72,45 @@ std::unique_ptr<VulkanSurfaceX11> VulkanSurfaceX11::Create(
DLOG(ERROR) << "vkCreateXlibSurfaceKHR() failed: " << result;
return nullptr;
}
- return std::make_unique<VulkanSurfaceX11>(vk_instance, vk_surface,
- parent_window, window);
+ return std::make_unique<VulkanSurfaceX11>(
+ vk_instance, vk_surface, parent_window, static_cast<x11::Window>(window));
}
VulkanSurfaceX11::VulkanSurfaceX11(VkInstance vk_instance,
VkSurfaceKHR vk_surface,
- Window parent_window,
- Window window)
+ x11::Window parent_window,
+ x11::Window window)
: VulkanSurface(vk_instance,
- window,
+ static_cast<gfx::AcceleratedWidget>(window),
vk_surface,
false /* use_protected_memory */),
parent_window_(parent_window),
window_(window),
expose_event_forwarder_(new ExposeEventForwarder(this)) {}
-VulkanSurfaceX11::~VulkanSurfaceX11() {}
+VulkanSurfaceX11::~VulkanSurfaceX11() = default;
// VulkanSurface:
bool VulkanSurfaceX11::Reshape(const gfx::Size& size,
gfx::OverlayTransform pre_transform) {
DCHECK_EQ(pre_transform, gfx::OVERLAY_TRANSFORM_NONE);
- XResizeWindow(gfx::GetXDisplay(), window_, size.width(), size.height());
+ XResizeWindow(gfx::GetXDisplay(), static_cast<uint32_t>(window_),
+ size.width(), size.height());
return VulkanSurface::Reshape(size, pre_transform);
}
-bool VulkanSurfaceX11::CanDispatchXEvent(const XEvent* event) {
- return event->type == Expose && event->xexpose.window == window_;
+bool VulkanSurfaceX11::CanDispatchXEvent(const x11::Event* x11_event) {
+ const XEvent* event = &x11_event->xlib_event();
+ return event->type == Expose &&
+ event->xexpose.window == static_cast<uint32_t>(window_);
}
-void VulkanSurfaceX11::ForwardXExposeEvent(const XEvent* event) {
- XEvent forwarded_event = *event;
- forwarded_event.xexpose.window = parent_window_;
- XSendEvent(gfx::GetXDisplay(), parent_window_, False, ExposureMask,
- &forwarded_event);
+void VulkanSurfaceX11::ForwardXExposeEvent(const x11::Event* event) {
+ XEvent forwarded_event = event->xlib_event();
+ forwarded_event.xexpose.window = static_cast<uint32_t>(parent_window_);
+ XSendEvent(gfx::GetXDisplay(), static_cast<uint32_t>(parent_window_), False,
+ ExposureMask, &forwarded_event);
XFlush(gfx::GetXDisplay());
}
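
The X11 changes replace the raw Xlib Window typedef with the strongly typed x11::Window, so every call back into Xlib now needs an explicit static_cast at the boundary. A rough sketch of the idea behind such a strong ID type, with hypothetical names throughout:

// Sketch: a strongly typed XID wrapper in the spirit of x11::Window. The
// enum class blocks implicit conversion, so passing it to an Xlib-style call
// that expects a raw id requires a deliberate, visible cast.
#include <cstdint>

enum class WindowId : uint32_t { kNone = 0 };

// Hypothetical Xlib-style C API taking a raw id.
extern "C" int XMapWindowRaw(void* display, uint32_t window);

int MapWindow(void* display, WindowId window) {
  // Accidental mixing of window ids and plain integers no longer compiles;
  // the boundary cast documents the conversion.
  return XMapWindowRaw(display, static_cast<uint32_t>(window));
}
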
diff --git a/chromium/gpu/vulkan/x/vulkan_surface_x11.h b/chromium/gpu/vulkan/x/vulkan_surface_x11.h
index 5c99d6ea907..585fe197d26 100644
--- a/chromium/gpu/vulkan/x/vulkan_surface_x11.h
+++ b/chromium/gpu/vulkan/x/vulkan_surface_x11.h
@@ -9,6 +9,7 @@
#include "base/macros.h"
#include "gpu/vulkan/vulkan_surface.h"
+#include "ui/gfx/x/event.h"
#include "ui/gfx/x/x11_types.h"
namespace gpu {
@@ -16,11 +17,11 @@ namespace gpu {
class VulkanSurfaceX11 : public VulkanSurface {
public:
static std::unique_ptr<VulkanSurfaceX11> Create(VkInstance vk_instance,
- Window parent_window);
+ x11::Window parent_window);
VulkanSurfaceX11(VkInstance vk_instance,
VkSurfaceKHR vk_surface,
- Window parent_window,
- Window window);
+ x11::Window parent_window,
+ x11::Window window);
~VulkanSurfaceX11() override;
// VulkanSurface:
@@ -29,11 +30,11 @@ class VulkanSurfaceX11 : public VulkanSurface {
private:
class ExposeEventForwarder;
- bool CanDispatchXEvent(const XEvent* event);
- void ForwardXExposeEvent(const XEvent* event);
+ bool CanDispatchXEvent(const x11::Event* event);
+ void ForwardXExposeEvent(const x11::Event* event);
- const Window parent_window_;
- const Window window_;
+ const x11::Window parent_window_;
+ const x11::Window window_;
std::unique_ptr<ExposeEventForwarder> expose_event_forwarder_;
DISALLOW_COPY_AND_ASSIGN(VulkanSurfaceX11);